GitHub Repository: torvalds/linux
Path: net/bluetooth/hci_sock.c (branch: master)

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <[email protected]>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/compat.h>
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <linux/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock bt;
	struct hci_dev *hdev;
	struct hci_filter filter;
	__u8 cmsg_mask;
	unsigned short channel;
	unsigned long flags;
	__u32 cookie;
	char comm[TASK_COMM_LEN];
	__u16 mtu;
};
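
/* The hci_pi() cast above works because struct hci_pinfo embeds struct
 * bt_sock as its first member, which in turn embeds struct sock first,
 * so a struct sock pointer for an HCI socket is also a valid struct
 * hci_pinfo pointer. This is the usual protocol-private-data pattern
 * for kernel sockets.
 */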

static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}

static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_alloc_min(&sock_cookie_ida, 1, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0;
		ida_free(&sock_cookie_ida, id);
	}
}
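
/* Cookies are allocated starting from 1, so a cookie of 0 always means
 * "not assigned yet". If the IDA allocation fails, the cookie is forced
 * to 0xffffffff, which still reads as "assigned" to the code above.
 */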

static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
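
/* hci_test_bit() treats addr as an array of 32-bit words: nr >> 5
 * selects the word and nr & 31 selects the bit inside it. For example,
 * nr = 37 tests bit 5 of the second word, since 37 = 32 + 5.
 */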

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
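
/* Each ocf_mask row is a 128-bit bitmap of permitted OCF values for one
 * OGF. For example, HCI_OP_INQUIRY (opcode 0x0401) has OGF 0x01 and
 * OCF 0x0001, so it is allowed iff bit 1 of ocf_mask[1][0] is set;
 * 0xbe000006 has bits 1 and 2 set, permitting Inquiry and Inquiry Cancel
 * for unprivileged raw sockets.
 */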

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
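
/* Note on the opcode offsets above: skb->data points at the event header
 * (event code and parameter length take 2 bytes). For Command Complete
 * the Num_HCI_Command_Packets byte follows, putting the opcode at offset
 * 3; Command Status carries a status byte first, putting it at offset 4.
 */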

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_DRV_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

static void hci_sock_copy_creds(struct sock *sk, struct sk_buff *skb)
{
	struct scm_creds *creds;

	if (!sk || WARN_ON(!skb))
		return;

	creds = &bt_cb(skb)->creds;

	/* Check if peer credentials are set */
	if (!sk->sk_peer_pid) {
		/* Check if parent peer credentials are set */
		if (bt_sk(sk)->parent && bt_sk(sk)->parent->sk_peer_pid)
			sk = bt_sk(sk)->parent;
		else
			return;
	}

	/* Check if scm_creds are already set */
	if (creds->pid == pid_vnr(sk->sk_peer_pid))
		return;

	memset(creds, 0, sizeof(*creds));

	creds->pid = pid_vnr(sk->sk_peer_pid);
	if (sk->sk_peer_cred) {
		creds->uid = sk->sk_peer_cred->uid;
		creds->gid = sk->sk_peer_cred->gid;
	}
}

static struct sk_buff *hci_skb_clone(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (!skb)
		return NULL;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_sock_copy_creds(skb->sk, nskb);

	return nskb;
}

/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = hci_skb_clone(skb);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DRV_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_DRV_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_DRV_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	hci_sock_copy_creds(skb->sk, skb_copy);

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}
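
/* The monitor header prepended above is the wire format consumed by
 * btmon: three little-endian 16-bit fields (opcode, controller index,
 * payload length), followed by the original packet bytes.
 */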

void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = 0x00; /* Old hdev->dev_type */
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
			       strnlen(hdev->name, sizeof(ni->name)), '\0');

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;
		fallthrough;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hci_sock_copy_creds(sk, skb);

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	hci_sock_copy_creds(sk, skb);

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}
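
/* The two-pass formatting above is the standard sizing idiom: the first
 * vsnprintf() with a NULL buffer only computes the formatted length, the
 * skb is sized from that, and the second pass writes the actual string
 * plus an explicit NUL terminator.
 */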

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}

static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 *
			 * Make sure to also check that we haven't already
			 * unregistered since all the cleanup will have already
			 * been completed and hdev will get released when we
			 * put below.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}

static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Make sure the cmd is valid before doing anything */
	switch (cmd) {
	case HCIGETDEVLIST:
	case HCIGETDEVINFO:
	case HCIGETCONNLIST:
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
	case HCIINQUIRY:
	case HCISETRAW:
	case HCIGETCONNINFO:
	case HCIGETAUTHINFO:
	case HCIBLOCKADDR:
	case HCIUNBLOCKADDR:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, ensure that the
	 * monitor gets informed. Ensure that the resulting event is only
	 * sent once by checking if the cookie exists or not. The socket
	 * cookie will only ever be generated once for the lifetime of a
	 * given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
		 * flag. Make sure that not only the current task but also
		 * the socket opener has the required capability, since
		 * privileged programs can be tricked into making ioctl calls
		 * on HCI sockets, and the socket should not be marked as
		 * trusted simply because the ioctl caller is privileged.
		 */
		if (sk_capable(sk, CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

#ifdef CONFIG_COMPAT
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
		return hci_sock_ioctl(sock, cmd, arg);
	}

	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif
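
/* The four commands passed through unmodified above take the device index
 * as a plain integer argument, so no pointer translation is needed. All
 * other HCI ioctls pass a user-space pointer, which compat_ptr() converts
 * from a 32-bit representation to a native pointer for 32-bit callers.
 */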

static int hci_sock_bind(struct socket *sock, struct sockaddr_unsized *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * an ioctl has already been issued against an unbound
			 * socket and with that triggered an open notification.
			 * Send a close notification first to allow the state
			 * transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	/* Default MTU to HCI_MAX_FRAME_SIZE if not set */
	if (!hci_pi(sk)->mtu)
		hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE;

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
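
/* Summary of the per-channel bind requirements enforced above: RAW may be
 * bound by anyone (CAP_NET_ADMIN only upgrades it to trusted), USER and
 * LOGGING require CAP_NET_ADMIN, MONITOR requires CAP_NET_RAW, and
 * management channels are open to all but only marked trusted with
 * CAP_NET_ADMIN.
 */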

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u8 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	memset(&scm, 0, sizeof(scm));
	scm.creds = bt_cb(skb)->creds;

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	scm_recv(sock, msg, &scm, flags);

	return err ? : copied;
}

static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct sk_buff *skb)
{
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %d bytes", skb->len);

	if (skb->len < sizeof(*hdr))
		return -EINVAL;

	hdr = (void *)skb->data;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != skb->len - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *cmd;

		/* Send event to monitor */
		cmd = create_monitor_ctrl_command(sk, index, opcode, len,
						  skb->data + sizeof(*hdr));
		if (cmd) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(cmd);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = skb->data + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = skb->len;

done:
	if (hdev)
		hci_dev_put(hdev);

	return err;
}

static int hci_logging_frame(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags)
{
	struct hci_mon_hdr *hdr;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (skb->len < sizeof(*hdr) + 3)
		return -EINVAL;

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr))
		return -EINVAL;

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[skb->len - 1] != 0x00 ||
		    ident_len > skb->len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00)
			return -EINVAL;
	} else {
		return -EINVAL;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev)
			return -ENODEV;
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = skb->len;

	if (hdev)
		hci_dev_put(hdev);

	return err;
}
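
/* Layout of a valid logging frame as checked above, after the monitor
 * header: one priority byte (0-7), one ident length byte, the
 * NUL-terminated ident string, then the NUL-terminated message.
 */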

static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;
	const unsigned int flags = msg->msg_flags;

	BT_DBG("sock %p sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > hci_pi(sk)->mtu)
		return -EINVAL;

	skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto drop;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, skb, flags);
		goto drop;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, skb);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto drop;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto drop;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_DRV_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
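
/* In the sendmsg() path above, OGF 0x3f is the vendor-specific command
 * group; such commands bypass the stack's command queue and serialization
 * and go straight to the driver via raw_q, while all other commands are
 * queued on cmd_q and flagged as single-command requests.
 */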

static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname,
				   sockptr_t optval, unsigned int optlen)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
		if (err)
			break;

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
		if (err)
			break;

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		err = copy_safe_from_sockptr(&uf, sizeof(uf), optval, optlen);
		if (err)
			break;

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int err = 0;
	u16 opt;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_setsockopt_old(sock, level, optname, optval,
					       optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		switch (hci_pi(sk)->channel) {
		/* Don't allow changing MTU for channels that are meant for HCI
		 * traffic only.
		 */
		case HCI_CHANNEL_RAW:
		case HCI_CHANNEL_USER:
			err = -ENOPROTOOPT;
			goto done;
		}

		err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
		if (err)
			break;

		hci_pi(sk)->mtu = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname,
				   char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level == SOL_HCI)
		return hci_sock_getsockopt_old(sock, level, optname, optval,
					       optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SNDMTU:
	case BT_RCVMTU:
		if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}

static void hci_sock_destruct(struct sock *sk)
{
	mgmt_cleanup(sk);
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= hci_sock_compat_ioctl,
#endif
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = bt_sock_alloc(net, sock, &hci_sk_proto, protocol, GFP_ATOMIC,
			   kern);
	if (!sk)
		return -ENOMEM;

	sock->state = SS_UNCONNECTED;
	sk->sk_destruct = hci_sock_destruct;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}
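
/* Illustrative user-space counterpart (not part of this file): an HCI
 * socket is obtained with socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI)
 * and then attached to a controller and channel via bind() with a
 * struct sockaddr_hci carrying hci_dev and hci_channel.
 */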

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}