Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
awilliam
GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/bluetooth/l2cap_core.c
15109 views
1
/*
2
BlueZ - Bluetooth protocol stack for Linux
3
Copyright (C) 2000-2001 Qualcomm Incorporated
4
Copyright (C) 2009-2010 Gustavo F. Padovan <[email protected]>
5
Copyright (C) 2010 Google Inc.
6
7
Written 2000,2001 by Maxim Krasnyansky <[email protected]>
8
9
This program is free software; you can redistribute it and/or modify
10
it under the terms of the GNU General Public License version 2 as
11
published by the Free Software Foundation;
12
13
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24
SOFTWARE IS DISCLAIMED.
25
*/
26
27
/* Bluetooth L2CAP core. */
28
29
#include <linux/module.h>
30
31
#include <linux/types.h>
32
#include <linux/capability.h>
33
#include <linux/errno.h>
34
#include <linux/kernel.h>
35
#include <linux/sched.h>
36
#include <linux/slab.h>
37
#include <linux/poll.h>
38
#include <linux/fcntl.h>
39
#include <linux/init.h>
40
#include <linux/interrupt.h>
41
#include <linux/socket.h>
42
#include <linux/skbuff.h>
43
#include <linux/list.h>
44
#include <linux/device.h>
45
#include <linux/debugfs.h>
46
#include <linux/seq_file.h>
47
#include <linux/uaccess.h>
48
#include <linux/crc16.h>
49
#include <net/sock.h>
50
51
#include <asm/system.h>
52
#include <asm/unaligned.h>
53
54
#include <net/bluetooth/bluetooth.h>
55
#include <net/bluetooth/hci_core.h>
56
#include <net/bluetooth/l2cap.h>
57
58
int disable_ertm;
59
60
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61
static u8 l2cap_fixed_chan[8] = { 0x02, };
62
63
static struct workqueue_struct *_busy_wq;
64
65
LIST_HEAD(chan_list);
66
DEFINE_RWLOCK(chan_list_lock);
67
68
static void l2cap_busy_work(struct work_struct *work);
69
70
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
71
u8 code, u8 ident, u16 dlen, void *data);
72
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73
74
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
75
76
/* ---- L2CAP channels ---- */
77
/* Look up a channel on this connection by destination CID.
 * Caller must hold conn->chan_lock. */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		if (chan->dcid == cid)
			return chan;

	return NULL;
}
88
89
/* Look up a channel on this connection by source CID.
 * Caller must hold conn->chan_lock. */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		if (chan->scid == cid)
			return chan;

	return NULL;
}
99
100
/* Find channel with given SCID.
 * On success the channel's socket is returned with bh_lock_sock() held;
 * the caller must bh_unlock_sock() it when done. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
{
	struct l2cap_chan *c;

	read_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		/* Lock the socket before dropping chan_lock so the channel
		 * cannot be torn down between lookup and lock. */
		bh_lock_sock(c->sk);
	read_unlock(&conn->chan_lock);
	return c;
}
113
114
/* Look up a channel on this connection by signalling-command identifier.
 * Caller must hold conn->chan_lock. */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		if (chan->ident == ident)
			return chan;

	return NULL;
}
124
125
/* Find channel with given signalling identifier.
 * On success the channel's socket is returned with bh_lock_sock() held;
 * the caller must bh_unlock_sock() it when done. */
static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
{
	struct l2cap_chan *c;

	read_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		/* Lock the socket before dropping chan_lock, same pattern
		 * as l2cap_get_chan_by_scid(). */
		bh_lock_sock(c->sk);
	read_unlock(&conn->chan_lock);
	return c;
}
136
137
/* Find a channel bound to the given source PSM and source address.
 * Caller must hold chan_list_lock. */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &chan_list, global_l) {
		if (chan->sport == psm && !bacmp(&bt_sk(chan->sk)->src, src))
			return chan;
	}

	return NULL;
}
150
151
/* Bind a PSM to the channel.
 *
 * With a nonzero @psm, fails with -EADDRINUSE if another channel is
 * already bound to the same PSM/source address pair.  With @psm == 0 a
 * dynamic PSM is allocated; returns -EINVAL when none is free.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock_bh(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		/* Scan the dynamic range; stepping by 2 from 0x1001 keeps
		 * the candidate PSMs odd, as required by the spec. */
		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock_bh(&chan_list_lock);
	return err;
}
183
184
/* Record a fixed source CID on the channel. Always succeeds. */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock_bh(&chan_list_lock);
	chan->scid = scid;
	write_unlock_bh(&chan_list_lock);

	return 0;
}
194
195
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
196
{
197
u16 cid = L2CAP_CID_DYN_START;
198
199
for (; cid < L2CAP_CID_DYN_END; cid++) {
200
if (!__l2cap_get_chan_by_scid(conn, cid))
201
return cid;
202
}
203
204
return 0;
205
}
206
207
struct l2cap_chan *l2cap_chan_create(struct sock *sk)
208
{
209
struct l2cap_chan *chan;
210
211
chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
212
if (!chan)
213
return NULL;
214
215
chan->sk = sk;
216
217
write_lock_bh(&chan_list_lock);
218
list_add(&chan->global_l, &chan_list);
219
write_unlock_bh(&chan_list_lock);
220
221
return chan;
222
}
223
224
void l2cap_chan_destroy(struct l2cap_chan *chan)
225
{
226
write_lock_bh(&chan_list_lock);
227
list_del(&chan->global_l);
228
write_unlock_bh(&chan_list_lock);
229
230
kfree(chan);
231
}
232
233
/* Attach a channel to a connection and assign its CIDs based on the
 * socket type.  Caller must hold conn->chan_lock for writing. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			chan->psm, chan->dcid);

	/* 0x13 — presumably HCI "remote user terminated connection";
	 * NOTE(review): confirm against the HCI error code table. */
	conn->disc_reason = 0x13;

	chan->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Hold a socket reference for the conn->chan_l linkage; dropped
	 * by __sock_put() in l2cap_chan_del(). */
	sock_hold(sk);

	list_add(&chan->list, &conn->chan_l);
}
271
272
/* Delete channel.
 * Must be called on the locked socket. */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list and drop the socket reference
		 * taken by sock_hold() in __l2cap_chan_add(). */
		write_lock_bh(&conn->chan_lock);
		list_del(&chan->list);
		write_unlock_bh(&conn->chan_lock);
		__sock_put(sk);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Socket still sits on a listener's accept queue: detach
		 * it and wake the listener. */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	/* The queues/timers below are only live once both configure
	 * directions completed; otherwise there is nothing to purge. */
	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
			chan->conf_state & L2CAP_CONF_INPUT_DONE))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		del_timer(&chan->retrans_timer);
		del_timer(&chan->monitor_timer);
		del_timer(&chan->ack_timer);

		skb_queue_purge(&chan->srej_q);
		skb_queue_purge(&chan->busy_q);

		/* Free any outstanding SREJ bookkeeping entries. */
		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
329
330
/* Map the channel's security level (and socket type / PSM) to the HCI
 * authentication requirement used when securing the ACL link.
 *
 * Side effect: for PSM 0x0001 (SDP) a BT_SECURITY_LOW channel is
 * upgraded in place to BT_SECURITY_SDP.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	if (sk->sk_type == SOCK_RAW) {
		/* Raw sockets are used for things like dedicated bonding. */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	} else if (chan->psm == cpu_to_le16(0x0001)) {
		/* SDP never requires bonding. */
		if (chan->sec_level == BT_SECURITY_LOW)
			chan->sec_level = BT_SECURITY_SDP;

		if (chan->sec_level == BT_SECURITY_HIGH)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
	} else {
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
	}
}
362
363
/* Service level security */
364
static inline int l2cap_check_security(struct l2cap_chan *chan)
365
{
366
struct l2cap_conn *conn = chan->conn;
367
__u8 auth_type;
368
369
auth_type = l2cap_get_auth_type(chan);
370
371
return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
372
}
373
374
/* Allocate the next signalling-command identifier for this connection.
 * Identifiers wrap within the kernel-reserved 1-128 range. */
u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
395
396
/* Build a signalling command PDU and queue it on the ACL link.
 * Silently drops the command if the skb cannot be built. */
void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags = ACL_START;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;

	hci_send_acl(conn->hcon, skb, flags);
}
413
414
/* Build and send one ERTM S-frame carrying @control.
 * Sets the frame-type bit, folds in pending F/P bits, and appends an
 * FCS when the channel uses CRC16. */
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = (struct sock *)pi;
	/* Basic header + 2-byte control field. */
	int count, hlen = L2CAP_HDR_SIZE + 2;
	u8 flags;

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	BT_DBG("chan %p, control 0x%2.2x", chan, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Consume a pending F-bit (final) if one is owed to the peer. */
	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Likewise for a pending P-bit (poll). */
	if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything before the FCS field itself. */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	hci_send_acl(chan->conn->hcon, skb, flags);
}
466
467
/* Send a receiver-ready S-frame, or receiver-not-ready when we are
 * locally busy; always carries our current buffer_seq as ReqSeq. */
static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
{
	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		chan->conn_state |= L2CAP_CONN_RNR_SENT;
		control |= L2CAP_SUPER_RCV_NOT_READY;
	} else {
		control |= L2CAP_SUPER_RCV_READY;
	}

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(chan, control);
}
479
480
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
481
{
482
return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
483
}
484
485
/* Kick off channel establishment: send a Connect Request if the remote
 * feature mask is already known, otherwise issue an Information Request
 * first and let the response path continue the connect. */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange in flight but not finished: wait. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(chan) &&
				__l2cap_no_conn_pending(chan)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(chan->scid);
			req.psm = chan->psm;

			chan->ident = l2cap_get_ident(conn);
			chan->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);
		}
	} else {
		/* First channel on this link: query the remote feature
		 * mask, guarded by a timeout in case of no response. */
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
519
520
/* Check whether @mode is supported by both ends: the remote feature
 * mask and our local one (ERTM/streaming only when not disabled). */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local = l2cap_feat_mask;

	if (!disable_ertm)
		local |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		return feat_mask & local & L2CAP_FEAT_ERTM;
	case L2CAP_MODE_STREAMING:
		return feat_mask & local & L2CAP_FEAT_STREAMING;
	default:
		return 0x00;
	}
}
535
536
/* Send a Disconnect Request for @chan and move its socket to
 * BT_DISCONN, recording @err on the socket.  ERTM timers are stopped
 * first so they cannot fire during teardown. */
void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	sk = chan->sk;

	if (chan->mode == L2CAP_MODE_ERTM) {
		del_timer(&chan->retrans_timer);
		del_timer(&chan->monitor_timer);
		del_timer(&chan->ack_timer);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
560
561
/* ---- L2CAP connections ---- */
562
/* Walk every channel on the connection and advance its state machine:
 * send Connect Requests for channels in BT_CONNECT and Connect
 * Responses (plus the first Configure Request) for channels in
 * BT_CONNECT2.  Called once the remote feature mask is known. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* Only connection-oriented sockets take part. */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			struct l2cap_conn_req req;

			if (!l2cap_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				bh_unlock_sock(sk);
				continue;
			}

			/* Mandated mode not supported by the peer and no
			 * fallback allowed: close the channel. */
			if (!l2cap_mode_supported(chan->mode,
					conn->feat_mask)
					&& chan->conf_state &
					L2CAP_CONF_STATE2_DEVICE) {
				/* __l2cap_sock_close() calls list_del(chan)
				 * so release the lock.
				 * NOTE(review): the _bh unlock/lock variants
				 * here do not match the plain read_lock()
				 * taken at function entry — verify against
				 * upstream history. */
				read_unlock_bh(&conn->chan_lock);
				__l2cap_sock_close(sk, ECONNRESET);
				read_lock_bh(&conn->chan_lock);
				bh_unlock_sock(sk);
				continue;
			}

			req.scid = cpu_to_le16(chan->scid);
			req.psm = chan->psm;

			chan->ident = l2cap_get_ident(conn);
			chan->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);

		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_check_security(chan)) {
				if (bt_sk(sk)->defer_setup) {
					/* Userspace must accept() first:
					 * answer "pending" and wake the
					 * listener. */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					sk->sk_state = BT_CONFIG;
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Security still being raised. */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
					rsp.result != L2CAP_CR_SUCCESS) {
				bh_unlock_sock(sk);
				continue;
			}

			/* Accepted: start configuration immediately. */
			chan->conf_state |= L2CAP_CONF_REQ_SENT;
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
656
657
/* Find socket with cid and source bdaddr.
658
* Returns closest match, locked.
659
*/
660
static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
661
{
662
struct l2cap_chan *c, *c1 = NULL;
663
664
read_lock(&chan_list_lock);
665
666
list_for_each_entry(c, &chan_list, global_l) {
667
struct sock *sk = c->sk;
668
669
if (state && sk->sk_state != state)
670
continue;
671
672
if (c->scid == cid) {
673
/* Exact match. */
674
if (!bacmp(&bt_sk(sk)->src, src)) {
675
read_unlock(&chan_list_lock);
676
return c;
677
}
678
679
/* Closest match */
680
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
681
c1 = c;
682
}
683
}
684
685
read_unlock(&chan_list_lock);
686
687
return c1;
688
}
689
690
/* Incoming LE link became ready: if a socket is listening on the LE
 * data CID, create a child socket/channel, attach it to the connection
 * and signal the listener. */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!pchan)
		return;

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	chan = l2cap_chan_create(sk);
	if (!chan) {
		l2cap_sock_kill(sk);
		goto clean;
	}

	l2cap_pi(sk)->chan = chan;

	write_lock_bh(&conn->chan_lock);

	/* Hold the ACL while the channel exists; released in
	 * l2cap_chan_del(). */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	sk->sk_state = BT_CONNECTED;
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&conn->chan_lock);

	/* Success path falls through here too: the label only releases
	 * the parent lock. */
clean:
	bh_unlock_sock(parent);
}
748
749
/* Link established: mark connectionless/raw and LE channels connected
 * immediately, and start the connect procedure for connection-oriented
 * channels still in BT_CONNECT. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: hand off to the LE accept path first. */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		if (conn->hcon->type == LE_LINK) {
			/* No configuration stage on LE. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		}

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Connectionless/raw sockets need no signalling. */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(chan);

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
784
785
/* Notify sockets that we cannot guaranty reliability anymore */
786
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
787
{
788
struct l2cap_chan *chan;
789
790
BT_DBG("conn %p", conn);
791
792
read_lock(&conn->chan_lock);
793
794
list_for_each_entry(chan, &conn->chan_l, list) {
795
struct sock *sk = chan->sk;
796
797
if (chan->force_reliable)
798
sk->sk_err = err;
799
}
800
801
read_unlock(&conn->chan_lock);
802
}
803
804
static void l2cap_info_timeout(unsigned long arg)
805
{
806
struct l2cap_conn *conn = (void *) arg;
807
808
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
809
conn->info_ident = 0;
810
811
l2cap_conn_start(conn);
812
}
813
814
/* Allocate and initialise the L2CAP state for an HCI connection, or
 * return the existing one.  Returns NULL on allocation failure or when
 * @status is nonzero and no state exists yet. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* LE links may have their own (smaller) MTU configured. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* The info (feature-mask) exchange only applies to BR/EDR. */
	if (hcon->type != LE_LINK)
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13 — presumably "remote user terminated connection";
	 * NOTE(review): confirm against the HCI error code table. */
	conn->disc_reason = 0x13;

	return conn;
}
853
854
/* Tear down the L2CAP state of an HCI connection: kill every channel
 * with error @err, stop the info timer and free the connection. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled PDU. */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		sk = chan->sk;
		bh_lock_sock(sk);
		l2cap_chan_del(chan, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	/* The timer was only armed if a feature request went out. */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
882
883
/* Locked wrapper around __l2cap_chan_add(). */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	write_lock_bh(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	write_unlock_bh(&conn->chan_lock);
}
889
890
/* ---- Socket interface ---- */
891
892
/* Find socket with psm and source bdaddr.
893
* Returns closest match.
894
*/
895
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
896
{
897
struct l2cap_chan *c, *c1 = NULL;
898
899
read_lock(&chan_list_lock);
900
901
list_for_each_entry(c, &chan_list, global_l) {
902
struct sock *sk = c->sk;
903
904
if (state && sk->sk_state != state)
905
continue;
906
907
if (c->psm == psm) {
908
/* Exact match. */
909
if (!bacmp(&bt_sk(sk)->src, src)) {
910
read_unlock(&chan_list_lock);
911
return c;
912
}
913
914
/* Closest match */
915
if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
916
c1 = c;
917
}
918
}
919
920
read_unlock(&chan_list_lock);
921
922
return c1;
923
}
924
925
/* Establish an outgoing channel: pick a route, create/obtain the HCI
 * link (LE or ACL depending on the destination CID), attach the channel
 * and kick off the L2CAP connect sequence.
 * Returns 0 on success or a negative errno. */
int l2cap_chan_connect(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							chan->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else uses ACL. */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, chan);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* Link already up: raw/dgram sockets are connected as soon
		 * as security allows; others start the signalling. */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			if (l2cap_check_security(chan))
				sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
991
992
/* Block (interruptibly) until every transmitted ERTM frame has been
 * acknowledged or the channel is detached.  Called with the socket
 * lock held; drops and retakes it around each sleep.
 * Returns 0, a socket error, or -ERESTARTSYS-style signal errno. */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((chan->unacked_frames > 0 && chan->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the per-iteration timeout when it expired. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1023
1024
/* ERTM monitor timer: the peer did not answer our poll.  Give up and
 * disconnect once remote_max_tx retries are exhausted, otherwise poll
 * again and re-arm the monitor. */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	if (chan->retry_count >= chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	chan->retry_count++;
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1044
1045
/* ERTM retransmission timer: no acknowledgement arrived in time.
 * Enter the WAIT_F state and poll the peer; the monitor timer takes
 * over the retry accounting from here. */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	chan->retry_count = 1;
	__mod_monitor_timer();

	chan->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1061
1062
static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1063
{
1064
struct sk_buff *skb;
1065
1066
while ((skb = skb_peek(&chan->tx_q)) &&
1067
chan->unacked_frames) {
1068
if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1069
break;
1070
1071
skb = skb_dequeue(&chan->tx_q);
1072
kfree_skb(skb);
1073
1074
chan->unacked_frames--;
1075
}
1076
1077
if (!chan->unacked_frames)
1078
del_timer(&chan->retrans_timer);
1079
}
1080
1081
void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1082
{
1083
struct hci_conn *hcon = chan->conn->hcon;
1084
u16 flags;
1085
1086
BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1087
1088
if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1089
flags = ACL_START_NO_FLUSH;
1090
else
1091
flags = ACL_START;
1092
1093
hci_send_acl(hcon, skb, flags);
1094
}
1095
1096
/* Streaming mode: drain the whole tx queue, stamping each I-frame with
 * the next TxSeq (and an FCS when enabled) before sending.  No copies
 * are kept — streaming mode never retransmits. */
void l2cap_streaming_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control, fcs;

	while ((skb = skb_dequeue(&chan->tx_q))) {
		/* Patch TxSeq into the control field already present in
		 * the frame. */
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers everything except its own 2 bytes. */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(chan, skb);

		/* TxSeq is a 6-bit modulo-64 counter. */
		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
	}
}
1116
1117
/* Retransmit the single I-frame with sequence number @tx_seq from the
 * tx queue (e.g. in response to an SREJ).  Disconnects instead if the
 * frame already reached remote_max_tx transmissions. */
static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
{
	struct sk_buff *skb, *tx_skb;
	u16 control, fcs;

	skb = skb_peek(&chan->tx_q);
	if (!skb)
		return;

	/* Walk the queue until the requested sequence number. */
	do {
		if (bt_cb(skb)->tx_seq == tx_seq)
			break;

		if (skb_queue_is_last(&chan->tx_q, skb))
			return;

	} while ((skb = skb_queue_next(&chan->tx_q, skb)));

	if (chan->remote_max_tx &&
			bt_cb(skb)->retries == chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		return;
	}

	/* Send a clone so the original stays queued for further
	 * retransmissions.
	 * NOTE(review): skb_clone() can return NULL under GFP_ATOMIC and
	 * is dereferenced unchecked below — verify against upstream. */
	tx_skb = skb_clone(skb, GFP_ATOMIC);
	bt_cb(skb)->retries++;
	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
	/* Keep only the SAR bits; ReqSeq/TxSeq/F are rebuilt below. */
	control &= L2CAP_CTRL_SAR;

	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);

	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* Recompute the FCS after the control-field rewrite. */
		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
	}

	l2cap_do_send(chan, tx_skb);
}
1163
1164
int l2cap_ertm_send(struct l2cap_chan *chan)
1165
{
1166
struct sk_buff *skb, *tx_skb;
1167
struct sock *sk = chan->sk;
1168
u16 control, fcs;
1169
int nsent = 0;
1170
1171
if (sk->sk_state != BT_CONNECTED)
1172
return -ENOTCONN;
1173
1174
while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1175
1176
if (chan->remote_max_tx &&
1177
bt_cb(skb)->retries == chan->remote_max_tx) {
1178
l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1179
break;
1180
}
1181
1182
tx_skb = skb_clone(skb, GFP_ATOMIC);
1183
1184
bt_cb(skb)->retries++;
1185
1186
control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1187
control &= L2CAP_CTRL_SAR;
1188
1189
if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1190
control |= L2CAP_CTRL_FINAL;
1191
chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1192
}
1193
control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1194
| (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1195
put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1196
1197
1198
if (chan->fcs == L2CAP_FCS_CRC16) {
1199
fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1200
put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1201
}
1202
1203
l2cap_do_send(chan, tx_skb);
1204
1205
__mod_retrans_timer();
1206
1207
bt_cb(skb)->tx_seq = chan->next_tx_seq;
1208
chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1209
1210
if (bt_cb(skb)->retries == 1)
1211
chan->unacked_frames++;
1212
1213
chan->frames_sent++;
1214
1215
if (skb_queue_is_last(&chan->tx_q, skb))
1216
chan->tx_send_head = NULL;
1217
else
1218
chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1219
1220
nsent++;
1221
}
1222
1223
return nsent;
1224
}
1225
1226
static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1227
{
1228
int ret;
1229
1230
if (!skb_queue_empty(&chan->tx_q))
1231
chan->tx_send_head = chan->tx_q.next;
1232
1233
chan->next_tx_seq = chan->expected_ack_seq;
1234
ret = l2cap_ertm_send(chan);
1235
return ret;
1236
}
1237
1238
/* Acknowledge received frames: send RNR when locally busy, otherwise
 * try to piggy-back the acknowledgement on pending I-frames and fall
 * back to an explicit RR S-frame. */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	u16 control = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		chan->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(chan, control);
		return;
	}

	/* I-frames carry ReqSeq themselves; if any went out, the ack is
	 * already on the air. */
	if (l2cap_ertm_send(chan) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(chan, control);
}
1257
1258
static void l2cap_send_srejtail(struct l2cap_chan *chan)
1259
{
1260
struct srej_list *tail;
1261
u16 control;
1262
1263
control = L2CAP_SUPER_SELECT_REJECT;
1264
control |= L2CAP_CTRL_FINAL;
1265
1266
tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1267
control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1268
1269
l2cap_send_sframe(chan, control);
1270
}
1271
1272
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into @skb itself, the remainder is split into MTU-sized
 * skbs chained on @skb's frag_list.  Returns the number of bytes
 * consumed, or a negative error.  On error the caller frees @skb,
 * which also frees any fragments already attached to it.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1303
1304
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * a 2-byte PSM field and the user payload from @msg.  Returns the skb
 * or an ERR_PTR on allocation/copy failure.
 */
struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	/* hlen: basic header plus the 2-byte PSM */
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	/* NOTE(review): chan->psm appears to be stored little-endian
	 * already; passing it through put_unaligned_le16() looks like a
	 * double conversion on big-endian — confirm psm's storage type. */
	put_unaligned_le16(chan->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1333
1334
/* Build a basic-mode B-frame PDU: plain L2CAP header plus the user
 * payload from @msg.  Returns the skb or an ERR_PTR on failure.
 */
struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* First skb holds at most one link MTU worth of data; the rest
	 * is chained by l2cap_skbuff_fromiovec(). */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1362
1363
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 2-byte control
 * field, optional 2-byte SDU length (first segment of a segmented SDU,
 * signalled by @sdulen != 0), user payload, and an optional 2-byte FCS
 * placeholder (filled in at transmit time).  Returns the skb or an
 * ERR_PTR on failure.
 */
struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	/* hlen: basic header plus the 2-byte control field */
	int err, count, hlen = L2CAP_HDR_SIZE + 2;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* SAR start frames carry the total SDU length */
	if (sdulen)
		hlen += 2;

	/* Reserve room for the trailing frame check sequence */
	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* FCS placeholder; the real CRC is computed when the frame is
	 * actually transmitted. */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1408
1409
/* Segment an SDU larger than the remote MPS into a chain of I-frames
 * (SAR start / continue / end), queue them on the channel's tx queue
 * and return the total number of payload bytes queued, or a negative
 * error (in which case nothing is queued).
 */
int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	/* First segment: SAR=start, carries the total SDU length */
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > chan->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = chan->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop everything built so far on failure */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	/* Commit the whole SDU to the tx queue in one go */
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	if (chan->tx_send_head == NULL)
		chan->tx_send_head = sar_queue.next;

	return size;
}
1453
1454
static void l2cap_chan_ready(struct sock *sk)
1455
{
1456
struct sock *parent = bt_sk(sk)->parent;
1457
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1458
1459
BT_DBG("sk %p, parent %p", sk, parent);
1460
1461
chan->conf_state = 0;
1462
l2cap_sock_clear_timer(sk);
1463
1464
if (!parent) {
1465
/* Outgoing channel.
1466
* Wake up socket sleeping on connect.
1467
*/
1468
sk->sk_state = BT_CONNECTED;
1469
sk->sk_state_change(sk);
1470
} else {
1471
/* Incoming channel.
1472
* Wake up socket sleeping on accept.
1473
*/
1474
parent->sk_data_ready(parent, 0);
1475
}
1476
}
1477
1478
/* Copy frame to all raw sockets on that connection.  Each raw socket
 * gets its own clone of @skb; the socket the frame originated from is
 * skipped.  Clone or queueing failures are silently ignored (raw
 * delivery is best-effort).
 */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);
	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (sk->sk_type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&conn->chan_lock);
}
1504
1505
/* ---- L2CAP signalling commands ---- */
1506
/* Build an L2CAP signalling command PDU: L2CAP header (signalling CID,
 * LE or BR/EDR depending on the link type), command header, and @dlen
 * bytes of payload from @data, fragmenting onto the frag_list when the
 * whole PDU exceeds the connection MTU.  Returns the skb or NULL on
 * allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling channel CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first skb with payload */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb along with any fragments already chained */
	kfree_skb(skb);
	return NULL;
}
1568
1569
/* Decode one configuration option at *ptr and advance *ptr past it.
 * Type and length are returned through @type/@olen; the value is
 * returned in @val, decoded little-endian for 1/2/4-byte options and
 * as a pointer to the raw bytes for any other length.  Returns the
 * total number of bytes consumed.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to it */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
1601
1602
/* Append one configuration option (type, length, value) at *ptr and
 * advance *ptr past it.  1/2/4-byte values are encoded little-endian;
 * any other length is copied verbatim from the buffer @val points to.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len = len;

	if (len == 1)
		*((u8 *) opt->val) = val;
	else if (len == 2)
		put_unaligned_le16(val, opt->val);
	else if (len == 4)
		put_unaligned_le32(val, opt->val);
	else
		memcpy(opt->val, (void *) val, len);

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
1631
1632
static void l2cap_ack_timeout(unsigned long arg)
1633
{
1634
struct l2cap_chan *chan = (void *) arg;
1635
1636
bh_lock_sock(chan->sk);
1637
l2cap_send_ack(chan);
1638
bh_unlock_sock(chan->sk);
1639
}
1640
1641
/* Initialize the per-channel ERTM state: sequence counters, the
 * retransmission/monitor/ack timers, the SREJ reassembly queues and
 * list, the local-busy work item, and the backlog receive handler.
 * Called once when the channel enters ERTM mode.
 */
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
			(unsigned long) chan);
	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
			(unsigned long) chan);
	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);

	skb_queue_head_init(&chan->srej_q);
	skb_queue_head_init(&chan->busy_q);

	INIT_LIST_HEAD(&chan->srej_l);

	INIT_WORK(&chan->busy_work, l2cap_busy_work);

	/* Frames arriving while the socket is owned by user context go
	 * through the ERTM backlog handler. */
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1666
1667
/* Pick the channel mode to use: keep ERTM or streaming when the remote
 * feature mask advertises support for it, otherwise fall back to basic
 * mode.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	if (mode == L2CAP_MODE_STREAMING || mode == L2CAP_MODE_ERTM)
		if (l2cap_mode_supported(mode, remote_feat_mask))
			return mode;

	return L2CAP_MODE_BASIC;
}
1679
1680
/* Build a Configure Request for @chan into @data: select the final
 * channel mode (only on the first request), then add MTU, RFC and FCS
 * options as appropriate for that mode.  Returns the number of bytes
 * written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens on the very first config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2 devices insist on their configured mode */
		if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only advertise a non-default receive MTU */
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* An explicit basic-mode RFC option is only needed when
		 * the remote supports the other modes at all. */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = chan->tx_win;
		rfc.max_transmit    = chan->max_tx;
		/* Timeouts are dictated by the responder, leave 0 here */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Clamp MPS so PDU + worst-case overhead fits the link MTU */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Opt out of FCS if we or the remote don't want it */
		if (chan->fcs == L2CAP_FCS_NONE ||
				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1776
1777
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1778
{
1779
struct l2cap_conf_rsp *rsp = data;
1780
void *ptr = rsp->data;
1781
void *req = chan->conf_req;
1782
int len = chan->conf_len;
1783
int type, hint, olen;
1784
unsigned long val;
1785
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1786
u16 mtu = L2CAP_DEFAULT_MTU;
1787
u16 result = L2CAP_CONF_SUCCESS;
1788
1789
BT_DBG("chan %p", chan);
1790
1791
while (len >= L2CAP_CONF_OPT_SIZE) {
1792
len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1793
1794
hint = type & L2CAP_CONF_HINT;
1795
type &= L2CAP_CONF_MASK;
1796
1797
switch (type) {
1798
case L2CAP_CONF_MTU:
1799
mtu = val;
1800
break;
1801
1802
case L2CAP_CONF_FLUSH_TO:
1803
chan->flush_to = val;
1804
break;
1805
1806
case L2CAP_CONF_QOS:
1807
break;
1808
1809
case L2CAP_CONF_RFC:
1810
if (olen == sizeof(rfc))
1811
memcpy(&rfc, (void *) val, olen);
1812
break;
1813
1814
case L2CAP_CONF_FCS:
1815
if (val == L2CAP_FCS_NONE)
1816
chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1817
1818
break;
1819
1820
default:
1821
if (hint)
1822
break;
1823
1824
result = L2CAP_CONF_UNKNOWN;
1825
*((u8 *) ptr++) = type;
1826
break;
1827
}
1828
}
1829
1830
if (chan->num_conf_rsp || chan->num_conf_req > 1)
1831
goto done;
1832
1833
switch (chan->mode) {
1834
case L2CAP_MODE_STREAMING:
1835
case L2CAP_MODE_ERTM:
1836
if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1837
chan->mode = l2cap_select_mode(rfc.mode,
1838
chan->conn->feat_mask);
1839
break;
1840
}
1841
1842
if (chan->mode != rfc.mode)
1843
return -ECONNREFUSED;
1844
1845
break;
1846
}
1847
1848
done:
1849
if (chan->mode != rfc.mode) {
1850
result = L2CAP_CONF_UNACCEPT;
1851
rfc.mode = chan->mode;
1852
1853
if (chan->num_conf_rsp == 1)
1854
return -ECONNREFUSED;
1855
1856
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1857
sizeof(rfc), (unsigned long) &rfc);
1858
}
1859
1860
1861
if (result == L2CAP_CONF_SUCCESS) {
1862
/* Configure output options and let the other side know
1863
* which ones we don't like. */
1864
1865
if (mtu < L2CAP_DEFAULT_MIN_MTU)
1866
result = L2CAP_CONF_UNACCEPT;
1867
else {
1868
chan->omtu = mtu;
1869
chan->conf_state |= L2CAP_CONF_MTU_DONE;
1870
}
1871
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
1872
1873
switch (rfc.mode) {
1874
case L2CAP_MODE_BASIC:
1875
chan->fcs = L2CAP_FCS_NONE;
1876
chan->conf_state |= L2CAP_CONF_MODE_DONE;
1877
break;
1878
1879
case L2CAP_MODE_ERTM:
1880
chan->remote_tx_win = rfc.txwin_size;
1881
chan->remote_max_tx = rfc.max_transmit;
1882
1883
if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
1884
rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1885
1886
chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1887
1888
rfc.retrans_timeout =
1889
le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1890
rfc.monitor_timeout =
1891
le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1892
1893
chan->conf_state |= L2CAP_CONF_MODE_DONE;
1894
1895
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1896
sizeof(rfc), (unsigned long) &rfc);
1897
1898
break;
1899
1900
case L2CAP_MODE_STREAMING:
1901
if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
1902
rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1903
1904
chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1905
1906
chan->conf_state |= L2CAP_CONF_MODE_DONE;
1907
1908
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1909
sizeof(rfc), (unsigned long) &rfc);
1910
1911
break;
1912
1913
default:
1914
result = L2CAP_CONF_UNACCEPT;
1915
1916
memset(&rfc, 0, sizeof(rfc));
1917
rfc.mode = chan->mode;
1918
}
1919
1920
if (result == L2CAP_CONF_SUCCESS)
1921
chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1922
}
1923
rsp->scid = cpu_to_le16(chan->dcid);
1924
rsp->result = cpu_to_le16(result);
1925
rsp->flags = cpu_to_le16(0x0000);
1926
1927
return ptr - data;
1928
}
1929
1930
/* Parse a Configure Response carrying unacceptable parameters and build
 * a new Configure Request into @data adopting the peer's suggestions.
 * Returns the new request length, or -ECONNREFUSED when the suggested
 * mode is unacceptable.
 *
 * Fix: @rfc was previously uninitialized, so a response containing no
 * RFC option made "chan->mode = rfc.mode" and the timeout/MPS loads
 * below read stack garbage.  Initialize it with the channel's current
 * parameters so a missing option means "no change".
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	/* Sane defaults in case the response carries no RFC option */
	struct l2cap_conf_rfc rfc = {
		.mode		 = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size	 = cpu_to_le16(chan->mps),
	};

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, chan->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* STATE2 devices refuse any mode change */
			if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
							rfc.mode != chan->mode)
				return -ECONNREFUSED;

			chan->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;
		}
	}

	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			break;
		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
1997
1998
/* Fill in a bare Configure Response header (no options) with the given
 * result and flags.  Returns the number of bytes written.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *end = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(flags);

	return end - data;
}
2011
2012
/* Complete a deferred connection setup: send the success Connect
 * Response that was held back, then kick off the configuration phase
 * with a Configure Request (unless one is already outstanding).
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (chan->conf_state & L2CAP_CONF_REQ_SENT)
		return;

	chan->conf_state |= L2CAP_CONF_REQ_SENT;
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
2033
2034
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2035
{
2036
int type, olen;
2037
unsigned long val;
2038
struct l2cap_conf_rfc rfc;
2039
2040
BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2041
2042
if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2043
return;
2044
2045
while (len >= L2CAP_CONF_OPT_SIZE) {
2046
len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2047
2048
switch (type) {
2049
case L2CAP_CONF_RFC:
2050
if (olen == sizeof(rfc))
2051
memcpy(&rfc, (void *)val, olen);
2052
goto done;
2053
}
2054
}
2055
2056
done:
2057
switch (rfc.mode) {
2058
case L2CAP_MODE_ERTM:
2059
chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2060
chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2061
chan->mps = le16_to_cpu(rfc.max_pdu_size);
2062
break;
2063
case L2CAP_MODE_STREAMING:
2064
chan->mps = le16_to_cpu(rfc.max_pdu_size);
2065
}
2066
}
2067
2068
/* Handle an incoming Command Reject.  The only case acted upon is a
 * "command not understood" reject matching our pending Information
 * Request: treat the feature exchange as complete and start the
 * queued-up connections.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;

	if (rej->reason != 0x0000)
		return 0;

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) ||
			cmd->ident != conn->info_ident)
		return 0;

	del_timer(&conn->info_timer);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);

	return 0;
}
2087
2088
/* Handle an incoming Connection Request: look up a listening socket for
 * the PSM, perform security and backlog checks, allocate the child
 * socket/channel, and answer with success, pending, or an error.  A
 * pending answer triggers the feature-mask exchange when it has not
 * happened yet; a success answer immediately starts configuration.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	chan = l2cap_chan_create(sk);
	if (!chan) {
		l2cap_sock_kill(sk);
		goto response;
	}

	l2cap_pi(sk)->chan = chan;

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* Remember the request ident for a possibly deferred response */
	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace decides; answer pending */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Wait for authentication to complete */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature exchange not done yet; defer */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Kick off the feature-mask information exchange */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		chan->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
2228
2229
/* Handle an incoming Connection Response: locate the channel (by scid
 * when present, otherwise by the command ident), then either move it
 * into configuration (success), mark it pending, or tear it down on
 * refusal.  The channel lookup functions return with the socket
 * bh-locked; it is unlocked on exit.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		chan->ident = 0;
		chan->dcid = dcid;
		chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Avoid sending a duplicate Configure Request */
		if (chan->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		chan->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2293
2294
static inline void set_default_fcs(struct l2cap_chan *chan)
2295
{
2296
struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2297
2298
/* FCS is enabled only in ERTM or streaming mode, if one or both
2299
* sides request it.
2300
*/
2301
if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2302
chan->fcs = L2CAP_FCS_NONE;
2303
else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
2304
chan->fcs = L2CAP_FCS_CRC16;
2305
}
2306
2307
/* Handle an incoming Configure Request: accumulate (possibly
 * multi-fragment) option data on the channel, and once complete, parse
 * it, send the response, and — when both config directions are done —
 * bring the channel up.  The channel lookup returns with the socket
 * bh-locked; it is unlocked on exit.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	/* Config is only legal while connecting/configuring */
	if (sk->sk_state != BT_CONFIG && sk->sk_state != BT_CONNECT2) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: bring the channel up */
		set_default_fcs(chan);

		sk->sk_state = BT_CONNECTED;

		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		chan->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2400
2401
/* Handle an incoming Configure Response: on success apply the RFC
 * parameters, on "unacceptable parameters" renegotiate with the peer's
 * suggestions (bounded by L2CAP_CONF_MAX_CONF_RSP attempts), and on
 * any other result disconnect.  When the input direction completes and
 * the output direction is already done, bring the channel up.
 *
 * Fix: cmd->len is a little-endian wire field (__le16); it must go
 * through __le16_to_cpu() before being used in arithmetic, otherwise
 * the payload length is wrong on big-endian hosts.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	struct sock *sk;
	int len = __le16_to_cpu(cmd->len) - sizeof(*rsp);

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		break;

	case L2CAP_CONF_UNACCEPT:
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
								req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Too many attempts: fall through and give up */

	default:
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto done;
	}

	/* More response fragments coming */
	if (flags & 0x01)
		goto done;

	chan->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		set_default_fcs(chan);

		sk->sk_state = BT_CONNECTED;
		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
2482
2483
/* Handle an incoming L2CAP Disconnection Request: acknowledge it with a
 * Disconnection Response and tear the matching channel down.
 *
 * NOTE: l2cap_get_chan_by_scid() returns with the channel's socket
 * bh-locked, so every exit path after a successful lookup must
 * bh_unlock_sock(). The peer's dcid names *our* source CID, hence the
 * lookup by dcid below.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* Echo our CID pair back in the response */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		/* Defer teardown: mark disconnecting and retry shortly via
		 * the socket timer once userspace releases the lock. */
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* Delete the channel while still holding the socket lock, then
	 * kill the socket after dropping it. */
	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2523
2524
/* Handle the peer's L2CAP Disconnection Response: our disconnect request
 * completed, so finish tearing the channel down.
 *
 * NOTE: l2cap_get_chan_by_scid() returns with the channel's socket
 * bh-locked; all paths below must bh_unlock_sock().
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		/* Userspace holds the lock: defer teardown via the socket
		 * timer instead of deleting the channel here. */
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* err == 0: this is a clean, locally initiated disconnect */
	l2cap_chan_del(chan, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2557
2558
/* Answer an L2CAP Information Request.
 *
 * Supported queries: the extended feature mask and the fixed channel
 * bitmap; anything else gets a NOTSUPP response. The responses are
 * built as raw byte buffers: l2cap_info_rsp is a 4-byte header
 * (type + result) followed by variable payload, so buf[8] carries a
 * 4-byte feature mask and buf[12] an 8-byte fixed channel map.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Only advertise ERTM/streaming/FCS when the module
		 * parameter has not disabled them */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* payload starts right after the 4-byte rsp header */
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
2597
2598
/* Handle an L2CAP Information Response during connection setup.
 *
 * The info exchange is connection-global (not per channel), so the
 * response is matched against conn->info_ident and ignored once the
 * feature-mask phase is marked done. A feature-mask answer that
 * advertises fixed channels triggers a follow-up FIXED_CHAN query;
 * every terminal path marks the exchange done and kicks
 * l2cap_conn_start() to resume pending channel setups.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused the query: proceed without extended features */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chain a fixed-channel query before declaring the
			 * info exchange finished */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
2650
2651
/* Validate a set of LE connection parameters.
 *
 * min/max: connection interval bounds (1.25 ms units),
 * latency: slave latency in events, to_multiplier: supervision
 * timeout (10 ms units). Returns 0 if acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
						u16 to_multiplier)
{
	u16 lat_limit;

	/* interval range is 6..3200 and min must not exceed max */
	if (min < 6 || max > 3200 || min > max)
		return -EINVAL;

	/* supervision timeout range is 10..3200 */
	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* the timeout must strictly exceed the max interval
	 * (the units differ by a factor of 8) */
	if (to_multiplier * 8 <= max)
		return -EINVAL;

	/* latency must leave at least one event within the timeout,
	 * and is capped at 499 by the specification */
	lat_limit = (to_multiplier * 8) / max - 1;
	if (latency > lat_limit || latency > 499)
		return -EINVAL;

	return 0;
}
2671
2672
/* Handle an LE Connection Parameter Update Request from the slave.
 *
 * Only a master may accept the procedure. The parameters are validated
 * with l2cap_check_conn_param(); the peer always receives a response
 * (accepted/rejected), and on acceptance the controller is asked to
 * apply the new parameters.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	/* cmd->len is little-endian on the wire; reject malformed sizes
	 * before touching the payload */
	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Apply the update in the controller only after the response is
	 * queued and only when the parameters were acceptable */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
2713
2714
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * Returns 0 on success or a negative error, in which case the caller
 * (l2cap_sig_channel) sends a Command Reject to the peer. Echo requests
 * are answered inline; echo responses and command rejects need no
 * further action here.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo back the request payload verbatim */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
2771
2772
/* Dispatch one LE signaling command.
 *
 * Only the connection parameter update procedure is handled; command
 * rejects and update responses are accepted silently. Anything else is
 * reported so the caller can send a Command Reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, data);
		break;

	case L2CAP_COMMAND_REJ:
	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* nothing to do */
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
2790
2791
/* Process an skb received on the L2CAP signaling channel.
 *
 * The payload may carry several concatenated commands; each is copied
 * into a local header, bounds-checked against the remaining length,
 * and dispatched to the LE or BR/EDR handler depending on link type.
 * Handler errors are answered with a Command Reject. Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A claimed length beyond the packet, or the reserved
		 * identifier 0, means a corrupted command stream: stop. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* advance past this command's payload */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
2837
2838
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
2839
{
2840
u16 our_fcs, rcv_fcs;
2841
int hdr_size = L2CAP_HDR_SIZE + 2;
2842
2843
if (chan->fcs == L2CAP_FCS_CRC16) {
2844
skb_trim(skb, skb->len - 2);
2845
rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2846
our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2847
2848
if (our_fcs != rcv_fcs)
2849
return -EBADMSG;
2850
}
2851
return 0;
2852
}
2853
2854
/* Answer a poll (P=1) from the peer with the appropriate frame:
 * RNR if we are locally busy, otherwise pending I-frames, otherwise a
 * plain RR. frames_sent is reset first so we can detect below whether
 * l2cap_ertm_send() actually transmitted anything.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		chan->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* Nothing was sent and we are not busy: acknowledge with RR */
	if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}
2879
2880
/* Insert an out-of-sequence I-frame into the SREJ reassembly queue,
 * keeping the queue ordered by tx_seq relative to buffer_seq.
 *
 * Sequence numbers are modulo 64, so ordering is decided on the offset
 * from buffer_seq rather than on the raw values. Returns -EINVAL for a
 * duplicate tx_seq (caller drops the skb), 0 otherwise.
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	/* normalize the modulo-64 distance from buffer_seq */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* found the first queued frame logically after us:
		 * insert in front of it */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
2921
2922
/* Reassemble an ERTM SDU from its SAR fragments and deliver it to the
 * socket receive queue.
 *
 * control carries the SAR bits: unsegmented SDUs are delivered
 * directly; START allocates chan->sdu and begins accumulation;
 * CONTINUE/END append, END cloning the finished SDU for delivery.
 * L2CAP_CONN_SAR_RETRY marks a delivery retry after a previous ENOMEM /
 * receive-queue failure, in which case END must not append the payload
 * a second time. Returns 0 or a negative error (negative means "keep
 * the skb queued and retry later" to the caller).
 *
 * NOTE: the drop label intentionally falls through into disconnect --
 * any SAR protocol violation both discards the partial SDU and tears
 * the connection down.
 */
static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* an unsegmented SDU in the middle of reassembly is a
		 * protocol violation */
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(chan->sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		chan->sdu_len = get_unaligned_le16(skb->data);

		if (chan->sdu_len > chan->imtu)
			goto disconnect;

		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
		if (!chan->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		chan->conn_state |= L2CAP_CONN_SAR_SDU;
		chan->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		chan->partial_sdu_len += skb->len;
		if (chan->partial_sdu_len > chan->sdu_len)
			goto drop;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		/* on a retry the payload was already appended last time */
		if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
			chan->partial_sdu_len += skb->len;

			if (chan->partial_sdu_len > chan->imtu)
				goto drop;

			if (chan->partial_sdu_len != chan->sdu_len)
				goto drop;

			memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
		if (!_skb) {
			/* keep the SDU; deliver it on the next attempt */
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(chan->sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(chan->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(chan->sdu);
	chan->sdu = NULL;

disconnect:
	l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
3028
3029
/* Try to drain the local-busy backlog (busy_q) into the socket.
 *
 * Returns -EBUSY (re-queueing the failed skb at the head) if delivery
 * still fails, otherwise clears the local busy condition: if an RNR had
 * been advertised, poll the peer with RR+P and start the monitor timer
 * so the exit from local busy is acknowledged.
 */
static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(&chan->busy_q))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
		if (err < 0) {
			/* still busy: put it back for the next attempt */
			skb_queue_head(&chan->busy_q, skb);
			return -EBUSY;
		}

		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	}

	if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* we told the peer we were busy: send RR with the poll bit and
	 * wait for the final response */
	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	del_timer(&chan->retrans_timer);
	__mod_monitor_timer();

	chan->conn_state |= L2CAP_CONN_WAIT_F;

done:
	chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	chan->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("chan %p, Exit local busy", chan);

	return 0;
}
3067
3068
/* Workqueue handler that retries draining the local-busy backlog.
 *
 * Runs in process context with the socket lock held, sleeping in
 * HZ/5 slices between attempts. Gives up (and disconnects the channel)
 * after L2CAP_LOCAL_BUSY_TRIES attempts, on a pending signal, or on a
 * socket error; otherwise loops until l2cap_try_push_rx_skb() empties
 * busy_q.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_chan *chan =
		container_of(work, struct l2cap_chan, busy_work);
	struct sock *sk = chan->sk;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(&chan->busy_q))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(chan->conn, chan, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* sleep without the socket lock so receive/softirq paths
		 * can make progress */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(chan) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3114
3115
/* Deliver one in-sequence I-frame, entering the local busy state when
 * the socket cannot take more data.
 *
 * If already busy, the frame simply joins busy_q. Otherwise a delivery
 * failure triggers the busy procedure: queue the frame, advertise RNR
 * to the peer, stop acking, and kick the busy workqueue to retry from
 * process context. Returns the reassembly error (negative while busy).
 */
static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	int sctrl, err;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(&chan->busy_q, skb);
		return l2cap_try_push_rx_skb(chan);


	}

	err = l2cap_ertm_reassembly_sdu(chan, skb, control);
	if (err >= 0) {
		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("chan %p, Enter local busy", chan);

	chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(&chan->busy_q, skb);

	/* advertise receiver-not-ready with our current buffer_seq */
	sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(chan, sctrl);

	chan->conn_state |= L2CAP_CONN_RNR_SENT;

	/* no acks while busy */
	del_timer(&chan->ack_timer);

	queue_work(_busy_wq, &chan->busy_work);

	return err;
}
3152
3153
/* Reassemble a streaming-mode SDU from its SAR fragments and deliver it
 * to the socket receive queue.
 *
 * Streaming mode is unreliable: on any SAR inconsistency the partial
 * SDU is simply discarded (no disconnect, unlike ERTM). Unsegmented
 * SDUs are delivered directly; START begins accumulation in chan->sdu,
 * CONTINUE appends, END appends and delivers a clone of the finished
 * SDU. Always consumes skb. Returns 0 on success or a negative error.
 *
 * Fix vs. original: skb_clone() can fail under memory pressure; the
 * result was previously passed unchecked to sock_queue_rcv_skb(),
 * which would dereference the NULL pointer. The clone is now checked
 * and the SDU dropped on allocation failure (acceptable loss in
 * streaming mode).
 */
static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	struct sk_buff *_skb;
	int err = -EINVAL;

	/*
	 * TODO: We have to notify the userland if some data is lost with the
	 * Streaming Mode.
	 */

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* stray unsegmented SDU aborts any reassembly in progress */
		if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
			kfree_skb(chan->sdu);
			break;
		}

		err = sock_queue_rcv_skb(chan->sk, skb);
		if (!err)
			return 0;

		break;

	case L2CAP_SDU_START:
		/* a new START while reassembling drops the stale SDU */
		if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
			kfree_skb(chan->sdu);
			break;
		}

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
		if (!chan->sdu) {
			err = -ENOMEM;
			break;
		}

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		chan->conn_state |= L2CAP_CONN_SAR_SDU;
		chan->partial_sdu_len = skb->len;
		err = 0;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		chan->partial_sdu_len += skb->len;
		if (chan->partial_sdu_len > chan->sdu_len)
			kfree_skb(chan->sdu);
		else
			err = 0;

		break;

	case L2CAP_SDU_END:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			break;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
		chan->partial_sdu_len += skb->len;

		if (chan->partial_sdu_len > chan->imtu)
			goto drop;

		/* deliver only if the declared SDU length matches;
		 * otherwise the SDU is silently lost (streaming mode) */
		if (chan->partial_sdu_len == chan->sdu_len) {
			_skb = skb_clone(chan->sdu, GFP_ATOMIC);
			if (_skb) {
				err = sock_queue_rcv_skb(chan->sk, _skb);
				if (err < 0)
					kfree_skb(_skb);
			}
		}
		err = 0;

drop:
		kfree_skb(chan->sdu);
		break;
	}

	kfree_skb(skb);
	return err;
}
3245
3246
/* Flush the run of consecutive frames starting at tx_seq out of the
 * SREJ queue: each one is reassembled in order and buffer_seq_srej
 * advanced, stopping at the first gap (missing sequence number).
 */
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
{
	struct sk_buff *skb = skb_peek(&chan->srej_q);

	while (skb && bt_cb(skb)->tx_seq == tx_seq) {
		u16 sar_control;

		skb = skb_dequeue(&chan->srej_q);
		sar_control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_ertm_reassembly_sdu(chan, skb, sar_control);

		chan->buffer_seq_srej = (chan->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;

		skb = skb_peek(&chan->srej_q);
	}
}
3263
3264
/* Resend SREJ frames for every outstanding missing sequence number up
 * to (and removing) the entry for tx_seq, which has now arrived.
 * Entries that are re-requested are rotated to the tail of srej_l so
 * the list stays ordered by request time.
 */
static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
{
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
		if (l->tx_seq == tx_seq) {
			/* this one arrived: stop re-requesting it */
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(chan, control);
		list_del(&l->list);
		list_add_tail(&l->list, &chan->srej_l);
	}
}
3282
3283
/* Send SREJ frames for every sequence number from expected_tx_seq up to
 * (but excluding) the newly received tx_seq, recording each missing
 * number in srej_l, then step expected_tx_seq past tx_seq itself.
 *
 * Fix vs. original: kzalloc(GFP_ATOMIC) can fail and its result was
 * dereferenced unchecked (new->tx_seq), crashing under memory
 * pressure. The bookkeeping entry is now allocated *before* the SREJ
 * is put on the air; on failure we stop without advertising a request
 * we cannot track (recovery is left to the retransmission timers).
 */
static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
{
	struct srej_list *new;
	u16 control;

	while (tx_seq != chan->expected_tx_seq) {
		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
		if (!new)
			return;

		control = L2CAP_SUPER_SELECT_REJECT;
		control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(chan, control);

		new->tx_seq = chan->expected_tx_seq;
		chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
		list_add_tail(&new->list, &chan->srej_l);
	}
	/* the frame that triggered the SREJs was itself received */
	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
}
3300
3301
/* ERTM receive path for an I-frame.
 *
 * Processes the piggybacked req_seq acknowledgement, then classifies
 * the frame's tx_seq: in-sequence frames are delivered (and acked in
 * batches of num_to_ack); out-of-window frames disconnect the channel;
 * out-of-sequence frames either fill a gap in an ongoing SREJ recovery
 * or start a new one. Takes ownership of skb on every path.
 */
static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int tx_seq_offset, expected_tx_seq_offset;
	int num_to_ack = (chan->tx_win/6) + 1;
	int err = 0;

	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
							tx_seq, rx_control);

	/* an F-bit answers our poll: stop the monitor timer */
	if (L2CAP_CTRL_FINAL & rx_control &&
			chan->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&chan->monitor_timer);
		if (chan->unacked_frames > 0)
			__mod_retrans_timer();
		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	/* process the piggybacked acknowledgement */
	chan->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(chan);

	if (tx_seq == chan->expected_tx_seq)
		goto expected;

	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= chan->tx_win) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	/* NOTE(review): this compares the whole conn_state word for
	 * equality rather than testing the LOCAL_BUSY bit; it only
	 * matches when LOCAL_BUSY is the sole flag set. Looks like it
	 * wants '& L2CAP_CONN_LOCAL_BUSY' -- confirm against later
	 * upstream (the flag rework uses test_bit here). */
	if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
		goto drop;

	if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(&chan->srej_l,
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* the oldest missing frame arrived: drain the run of
			 * now-consecutive frames from the SREJ queue */
			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
			l2cap_check_srej_gap(chan, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(&chan->srej_l)) {
				chan->buffer_seq = chan->buffer_seq_srej;
				chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
				l2cap_send_ack(chan);
				BT_DBG("chan %p, Exit SREJ_SENT", chan);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
				goto drop;

			list_for_each_entry(l, &chan->srej_l, list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(chan, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(chan, tx_seq);
		}
	} else {
		expected_tx_seq_offset =
			(chan->expected_tx_seq - chan->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		/* first out-of-sequence frame: enter SREJ recovery */
		chan->conn_state |= L2CAP_CONN_SREJ_SENT;

		BT_DBG("chan %p, Enter SREJ", chan);

		INIT_LIST_HEAD(&chan->srej_l);
		chan->buffer_seq_srej = chan->buffer_seq;

		__skb_queue_head_init(&chan->srej_q);
		__skb_queue_head_init(&chan->busy_q);
		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);

		chan->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(chan, tx_seq);

		del_timer(&chan->ack_timer);
	}
	return 0;

expected:
	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;

	if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* in-sequence relative to recovery: stash it ordered;
		 * delivery happens when the gap closes */
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	err = l2cap_push_rx_skb(chan, skb, rx_control);
	if (err < 0)
		return 0;

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);
	}

	__mod_ack_timer();

	/* ack every num_to_ack frames instead of one-by-one */
	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
	if (chan->num_acked == num_to_ack - 1)
		l2cap_send_ack(chan);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3435
3436
/* Handle a Receiver Ready (RR) S-frame.
 *
 * Always processes the acknowledgement in req_seq. A P-bit demands a
 * response (SREJ tail if in recovery, else I/RR/RNR); an F-bit answers
 * our own poll or a REJ; a plain RR clears remote busy and resumes
 * transmission.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
						rx_control);

	chan->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* peer polled us: our next frame must carry the F-bit */
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(chan->unacked_frames > 0))
				__mod_retrans_timer();

			chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		/* a REJ we acted on is now acknowledged; otherwise this
		 * F-bit closes a poll and we retransmit */
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);

	} else {
		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(chan->unacked_frames > 0))
			__mod_retrans_timer();

		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
3477
3478
/* Handle a Reject (REJ) S-frame: the peer asks for a go-back-N
 * retransmission starting at req_seq. Frames up to req_seq are acked;
 * with an F-bit, retransmission is skipped if we already acted on an
 * earlier REJ; without it we retransmit and, while waiting for a poll
 * answer, remember that this REJ was acted upon.
 */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 req_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, req_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	chan->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(chan);

	if (!(rx_control & L2CAP_CTRL_FINAL)) {
		l2cap_retransmit_frames(chan);
		if (chan->conn_state & L2CAP_CONN_WAIT_F)
			chan->conn_state |= L2CAP_CONN_REJ_ACT;
		return;
	}

	if (chan->conn_state & L2CAP_CONN_REJ_ACT)
		chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
	else
		l2cap_retransmit_frames(chan);
}
3501
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the
 * frame with sequence number req_seq.
 *
 * With a P-bit the req_seq also acknowledges earlier frames and our
 * answer must carry the F-bit; with an F-bit a duplicate SREJ for the
 * sequence we already resent under SREJ_ACT is suppressed. While
 * waiting for a poll answer, the resent seq is remembered so that the
 * later F-bit copy is not resent again.
 */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
				chan->srej_save_reqseq == tx_seq)
			chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
3536
3537
/* Handle a Receiver Not Ready (RNR) S-frame: the peer cannot take more
 * I-frames. Marks remote busy, processes the ack, and — outside SREJ
 * recovery — stops the retransmission timer; a P-bit still has to be
 * answered (RR+F, or the SREJ tail while in recovery).
 */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL)
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* no retransmissions while the peer is busy */
		del_timer(&chan->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(chan);
	else
		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
}
3562
3563
/* Dispatch a supervisory (S) frame to the handler for its function
 * (RR / REJ / SREJ / RNR). An F-bit answering our poll stops the
 * monitor timer first. Consumes the skb (S-frames carry no payload).
 */
static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);

	if (L2CAP_CTRL_FINAL & rx_control &&
			chan->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&chan->monitor_timer);
		if (chan->unacked_frames > 0)
			__mod_retrans_timer();
		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(chan, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(chan, rx_control);
		break;
	}

	kfree_skb(skb);
	return 0;
}
3596
3597
/* Validate and route one received ERTM frame (called from softirq or
 * via the socket backlog).
 *
 * Strips the 16-bit control field, verifies the FCS (corrupted frames
 * are silently dropped -- ERTM recovery will re-request them), checks
 * the payload length against MPS, and validates the piggybacked
 * req_seq against the send window before handing the frame to the
 * I-frame or S-frame path. Protocol violations disconnect the channel.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* SAR start fragments carry a 2-byte SDU length prefix */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	/* the acked sequence must lie between expected_ack_seq and
	 * next_tx_seq (modulo 64) */
	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		if (len < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		/* S-frames must carry no payload at all */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3666
3667
/* Entry point for data received on a connection-oriented channel,
 * dispatching on the channel mode.
 *
 * Basic mode delivers directly to the socket; ERTM goes through
 * l2cap_ertm_data_rcv() (or the socket backlog if userspace holds the
 * lock); streaming mode validates the frame inline, tolerating lost
 * frames by resyncing expected_tx_seq. Unknown CIDs and failures drop
 * the skb.
 *
 * NOTE: l2cap_get_chan_by_scid() bh-locks the socket on success, hence
 * the conditional unlock at done.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;
	struct sock *sk = NULL;
	struct l2cap_pinfo *pi;
	u16 control;
	u8 tx_seq;
	int len;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	sk = chan->sk;
	pi = l2cap_pi(sk);

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			/* userspace holds the lock: defer to the backlog */
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(chan, skb))
			goto drop;

		if (__is_sar_start(control))
			len -= 2;

		if (chan->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/* streaming mode never carries S-frames */
		if (len > chan->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		/* tolerate gaps: resync on whatever sequence arrived */
		if (chan->expected_tx_seq == tx_seq)
			chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
		else
			chan->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(chan, skb, control);

		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3756
3757
/* Deliver an incoming connectionless-channel packet: look up a socket
 * bound to @psm on this adapter, check its state and incoming MTU, and
 * queue the skb on it.  The skb is always consumed; always returns 0.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct l2cap_chan *chan = l2cap_global_chan_by_psm(0, psm, conn->src);
	struct sock *sk = NULL;

	if (!chan)
		goto drop;

	sk = chan->sk;
	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	/* Only bound or connected sockets may receive. */
	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	/* The packet must fit within the channel's incoming MTU. */
	if (l2cap_pi(sk)->chan->imtu < skb->len)
		goto drop;

	if (sock_queue_rcv_skb(sk, skb) == 0)
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
3789
3790
/* Deliver an incoming packet on the LE data (ATT) fixed channel: look
 * up a socket bound to @cid on this adapter, check its state and
 * incoming MTU, and queue the skb on it.  The skb is always consumed;
 * always returns 0.
 */
static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan = l2cap_global_chan_by_scid(0, cid, conn->src);
	struct sock *sk = NULL;

	if (!chan)
		goto drop;

	sk = chan->sk;
	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	/* Only bound or connected sockets may receive. */
	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	/* The packet must fit within the channel's incoming MTU. */
	if (l2cap_pi(sk)->chan->imtu < skb->len)
		goto drop;

	if (sock_queue_rcv_skb(sk, skb) == 0)
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
3822
3823
/* Dispatch one complete L2CAP frame to the handler for its channel:
 * signalling, connectionless, LE data (ATT), or a connection-oriented
 * data channel.  Every handler consumes the skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length disagrees with the actual payload: drop. */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry a 2-byte PSM before the data. */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
3861
3862
/* ---- L2CAP interface with lower layer (HCI) ---- */
3863
3864
/* HCI callback: an incoming ACL connection from @bdaddr is being set
 * up.  Scan the listening L2CAP sockets to decide whether to accept the
 * link and whether to request the master role.  Listeners bound to this
 * adapter's own address take precedence over wildcard (BDADDR_ANY)
 * listeners.
 */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct l2cap_chan *c;
	int num_exact = 0;
	int lm_exact = 0, lm_wild = 0;

	if (type != ACL_LINK)
		return -EINVAL;

	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (sk->sk_state != BT_LISTEN)
			continue;

		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			/* Listener bound to this adapter's address. */
			lm_exact |= HCI_LM_ACCEPT;
			if (c->role_switch)
				lm_exact |= HCI_LM_MASTER;
			num_exact++;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			/* Wildcard listener: only used if nothing matched
			 * the adapter address exactly. */
			lm_wild |= HCI_LM_ACCEPT;
			if (c->role_switch)
				lm_wild |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return num_exact ? lm_exact : lm_wild;
}
3897
3898
/* HCI callback: an ACL/LE connection attempt completed with @status.
 * On success, create (or look up) the L2CAP connection object and start
 * channel setup; on failure, tear down any associated L2CAP state.
 */
static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn;

	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return -EINVAL;

	if (status) {
		l2cap_conn_del(hcon, bt_err(status));
		return 0;
	}

	conn = l2cap_conn_add(hcon, status);
	if (conn)
		l2cap_conn_ready(conn);

	return 0;
}
3916
3917
static int l2cap_disconn_ind(struct hci_conn *hcon)
3918
{
3919
struct l2cap_conn *conn = hcon->l2cap_data;
3920
3921
BT_DBG("hcon %p", hcon);
3922
3923
if (hcon->type != ACL_LINK || !conn)
3924
return 0x13;
3925
3926
return conn->disc_reason;
3927
}
3928
3929
/* HCI callback: the link was disconnected with @reason.  Tear down all
 * L2CAP state attached to the connection.
 */
static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return -EINVAL;

	l2cap_conn_del(hcon, bt_err(reason));

	return 0;
}
3940
3941
/* React to a link-encryption change for one channel.  Losing encryption
 * on a medium-security channel (re)arms a 5 second grace timer; on a
 * high-security channel the socket is closed outright.  Regaining
 * encryption on a medium-security channel cancels the timer.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	struct sock *sk = chan->sk;

	/* Only connection-oriented socket types are policed here. */
	if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
		return;

	if (encrypt) {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			l2cap_sock_clear_timer(sk);
		return;
	}

	if (chan->sec_level == BT_SECURITY_MEDIUM) {
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ * 5);
	} else if (chan->sec_level == BT_SECURITY_HIGH) {
		__l2cap_sock_close(sk, ECONNREFUSED);
	}
}
3959
3960
/* HCI callback: a security (authentication/encryption) procedure on
 * @hcon finished with @status.  Walk every channel on the connection
 * and advance its state machine: established channels re-check their
 * encryption requirements, outgoing connects are issued or failed, and
 * deferred incoming connects are finally answered.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* A connect request is already in flight for this channel;
		 * nothing to do until it completes. */
		if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established or configuring channel: only re-validate the
		 * encryption state against the channel's security level. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(chan, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security is in place: send the deferred
				 * connection request. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(chan->scid);
				req.psm = chan->psm;

				chan->ident = l2cap_get_ident(conn);
				chan->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: arm a short timer so the
				 * socket is torn down shortly. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming connect was waiting on security: answer
			 * the peer's request now. */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (bt_sk(sk)->defer_setup) {
					/* Userspace still has to authorize:
					 * report pending and wake the
					 * listening parent socket. */
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					sk->sk_state = BT_CONFIG;
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failure: refuse the connection and
				 * schedule the socket for teardown. */
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
4042
4043
/* HCI callback: one ACL data packet arrived for @hcon.  A single L2CAP
 * frame may span several ACL packets: a start fragment (ACL_CONT clear)
 * is either dispatched immediately when complete or buffered in
 * conn->rx_skb, and continuation fragments are appended until the
 * frame's declared length has been collected.  The incoming skb is
 * always consumed; always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* First traffic on this link: create the L2CAP connection lazily. */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* A new start fragment while reassembly is still in progress
		 * means the previous frame was truncated: discard it and mark
		 * the connection unreliable. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		/* More data in this fragment than the header declared. */
		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		chan = l2cap_get_chan_by_scid(conn, cid);

		/* NOTE(review): l2cap_get_chan_by_scid() appears to return
		 * with the channel's socket bh-locked; the unlocks below pair
		 * with it — confirm against its definition. */
		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			/* Reject frames that could never fit the channel's
			 * receive MTU before buffering anything. */
			if (chan->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
							"MTU %d)", len,
							chan->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation arrived with no reassembly in progress. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment overruns the bytes still expected: abandon the
		 * partially assembled frame. */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4153
4154
/* seq_file show callback backing the "l2cap" debugfs entry: print one
 * line per known channel (source/dest address, socket state, PSM, CIDs,
 * MTUs, security level and mode).
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock_bh(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(c->psm),
					c->scid, c->dcid, c->imtu, c->omtu,
					c->sec_level, c->mode);
	}

	read_unlock_bh(&chan_list_lock);

	return 0;
}
4175
4176
/* Open handler for the debugfs file: bind the seq_file show callback. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4180
4181
/* File operations for the read-only L2CAP debugfs entry; content is
 * generated by l2cap_debugfs_show() via the seq_file machinery. */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
4187
4188
/* Dentry for the "l2cap" debugfs file, created in l2cap_init() and
 * removed in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
4189
4190
/* Registration record hooking L2CAP into the HCI core: connection
 * accept policy, connect/disconnect notifications, security results and
 * inbound ACL data delivery. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4200
4201
/* Module init: bring up the L2CAP socket layer, create the busy-state
 * workqueue, register with HCI, and (optionally) create the debugfs
 * entry.  Returns 0 on success or a negative errno; on failure, every
 * resource acquired so far is released.
 *
 * Fixes over the previous version:
 *  - the single 'error' label called destroy_workqueue(_busy_wq) even
 *    when create_singlethread_workqueue() had failed (_busy_wq NULL);
 *    split error labels so only acquired resources are released.
 *  - the hci_register_proto() failure path called bt_sock_unregister()
 *    before jumping to cleanup, but l2cap_cleanup_sockets() performs
 *    the socket unregistration itself, so that call was redundant.
 */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	_busy_wq = create_singlethread_workqueue("l2cap");
	if (!_busy_wq) {
		err = -ENOMEM;
		goto error_sockets;
	}

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		goto error_wq;
	}

	/* debugfs is optional: failure to create the file is only logged. */
	if (bt_debugfs) {
		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
					bt_debugfs, NULL, &l2cap_debugfs_fops);
		if (!l2cap_debugfs)
			BT_ERR("Failed to create L2CAP debug file");
	}

	return 0;

error_wq:
	destroy_workqueue(_busy_wq);
error_sockets:
	l2cap_cleanup_sockets();
	return err;
}
4236
4237
/* Module teardown: remove the debugfs entry, drain and destroy the busy
 * workqueue, unhook from HCI, and release the socket layer. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Flush any queued busy-work before the queue goes away. */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
4249
4250
/* Allow disabling Enhanced Retransmission Mode at module load time
 * (world-readable, root-writable via sysfs). */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4252
4253