Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/net/bluetooth/l2cap_core.c
49317 views
1
/*
2
BlueZ - Bluetooth protocol stack for Linux
3
Copyright (C) 2000-2001 Qualcomm Incorporated
4
Copyright (C) 2009-2010 Gustavo F. Padovan <[email protected]>
5
Copyright (C) 2010 Google Inc.
6
Copyright (C) 2011 ProFUSION Embedded Systems
7
Copyright (c) 2012 Code Aurora Forum. All rights reserved.
8
9
Written 2000,2001 by Maxim Krasnyansky <[email protected]>
10
11
This program is free software; you can redistribute it and/or modify
12
it under the terms of the GNU General Public License version 2 as
13
published by the Free Software Foundation;
14
15
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
24
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26
SOFTWARE IS DISCLAIMED.
27
*/
28
29
/* Bluetooth L2CAP core. */
30
31
#include <linux/module.h>
32
33
#include <linux/debugfs.h>
34
#include <linux/crc16.h>
35
#include <linux/filter.h>
36
37
#include <net/bluetooth/bluetooth.h>
38
#include <net/bluetooth/hci_core.h>
39
#include <net/bluetooth/l2cap.h>
40
41
#include "smp.h"
42
43
#define LE_FLOWCTL_MAX_CREDITS 65535
44
45
bool disable_ertm;
46
bool enable_ecred = IS_ENABLED(CONFIG_BT_LE_L2CAP_ECRED);
47
48
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49
50
static LIST_HEAD(chan_list);
51
static DEFINE_RWLOCK(chan_list_lock);
52
53
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
54
u8 code, u8 ident, u16 dlen, void *data);
55
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
56
void *data);
57
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
58
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
59
60
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
61
struct sk_buff_head *skbs, u8 event);
62
static void l2cap_retrans_timeout(struct work_struct *work);
63
static void l2cap_monitor_timeout(struct work_struct *work);
64
static void l2cap_ack_timeout(struct work_struct *work);
65
66
/* Map an HCI link type plus HCI address type to the corresponding
 * BDADDR_* address type used by the L2CAP/socket layers.
 */
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	return bdaddr_type == ADDR_LE_DEV_PUBLIC ? BDADDR_LE_PUBLIC :
						   BDADDR_LE_RANDOM;
}
77
78
/* BDADDR_* type of the local (source) address of @hcon. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
82
83
/* BDADDR_* type of the remote (destination) address of @hcon. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
87
88
/* ---- L2CAP channels ---- */
89
90
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
91
u16 cid)
92
{
93
struct l2cap_chan *c;
94
95
list_for_each_entry(c, &conn->chan_l, list) {
96
if (c->dcid == cid)
97
return c;
98
}
99
return NULL;
100
}
101
102
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103
u16 cid)
104
{
105
struct l2cap_chan *c;
106
107
list_for_each_entry(c, &conn->chan_l, list) {
108
if (c->scid == cid)
109
return c;
110
}
111
return NULL;
112
}
113
114
/* Find channel with given SCID.
 * Returns a reference locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0: a channel whose
		 * refcount already dropped to zero is being destroyed and
		 * must not be resurrected here.
		 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	/* NULL if not found or if the channel was already dying */
	return c;
}
132
133
/* Find channel with given DCID.
 * Returns a reference locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0: a channel whose
		 * refcount already dropped to zero is being destroyed and
		 * must not be resurrected here.
		 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}

	/* NULL if not found or if the channel was already dying */
	return c;
}
151
152
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
153
u8 ident)
154
{
155
struct l2cap_chan *c;
156
157
list_for_each_entry(c, &conn->chan_l, list) {
158
if (c->ident == ident)
159
return c;
160
}
161
return NULL;
162
}
163
164
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
165
u8 src_type)
166
{
167
struct l2cap_chan *c;
168
169
list_for_each_entry(c, &chan_list, global_l) {
170
if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
171
continue;
172
173
if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
174
continue;
175
176
if (c->sport == psm && !bacmp(&c->src, src))
177
return c;
178
}
179
return NULL;
180
}
181
182
/* Bind @chan to PSM @psm on source address @src.
 *
 * If @psm is non-zero it is used as-is and -EADDRINUSE is returned when
 * another channel already owns that PSM/address pair.  If @psm is zero a
 * free PSM is auto-allocated from the transport's dynamic range; -EINVAL
 * is returned if that range is exhausted.  Returns 0 on success.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			/* BR/EDR walks the range in steps of two; only
			 * every other value is a valid dynamic PSM there.
			 */
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		/* Stays -EINVAL unless a free PSM is found below */
		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
226
227
/* Assign a fixed source CID to @chan, turning it into a fixed channel.
 * Always returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
241
242
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
243
{
244
u16 cid, dyn_end;
245
246
if (conn->hcon->type == LE_LINK)
247
dyn_end = L2CAP_CID_LE_DYN_END;
248
else
249
dyn_end = L2CAP_CID_DYN_END;
250
251
for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
252
if (!__l2cap_get_chan_by_scid(conn, cid))
253
return cid;
254
}
255
256
return 0;
257
}
258
259
/* Move @chan to @state and notify the channel owner (no error code). */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
267
268
/* Move @chan to @state and notify the owner with error code @err. */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
274
275
/* Report error @err to the channel owner without changing state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
279
280
/* Arm the ERTM retransmission timer, but only when the monitor timer is
 * not already pending (the two are mutually exclusive) and a retransmit
 * timeout has been configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
288
289
/* Arm the ERTM monitor timer, cancelling the retransmission timer first
 * (only one of the two runs at a time).  Skipped when no monitor timeout
 * is configured.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
297
298
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
299
u16 seq)
300
{
301
struct sk_buff *skb;
302
303
skb_queue_walk(head, skb) {
304
if (bt_cb(skb)->l2cap.txseq == seq)
305
return skb;
306
}
307
308
return NULL;
309
}
310
311
/* ---- L2CAP sequence number lists ---- */
312
313
/* For ERTM, ordered lists of sequence numbers must be tracked for
314
* SREJ requests that are received and for frames that are to be
315
* retransmitted. These seq_list functions implement a singly-linked
316
* list in an array, where membership in the list can also be checked
317
* in constant time. Items can also be added to the tail of the list
318
* and removed from the head in constant time, without further memory
319
* allocs or frees.
320
*/
321
322
/* Initialize @seq_list to hold up to @size sequence numbers.
 * Returns 0 on success or -ENOMEM if the backing array cannot be
 * allocated.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size lets "seq & mask" replace a modulo */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	/* Every slot starts out empty (CLEAR marks non-membership) */
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
344
345
/* Release the backing array of @seq_list (kfree(NULL) is a no-op). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
349
350
/* Return true if @seq is currently a member of @seq_list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership: a slot holding CLEAR
	 * means the corresponding sequence number is not queued.
	 */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
356
357
/* Remove and return the sequence number at the head of @seq_list.
 * Caller must ensure the list is non-empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next linked entry, then mark the popped
	 * slot empty so membership checks no longer see it.
	 */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	/* Popped the last element: reset to the empty-list state */
	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
372
373
/* Empty @seq_list, resetting every slot and the head/tail markers. */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	/* Already empty: the backing array is all-CLEAR by invariant */
	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
386
387
/* Append @seq to the tail of @seq_list; duplicates are ignored. */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: the slot holds a link or the TAIL marker */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;	/* first element of an empty list */
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
404
405
/* Delayed-work handler for the per-channel timer: close the channel with
 * an error reason derived from the state it timed out in.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	/* Channel already detached from its connection; nothing to do */
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Drop the reference taken when this work was scheduled */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->lock);
}
440
441
/* Allocate and initialize a new L2CAP channel in BT_OPEN state with a
 * single reference held.  The channel is linked onto the global channel
 * list.  Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	/* Available receive buffer space is initially unknown */
	chan->rx_avail = -1;

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	/* Initial reference owned by the caller */
	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
480
481
/* kref release callback: unlink the channel from the global list and
 * free it.  Runs when the last reference is dropped via l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
493
494
/* Take an additional reference on @c; caller must already hold one. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
EXPORT_SYMBOL_GPL(l2cap_chan_hold);
501
502
/* Try to take a reference on @c.  Returns @c on success, or NULL when
 * the refcount already reached zero (channel is being destroyed).
 */
struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	if (!kref_get_unless_zero(&c->kref))
		return NULL;

	return c;
}
511
512
/* Drop a reference on @c; frees the channel when the count hits zero. */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
519
520
/* Reset @chan's negotiable parameters (FCS, windows, timeouts, security)
 * to the L2CAP defaults, clearing any previous configuration state.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	/* Until negotiation, assume the remote mirrors our defaults */
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	/* Wipe previous config state, then mark config as in progress */
	chan->conf_state = 0;
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
540
541
/* Compute how many LE flow-control credits to grant the remote, based on
 * the space left in our receive buffer and the negotiated MPS.
 */
static __u16 l2cap_le_rx_credits(struct l2cap_chan *chan)
{
	/* Bytes of a partially reassembled SDU already consumed */
	size_t sdu_len = chan->sdu ? chan->sdu->len : 0;

	/* Avoid dividing by zero before MPS has been negotiated */
	if (chan->mps == 0)
		return 0;

	/* If we don't know the available space in the receiver buffer, give
	 * enough credits for a full packet.
	 */
	if (chan->rx_avail == -1)
		return (chan->imtu / chan->mps) + 1;

	/* If we know how much space is available in the receive buffer, give
	 * out as many credits as would fill the buffer.
	 */
	if (chan->rx_avail <= sdu_len)
		return 0;

	return DIV_ROUND_UP(chan->rx_avail - sdu_len, chan->mps);
}
562
563
/* Initialize LE credit-based flow control state on @chan, seeding the
 * transmit side with @tx_credits granted by the remote.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	/* No SDU reassembly in progress yet */
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	chan->rx_credits = l2cap_le_rx_credits(chan);

	skb_queue_head_init(&chan->tx_q);
}
575
576
/* Initialize enhanced-credit (ECRED) flow control: same as LE flow
 * control but with the spec-mandated minimum MPS enforced.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		/* MPS changed, so the credit calculation must be redone */
		chan->rx_credits = l2cap_le_rx_credits(chan);
	}
}
586
587
/* Attach @chan to @conn: assign CIDs/MTU according to the channel type,
 * set QoS defaults, take channel and (usually) hci_conn references, and
 * link the channel onto the connection's channel list.
 * Caller holds conn->lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Best-effort QoS defaults for the local side */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference held by the connection's channel list; dropped in
	 * l2cap_chan_del()
	 */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	/* Append to the list since the order matters for ECRED */
	list_add_tail(&chan->list, &conn->chan_l);
}
639
640
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->lock);
}
646
647
/* Detach @chan from its connection and tear down per-mode transmit and
 * retransmission state, reporting @err to the channel owner.
 * Caller holds the channel lock.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference held by the connection's list */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);
	}

	/* Config never completed, so no mode-specific state was set up */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
704
705
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
706
l2cap_chan_func_t func, void *data)
707
{
708
struct l2cap_chan *chan, *l;
709
710
list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
711
if (chan->ident == id)
712
func(chan, data);
713
}
714
}
715
716
/* Invoke @func(@chan, @data) on every channel of @conn.
 * Caller holds conn->lock; @func must not remove entries.
 */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		func(chan, data);
}
725
726
/* Locked wrapper around __l2cap_chan_list(); tolerates a NULL @conn. */
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->lock);
}

EXPORT_SYMBOL_GPL(l2cap_chan_list);
738
739
/* Delayed-work handler: propagate the hci_conn's (possibly resolved)
 * destination address and type to every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_timer.work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
757
758
/* Reject a pending LE credit-based connection request on @chan by
 * sending an LE connection response with an error result and moving the
 * channel to BT_DISCONN.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	/* Deferred setup means the rejection came from authorization */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
780
781
/* Reject a pending ECRED connection on @chan: move to BT_DISCONN and
 * emit the deferred enhanced-credit connection response.
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
787
788
/* Reject a pending BR/EDR connection request on @chan by sending a
 * connection response with an error result and moving to BT_DISCONN.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	/* Deferred setup means the rejection came from security/authz */
	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
808
809
/* Close @chan with error @reason, driving the appropriate teardown for
 * its current state: disconnect request, connect rejection, or plain
 * channel deletion.  Caller holds the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Send a disconnect request and wait (bounded by
			 * the channel timer) for the response.
			 */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		/* Incoming request not yet accepted: reject it in the
		 * transport-appropriate way before deleting the channel.
		 */
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					/* ECRED defers the response, so the
					 * channel is not deleted here.
					 */
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
859
860
/* Derive the HCI authentication requirement (HCI_AT_*) for @chan from
 * its channel type and security level.  May upgrade BT_SECURITY_LOW to
 * BT_SECURITY_SDP for SDP/3DSP PSMs as a side effect.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		/* Raw (signalling-only) channels map to dedicated bonding */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		/* SDP connections never require bonding */
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		/* Other connection-oriented channels use general bonding */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
911
912
/* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	/* LE links use SMP instead of the BR/EDR auth-type machinery */
	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
926
927
/* Allocate the next signalling-command identifier for @conn. */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	/* Wrap within the kernel range; 0 is never a valid ident */
	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
948
949
/* Send @skb on @conn's ACL link, or free it if the underlying hci_conn
 * has already gone away.  Consumes @skb either way.
 */
static void l2cap_send_acl(struct l2cap_conn *conn, struct sk_buff *skb,
			   u8 flags)
{
	/* Check if the hcon still valid before attempting to send */
	if (hci_conn_valid(conn->hcon->hdev, conn->hcon))
		hci_send_acl(conn->hchan, skb, flags);
	else
		kfree_skb(skb);
}
958
959
/* Build and transmit an L2CAP signalling command (@code/@ident) with
 * @len bytes of payload from @data.  Silently drops the command if the
 * skb cannot be built.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling traffic gets top priority and forces active mode */
	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	l2cap_send_acl(conn, skb, flags);
}
983
984
/* Transmit a data skb on @chan's ACL link with the appropriate flush
 * flags.  Consumes @skb.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1006
1007
/* Decode a 16-bit ERTM enhanced control field @enh into @control.
 * Fields not applicable to the frame type (S- vs I-frame) are zeroed.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
1030
1031
/* Decode a 32-bit ERTM extended control field @ext into @control.
 * Fields not applicable to the frame type (S- vs I-frame) are zeroed.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
1054
1055
/* Decode and strip the control field at the front of @skb into the skb's
 * control block, choosing the extended or enhanced layout based on the
 * channel's FLAG_EXT_CTRL flag.
 */
static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->l2cap);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}
1068
1069
/* Encode @control into a 32-bit ERTM extended control field. */
static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
1087
1088
/* Encode @control into a 16-bit ERTM enhanced control field. */
static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}
1106
1107
/* Write @control into @skb just after the L2CAP basic header, using the
 * extended or enhanced encoding according to FLAG_EXT_CTRL.
 */
static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}
1119
1120
static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1121
{
1122
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1123
return L2CAP_EXT_HDR_SIZE;
1124
else
1125
return L2CAP_ENH_HDR_SIZE;
1126
}
1127
1128
/* Build an ERTM S-frame PDU for @chan carrying the already-packed
 * @control field, appending an FCS when the channel uses CRC16.
 * Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic L2CAP header: payload length and destination CID */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything written so far (header + control) */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1160
1161
/* Build and transmit one supervisory frame described by @control,
 * updating the related connection state bits as a side effect:
 * a pending F-bit is consumed (unless this is a poll), RNR_SENT tracks
 * RR/RNR, and any non-SREJ S-frame acknowledges up to reqseq and stops
 * the ack timer.  Silently does nothing if @control is not an S-frame.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* A queued F-bit is sent on the next non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* SREJ does not acknowledge; everything else does */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1198
1199
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1200
{
1201
struct l2cap_ctrl control;
1202
1203
BT_DBG("chan %p, poll %d", chan, poll);
1204
1205
memset(&control, 0, sizeof(control));
1206
control.sframe = 1;
1207
control.poll = poll;
1208
1209
if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1210
control.super = L2CAP_SUPER_RNR;
1211
else
1212
control.super = L2CAP_SUPER_RR;
1213
1214
control.reqseq = chan->buffer_seq;
1215
l2cap_send_sframe(chan, &control);
1216
}
1217
1218
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1219
{
1220
if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1221
return true;
1222
1223
return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1224
}
1225
1226
/* Send an L2CAP Connection Request for @chan.  Allocates a fresh signal
 * identifier (stored in chan->ident for matching the response) and marks
 * the channel as having a connect pending.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1240
1241
/* Transition @chan to BT_CONNECTED and notify its owner.  For the LE /
 * extended credit-based flow-control modes, suspend the channel first if
 * the remote granted no TX credits yet.  Idempotent: returns early if
 * the channel was already flagged connected.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* No credits to send with yet: block the sender */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1266
1267
/* Send an LE credit-based Connection Request for @chan.  Guarded by
 * FLAG_LE_CONN_REQ_SENT so the request goes out at most once.  The MTU
 * defaults to the link MTU when the channel did not set one.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	/* Initialize rx credits/mps before advertising them below */
	l2cap_le_flowctl_init(chan, 0);

	memset(&req, 0, sizeof(req));
	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1292
1293
/* Scratch state used while building an enhanced credit-based connect
 * request that may batch several deferred channels into one PDU.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req_hdr req;
		__le16 scid[5];		/* up to 5 source CIDs per request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel that initiated the request */
	struct pid *pid;		/* owner PID; only same-PID chans batch */
	int count;			/* number of scid[] entries filled */
};
1302
1303
/* Channel-list iterator callback: fold a deferred channel into the
 * pending ecred connect request in @data if it matches the initiating
 * channel's PID and PSM, is in EXT_FLOWCTL mode, state BT_CONNECT, and
 * has no ident assigned yet.  Matching channels get the initiator's
 * ident so a single response completes them all.
 */
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_conn_data *conn = data;
	struct pid *pid;

	/* The initiating channel is already in the PDU */
	if (chan == conn->chan)
		return;

	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only add deferred channels with the same PID/PSM */
	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}
1334
1335
/* Send an enhanced credit-based Connection Request for @chan, batching
 * any other deferred channels with the same PID/PSM into the same PDU
 * (see l2cap_ecred_defer_connect).  Guarded by FLAG_ECRED_CONN_REQ_SENT
 * so the request goes out at most once; deferred channels wait for the
 * batch instead of sending on their own.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Pull in matching deferred channels; grows data.count */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1367
1368
/* Drive an LE channel forward: enforce the channel's security level
 * first, mark PSM-less (fixed) channels ready immediately, and for
 * connecting channels issue the appropriate connect request (ecred for
 * EXT_FLOWCTL, LE credit-based otherwise).
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	/* No PSM means a fixed channel: nothing to connect */
	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT) {
		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
			l2cap_ecred_connect(chan);
		else
			l2cap_le_connect(chan);
	}
}
1387
1388
static void l2cap_start_connection(struct l2cap_chan *chan)
1389
{
1390
if (chan->conn->hcon->type == LE_LINK) {
1391
l2cap_le_start(chan);
1392
} else {
1393
l2cap_send_conn_req(chan);
1394
}
1395
}
1396
1397
/* Send an Information Request (feature mask) for @conn, at most once.
 * Arms the info timer so a non-responding peer does not stall channel
 * setup forever (see l2cap_info_timeout).
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1414
1415
/* Return true if @hcon's encryption key is long enough for @chan, or if
 * the link is not encrypted at all (no key requirement applies then).
 */
static bool l2cap_check_enc_key_size(struct hci_conn *hcon,
				     struct l2cap_chan *chan)
{
	/* The minimum encryption key size needs to be enforced by the
	 * host stack before establishing any L2CAP connections. The
	 * specification in theory allows a minimum of 1, but to align
	 * BR/EDR and LE transports, a minimum of 7 is chosen.
	 *
	 * This check might also be called for unencrypted connections
	 * that have no key size requirements. Ensure that the link is
	 * actually encrypted before enforcing a key size.
	 */
	int min_key_size = hcon->hdev->min_enc_key_size;

	/* On FIPS security level, key size must be 16 bytes */
	if (chan->sec_level == BT_SECURITY_FIPS)
		min_key_size = 16;

	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
		hcon->enc_key_size >= min_key_size);
}
1436
1437
/* Attempt to establish @chan.  LE links delegate to l2cap_le_start();
 * BR/EDR links first run the info req/rsp exchange, then require passing
 * security and no connect already pending.  A too-short encryption key
 * schedules a disconnect instead of connecting.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Feature mask exchange must run first; channel setup resumes
	 * from l2cap_conn_start() once it completes.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	if (l2cap_check_enc_key_size(conn->hcon, chan))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1463
1464
/* Non-zero iff @mode (ERTM or streaming) is supported both locally and
 * by the remote feature mask.  ERTM/streaming are only advertised
 * locally when the disable_ertm module parameter is unset.
 */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 local_mask = l2cap_feat_mask;

	if (!disable_ertm)
		local_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	switch (mode) {
	case L2CAP_MODE_ERTM:
		return L2CAP_FEAT_ERTM & feat_mask & local_mask;
	case L2CAP_MODE_STREAMING:
		return L2CAP_FEAT_STREAMING & feat_mask & local_mask;
	default:
		return 0x00;
	}
}
1479
1480
/* Send a Disconnection Request for @chan and move it to BT_DISCONN with
 * @err recorded.  For connected ERTM channels the retransmission,
 * monitor and ack timers are stopped first so nothing fires mid-teardown.
 * No-op when the channel has no connection.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1501
1502
/* ---- L2CAP connections ---- */
1503
/* Walk every channel on @conn and advance its setup, typically invoked
 * once the info req/rsp exchange completes.  Connectionless channels
 * become ready immediately; BT_CONNECT channels are (re)started or
 * closed if their mode is unsupported or the key is too short;
 * BT_CONNECT2 channels get their pending connect response sent,
 * followed by the first configuration request on success.
 * Each channel is processed under its own lock.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* State-2 devices cannot fall back to basic mode:
			 * close if the negotiated mode is unsupported.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon, chan))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Accept deferred to userspace */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a fresh success response proceeds to config */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}
}
1578
1579
/* LE link came up: start security for outgoing pairing and, when acting
 * as peripheral with a connection interval outside the configured
 * min/max range, request a connection parameter update from the central.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1611
1612
/* Link-level connection established: advance every existing channel
 * (LE start, ready, or do_start depending on type/state), run the LE
 * post-connect work, and flush any rx frames that were queued while the
 * connection was still being set up.  ACL links also kick off the
 * feature-mask info exchange.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels wait for the info exchange */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames received before the connection was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1647
1648
/* Notify sockets that we cannot guarantee reliability anymore */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1650
{
1651
struct l2cap_chan *chan;
1652
1653
BT_DBG("conn %p", conn);
1654
1655
list_for_each_entry(chan, &conn->chan_l, list) {
1656
if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1657
l2cap_chan_set_err(chan, err);
1658
}
1659
}
1660
1661
/* Info req/rsp exchange timed out: mark the exchange as done anyway so
 * channel setup can proceed with whatever features are assumed, then
 * restart pending channels.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	mutex_lock(&conn->lock);
	l2cap_conn_start(conn);
	mutex_unlock(&conn->lock);
}
1673
1674
/*
1675
* l2cap_user
1676
* External modules can register l2cap_user objects on l2cap_conn. The ->probe
1677
* callback is called during registration. The ->remove callback is called
1678
* during unregistration.
1679
* An l2cap_user object can either be explicitly unregistered or when the
1680
* underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1681
* l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1682
* External modules must own a reference to the l2cap_conn object if they intend
1683
* to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1684
* any time if they don't.
1685
*/
1686
1687
/* Register an external l2cap_user on @conn.  Fails with -EINVAL if the
 * user object is already linked somewhere, and -ENODEV if the connection
 * has already been torn down.  The user's ->probe() callback runs under
 * the hci_dev lock; its failure aborts registration.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
1723
EXPORT_SYMBOL(l2cap_register_user);
1724
1725
/* Unregister a previously registered l2cap_user from @conn.  Safe to
 * call if the user was already removed (list_empty check).  The user's
 * ->remove() callback runs under the hci_dev lock, mirroring
 * registration.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
1740
EXPORT_SYMBOL(l2cap_unregister_user);
1741
1742
/* Remove and notify every registered l2cap_user on @conn.  Uses a
 * drain-the-head loop (rather than list iteration) so the list stays
 * consistent even if ->remove() callbacks touch it.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
1752
1753
/* Tear down the L2CAP connection attached to @hcon, delivering @err to
 * every channel.  Order matters: pending rx work and timers are stopped
 * before channels are killed, users are unregistered, and the hci_chan
 * is released last before dropping the conn reference.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	mutex_lock(&conn->lock);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	cancel_delayed_work_sync(&conn->id_addr_timer);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	/* Kill channels: hold a ref across del/close so the chan cannot
	 * disappear under us.
	 */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hci_chan_del(conn->hchan);
	conn->hchan = NULL;

	hcon->l2cap_data = NULL;
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
}
1806
1807
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn and free it.  Invoked from l2cap_conn_put() when the last
 * reference goes away.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1814
1815
/* Take a reference on @conn; returns @conn for call-chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
1820
EXPORT_SYMBOL(l2cap_conn_get);
1821
1822
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the
 * count reaches zero.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
1826
EXPORT_SYMBOL(l2cap_conn_put);
1827
1828
/* ---- Socket interface ---- */
1829
1830
/* Find socket with psm and source / destination bdaddr.
1831
* Returns closest match.
1832
*/
1833
/* Look up a global channel by @psm, filtered by @state (0 = any) and
 * transport (@link_type).  Prefers an exact src/dst address match;
 * otherwise returns the closest wildcard (BDADDR_ANY) match.  The
 * returned channel carries a reference taken under chan_list_lock, or
 * NULL if nothing matched (or the candidate's refcount already hit zero).
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Channel must live on the requested transport */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
1883
1884
/* ERTM monitor timer expiry: feed the MONITOR_TO event into the tx
 * state machine.  Drops the reference the timer held on the channel;
 * bails out early if the channel already lost its connection.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1904
1905
/* ERTM retransmission timer expiry: feed the RETRANS_TO event into the
 * tx state machine.  Drops the reference the timer held on the channel;
 * bails out early if the channel already lost its connection.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1924
1925
/* Transmit every PDU in @skbs (plus anything already queued) in
 * streaming mode: each frame gets the next tx sequence number, a packed
 * control field, and an optional CRC16 FCS, then goes straight out —
 * no retransmission bookkeeping.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1960
1961
/* Transmit as many queued I-frames as the remote tx window allows in
 * ERTM mode.  Each frame is finalized (F-bit, reqseq ack, txseq,
 * control field, FCS) and then cloned for transmission so the original
 * stays queued for potential retransmission.  Returns the number of
 * frames sent, 0 when the remote is busy, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame acknowledges the current buffer_seq */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2027
2028
/* Retransmit every sequence number queued on chan->retrans_list.  Each
 * frame's retry count is bumped (disconnecting if max_tx is exceeded),
 * its control field and FCS are refreshed in a writable copy, and the
 * copy is sent.  Does nothing while the remote side reports busy.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		/* Too many retries: give up and disconnect */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2105
2106
/* Queue the single sequence number in @control->reqseq for
 * retransmission and run the resend machinery immediately.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2114
2115
/* Queue every unacked frame from @control->reqseq onward for
 * retransmission and resend them.  A poll in @control queues an F-bit
 * for the next frame out.  Does nothing while the remote reports busy
 * (the retrans list is still cleared so stale entries don't linger).
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame at or after reqseq that is still
		 * unsent territory; everything from there up to
		 * tx_send_head needs retransmission.
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2148
2149
/* Acknowledge received I-frames.  When locally busy, send RNR
 * immediately.  Otherwise try to piggyback the ack on outgoing I-frames
 * via l2cap_ertm_send(); if frames still need acking and the window is
 * 3/4 full, send an explicit RR now, else (re)arm the ack timer to
 * batch the ack later.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2198
2199
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb head, the remainder into a chain of MTU-sized
 * fragment skbs on the frag_list.  Returns total bytes copied, -EFAULT
 * on a failed copy, or the allocator's error code.  Partially-filled
 * fragments remain attached to @skb for the caller to free.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
					 &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Keep the parent skb's accounting in sync */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2242
2243
/* Build a connectionless (G-frame) PDU: basic header, 2-byte PSM, then
 * @len bytes of user data from @msg (fragmented across the link MTU as
 * needed).  Returns the skb or an ERR_PTR from allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* First fragment carries the header; cap it to the link MTU */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2274
2275
/* Build a basic-mode (B-frame) PDU: basic header followed by @len bytes
 * of user data from @msg (fragmented across the link MTU as needed).
 * Returns the skb or an ERR_PTR from allocation/copy failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2304
2305
/* Build an ERTM/streaming I-frame PDU.  Layout: basic header, control
 * field placeholder (filled later by __pack_control), optional SDU
 * length (only on the first segment of a segmented SDU, @sdulen != 0),
 * user data, with room reserved for an FCS when CRC16 is in use.
 * Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2358
2359
/* Segment an outgoing SDU into I-frame PDUs for ERTM/streaming mode and
 * append them to @seg_queue. Each PDU is tagged with its SAR value
 * (unsegmented, start, continue, end). Returns 0 on success or a negative
 * errno, in which case @seg_queue is purged.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* SDU fits in one PDU: no SDU-length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First segment carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the first segment includes the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2424
2425
/* Build one LE/enhanced credit based flow control (K-frame) PDU. @sdulen
 * is non-zero only for the first segment of an SDU, in which case the
 * 2-byte SDU-length field is included after the basic header.
 * Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* Length field covers the optional SDU-length field too */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2467
2468
/* Segment an outgoing SDU into K-frames for LE credit based flow control
 * and append them to @seg_queue. The first PDU carries the SDU length and
 * therefore has L2CAP_SDULEN_SIZE less payload room than the rest.
 * Returns 0 on success or a negative errno (queue is purged on failure).
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First segment loses 2 bytes of payload to the SDU-length field */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Subsequent segments omit the SDU-length field, so
			 * they can carry the full remote MPS worth of payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2503
2504
static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2505
{
2506
int sent = 0;
2507
2508
BT_DBG("chan %p", chan);
2509
2510
while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2511
l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2512
chan->tx_credits--;
2513
sent++;
2514
}
2515
2516
BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2517
skb_queue_len(&chan->tx_q));
2518
}
2519
2520
static void l2cap_tx_timestamp(struct sk_buff *skb,
2521
const struct sockcm_cookie *sockc,
2522
size_t len)
2523
{
2524
struct sock *sk = skb ? skb->sk : NULL;
2525
2526
if (sk && sk->sk_type == SOCK_STREAM)
2527
hci_setup_tx_timestamp(skb, len, sockc);
2528
else
2529
hci_setup_tx_timestamp(skb, 1, sockc);
2530
}
2531
2532
/* Timestamp a segmented transmission. For stream sockets the completion
 * event should fire when the *last* segment is sent, so the tail skb is
 * timestamped; otherwise the first segment is used.
 */
static void l2cap_tx_timestamp_seg(struct sk_buff_head *queue,
				   const struct sockcm_cookie *sockc,
				   size_t len)
{
	struct sk_buff *skb = skb_peek(queue);
	struct sock *sk = skb ? skb->sk : NULL;

	if (sk && sk->sk_type == SOCK_STREAM)
		l2cap_tx_timestamp(skb_peek_tail(queue), sockc, len);
	else
		l2cap_tx_timestamp(skb, sockc, len);
}
2544
2545
/* Send user data on @chan, dispatching on the channel's operating mode.
 * Returns the number of bytes accepted (== @len) or a negative errno.
 * Caller is expected to hold the channel lock.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    const struct sockcm_cookie *sockc)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have gone down while segmenting (the lock can
		 * be dropped during memory allocation); drop the segments.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		l2cap_tx_timestamp_seg(&seg_queue, sockc, len);

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: back-pressure the socket until the peer
		 * grants more.
		 */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_tx_timestamp(skb, sockc, len);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM) {
			/* TODO: ERTM mode timestamping */
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		} else {
			l2cap_tx_timestamp_seg(&seg_queue, sockc, len);
			l2cap_streaming_send(chan, &seg_queue);
		}

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2658
2659
/* Send SREJ S-frames for every missing sequence number between the next
 * expected TX seq and @txseq (exclusive), skipping frames already held in
 * the SREJ queue, and record each request in the SREJ list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Only request frames we have not already buffered */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	/* Reception continues after the out-of-order frame just received */
	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2681
2682
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2683
{
2684
struct l2cap_ctrl control;
2685
2686
BT_DBG("chan %p", chan);
2687
2688
if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2689
return;
2690
2691
memset(&control, 0, sizeof(control));
2692
control.sframe = 1;
2693
control.super = L2CAP_SUPER_SREJ;
2694
control.reqseq = chan->srej_list.tail;
2695
l2cap_send_sframe(chan, &control);
2696
}
2697
2698
/* Re-send SREJ frames for all still-missing sequence numbers, rotating
 * through the SREJ list exactly once and stopping early if @txseq (which
 * has just arrived) is found.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		/* Pop, re-request, and re-append keeps list order intact */
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2723
2724
/* Process an acknowledgement (ReqSeq) from the peer: free every TX-queued
 * frame up to (but not including) @reqseq and update the unacked count.
 * Stops the retransmission timer once everything is acknowledged.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack adds no new information */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		/* Frame may already have been released; only drop if present */
		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	/* All frames acknowledged: no retransmission pending */
	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2755
2756
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2757
{
2758
BT_DBG("chan %p", chan);
2759
2760
chan->expected_tx_seq = chan->buffer_seq;
2761
l2cap_seq_list_clear(&chan->srej_list);
2762
skb_queue_purge(&chan->srej_q);
2763
chan->rx_state = L2CAP_RX_STATE_RECV;
2764
}
2765
2766
/* TX state machine handler for the XMIT state (normal transmission).
 *
 * @chan:    channel being operated on
 * @control: received control field, or NULL for locally generated events
 * @skbs:    queue of new I-frames for L2CAP_EV_DATA_REQUEST, else NULL
 * @event:   L2CAP_EV_* event being fed into the state machine
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Append new frames to the TX queue and transmit immediately */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR was sent while busy; poll the peer with an
			 * RR so it knows it may resume transmission.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Poll the peer and wait for a frame with the Final bit */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll to discover the peer's state */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2837
2838
/* TX state machine handler for the WAIT_F state (a poll has been sent and
 * we are waiting for a frame with the Final bit set).
 *
 * @chan:    channel being operated on
 * @control: received control field, or NULL for locally generated events
 * @skbs:    queue of new I-frames for L2CAP_EV_DATA_REQUEST, else NULL
 * @event:   L2CAP_EV_* event being fed into the state machine
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR was sent while busy; poll the peer with an
			 * RR so it knows it may resume transmission.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		fallthrough;

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			/* Was "0x2.2%x": misplaced '%' printed a literal
			 * "2.2" and applied no width/precision to the value.
			 */
			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* max_tx == 0 means retry the poll indefinitely */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			/* Peer unresponsive: tear the channel down */
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
2914
2915
/* Feed @event into the ERTM TX state machine, dispatching to the handler
 * for the channel's current TX state. Events arriving in an unknown state
 * are silently dropped.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	if (chan->tx_state == L2CAP_TX_STATE_XMIT)
		l2cap_tx_state_xmit(chan, control, skbs, event);
	else if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
		l2cap_tx_state_wait_f(chan, control, skbs, event);
	/* else: ignore event */
}
2933
2934
/* Forward a received ReqSeq + Final bit to the TX state machine so it can
 * release acknowledged frames.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2940
2941
/* Forward only a received Final bit to the TX state machine (no ReqSeq
 * processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2947
2948
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		/* Clone failure is non-fatal: skip this channel */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* recv takes ownership on success; free on rejection */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}
}
2971
2972
/* ---- L2CAP signalling commands ---- */
2973
/* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header + command header + @dlen
 * bytes of @data. Payload exceeding the connection MTU is carried in
 * header-less continuation fragments chained via frag_list.
 * Returns the skb or NULL on failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	/* Cannot even fit the headers in one fragment */
	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* First fragment carries as much payload as fits after the
		 * two headers.
		 */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and, with it, any chained fragments */
	kfree_skb(skb);
	return NULL;
}
3038
3039
/* Parse one configuration option at *@ptr, returning its @type, @olen and
 * value in @val (small values inline, larger ones as a pointer cast into
 * the buffer). Advances *@ptr past the option and returns the total bytes
 * consumed. NOTE(review): opt->len is taken from the wire without a bounds
 * check here — the caller is responsible for validating remaining length.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer to the data */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3072
3073
/* Append one configuration option to the buffer at *@ptr and advance the
 * pointer. @size is the space remaining; options that would overflow are
 * silently dropped. For lengths other than 1/2/4, @val is interpreted as a
 * pointer to @len bytes of option data.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Not enough room left: drop the option rather than overflow */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val) = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* val carries a pointer to the raw option payload */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3105
3106
/* Append an Extended Flow Specification option describing @chan to the
 * config buffer at *@ptr. Only ERTM and streaming modes carry an EFS;
 * other modes add nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		/* Streaming mode uses a fixed best-effort service */
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3136
3137
/* Delayed-work handler for the ERTM ack timer: if any received frames are
 * still unacknowledged, send an RR/RNR acknowledging them.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	/* Drops the reference presumably taken when the work was scheduled
	 * (NOTE(review): matching get is outside this view — confirm).
	 */
	l2cap_chan_put(chan);
}
3156
3157
/* Reset per-channel transmit/receive sequence state and initialise the TX
 * queue. For ERTM mode, additionally set up the RX/TX state machines and
 * allocate the SREJ and retransmission sequence lists.
 * Returns 0 on success or a negative errno from list allocation.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming and other modes need no further ERTM state */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		/* Undo the first allocation on partial failure */
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3192
3193
/* Pick the operating mode to use: ERTM/streaming only when the remote's
 * feature mask says it is supported, otherwise fall back to basic mode.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	if ((mode == L2CAP_MODE_STREAMING || mode == L2CAP_MODE_ERTM) &&
	    l2cap_mode_supported(mode, remote_feat_mask))
		return mode;

	return L2CAP_MODE_BASIC;
}
3205
3206
static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3207
{
3208
return (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW);
3209
}
3210
3211
static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3212
{
3213
return (conn->feat_mask & L2CAP_FEAT_EXT_FLOW);
3214
}
3215
3216
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3217
struct l2cap_conf_rfc *rfc)
3218
{
3219
rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3220
rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3221
}
3222
3223
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3224
{
3225
if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3226
__l2cap_ews_supported(chan->conn)) {
3227
/* use extended control field */
3228
set_bit(FLAG_EXT_CTRL, &chan->flags);
3229
chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3230
} else {
3231
chan->tx_win = min_t(u16, chan->tx_win,
3232
L2CAP_DEFAULT_TX_WINDOW);
3233
chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3234
}
3235
chan->ack_win = chan->tx_win;
3236
}
3237
3238
static void l2cap_mtu_auto(struct l2cap_chan *chan)
3239
{
3240
struct hci_conn *conn = chan->conn->hcon;
3241
3242
chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3243
3244
/* The 2-DH1 packet has between 2 and 56 information bytes
3245
* (including the 2-byte payload header)
3246
*/
3247
if (!(conn->pkt_type & HCI_2DH1))
3248
chan->imtu = 54;
3249
3250
/* The 3-DH1 packet has between 2 and 85 information bytes
3251
* (including the 2-byte payload header)
3252
*/
3253
if (!(conn->pkt_type & HCI_3DH1))
3254
chan->imtu = 83;
3255
3256
/* The 2-DH3 packet has between 2 and 369 information bytes
3257
* (including the 2-byte payload header)
3258
*/
3259
if (!(conn->pkt_type & HCI_2DH3))
3260
chan->imtu = 367;
3261
3262
/* The 3-DH3 packet has between 2 and 554 information bytes
3263
* (including the 2-byte payload header)
3264
*/
3265
if (!(conn->pkt_type & HCI_3DH3))
3266
chan->imtu = 552;
3267
3268
/* The 2-DH5 packet has between 2 and 681 information bytes
3269
* (including the 2-byte payload header)
3270
*/
3271
if (!(conn->pkt_type & HCI_2DH5))
3272
chan->imtu = 679;
3273
3274
/* The 3-DH5 packet has between 2 and 1023 information bytes
3275
* (including the 2-byte payload header)
3276
*/
3277
if (!(conn->pkt_type & HCI_3DH5))
3278
chan->imtu = 1021;
3279
}
3280
3281
/* Build an outgoing configuration request for @chan into @data (capacity
 * @data_size). Selects the channel mode on the first request, then emits
 * MTU, RFC, and optionally EFS/EWS/FCS options as appropriate.
 * Returns the number of bytes written.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the very first config exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A state-2 device keeps its requested mode unconditionally */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		/* Downgrade to a mode the remote actually supports */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		/* imtu == 0 requests auto-selection from the link packet types */
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only advertise basic mode explicitly if the peer could
		 * otherwise assume ERTM/streaming.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Cap MPS so a full PDU (worst-case header + FCS) fits in
		 * the connection MTU.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Extended window sizes go in a separate EWS option */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		/* Streaming mode has no window, retries, or timeouts */
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3406
3407
/* Parse a complete (reassembled) Configure Request from the peer and build
 * the Configure Response into @data (at most @data_size bytes).
 *
 * Walks the option list in chan->conf_req, records the peer's requested
 * parameters (MTU, flush timeout, RFC, FCS, EFS), then fills in our
 * response options and updates chan state bits (CONF_*_DONE etc.).
 *
 * Returns the length of the response written to @data, or -ECONNREFUSED
 * when the request is unacceptable and the channel must be disconnected.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = 0;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: decode every option the peer sent.  Options with an
	 * unexpected length are silently ignored (break out of the case).
	 */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS is accepted but not acted upon. */
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended window size is not supported on BR/EDR
			 * here; refuse the connection.
			 */
			return -ECONNREFUSED;

		default:
			/* Hint options may be ignored; unknown non-hint
			 * options force an "unknown option" response that
			 * echoes the offending option type.
			 */
			if (hint)
				break;
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Mode negotiation only happens on the first request/response
	 * exchange; later rounds keep the already-chosen mode.
	 */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			/* Not a state-2 device: fall back to whatever mode
			 * is mutually supported.
			 */
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State-2 device insists on its configured mode. */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Mode still disputed after one full round: give up. */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		/* If MTU is not provided in configure request, try adjusting it
		 * to the current output MTU if it has been set
		 *
		 * Bluetooth Core 6.1, Vol 3, Part A, Section 4.5
		 *
		 * Each configuration parameter value (if any is present) in an
		 * L2CAP_CONFIGURATION_RSP packet reflects an ‘adjustment’ to a
		 * configuration parameter value that has been sent (or, in case
		 * of default values, implied) in the corresponding
		 * L2CAP_CONFIGURATION_REQ packet.
		 */
		if (!mtu) {
			/* Only adjust for ERTM channels as for older modes the
			 * remote stack may not be able to detect that the
			 * adjustment causing it to silently drop packets.
			 */
			if (chan->mode == L2CAP_MODE_ERTM &&
			    chan->omtu && chan->omtu != L2CAP_DEFAULT_MTU)
				mtu = chan->omtu;
			else
				mtu = L2CAP_DEFAULT_MTU;
		}

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp peer MPS so an ERTM PDU (extended header +
			 * SDU length + FCS) always fits in the ACL MTU.
			 */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}
3649
3650
/* Parse a Configure Response from the peer and build a new Configure
 * Request (into @data, at most @size bytes) echoing the values we accept.
 *
 * @rsp/@len is the option list from the peer's response; *@result carries
 * the response's result code in and may be updated (e.g. to UNACCEPT when
 * the peer proposed an MTU below the minimum).
 *
 * Returns the length of the request written to @data, or -ECONNREFUSED
 * when the peer's adjustments are unacceptable.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				/* Too small: counter with the minimum. */
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A state-2 device never changes mode mid-way. */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			/* FCS may only be disabled while still pending. */
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* Basic mode cannot be re-negotiated to anything else. */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3767
3768
/* Fill in a bare (option-less) Configure Response for @chan into @data
 * with the given @result and @flags.  Returns the number of bytes used,
 * i.e. the size of the fixed response header.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;

	BT_DBG("chan %p", chan);

	rsp->flags = cpu_to_le16(flags);
	rsp->result = cpu_to_le16(result);
	rsp->scid = cpu_to_le16(chan->dcid);

	/* No options appended: length is header only. */
	return (void *)rsp->data - data;
}
3782
3783
void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3784
{
3785
struct l2cap_le_conn_rsp rsp;
3786
struct l2cap_conn *conn = chan->conn;
3787
3788
BT_DBG("chan %p", chan);
3789
3790
rsp.dcid = cpu_to_le16(chan->scid);
3791
rsp.mtu = cpu_to_le16(chan->imtu);
3792
rsp.mps = cpu_to_le16(chan->mps);
3793
rsp.credits = cpu_to_le16(chan->rx_credits);
3794
rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3795
3796
l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3797
&rsp);
3798
}
3799
3800
/* Per-channel callback used while walking all channels sharing an ident:
 * counts channels still pending accept in *data (an int), or sets it to
 * -ECONNREFUSED if any channel was refused.  Stops contributing once a
 * refusal has been recorded or for outgoing-request channels.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Skip once refused, and skip channels we initiated ourselves. */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* Channel still pending accept: add it to the count. */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* Neither connected nor pending accept: it was refused. */
		*result = -ECONNREFUSED;
	}
}
3820
3821
/* Scratch buffer for building a deferred Enhanced Credit Based connection
 * response: the fixed response header followed by room for the maximum
 * number of CIDs, plus a running count of CIDs filled in so far.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp_hdr rsp;
		__le16 scid[L2CAP_ECRED_MAX_CID]; /* CID list appended after the header */
	} __packed pdu;
	int count; /* number of scid[] entries populated */
};
3828
3829
/* Per-channel callback that appends a deferred channel's CID to the ECRED
 * connection response being built in @data, or tears the channel down if
 * the aggregate result is a failure.
 */
static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_rsp_data *rsp = data;
	/* View the header+scid[] scratch area as the flexible-array response
	 * struct so dcid[] indexes into the scid[] storage.
	 */
	struct l2cap_ecred_conn_rsp *rsp_flex =
		container_of(&rsp->pdu.rsp, struct l2cap_ecred_conn_rsp, hdr);

	/* Check if channel for outgoing connection or if it wasn't deferred
	 * since in those cases it must be skipped.
	 */
	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) ||
	    !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	/* Reset ident so only one response is sent */
	chan->ident = 0;

	/* Include all channels pending with the same ident */
	if (!rsp->pdu.rsp.result)
		rsp_flex->dcid[rsp->count++] = cpu_to_le16(chan->scid);
	else
		l2cap_chan_del(chan, ECONNRESET);
}
3851
3852
/* Send the deferred Enhanced Credit Based Connection Response for every
 * channel sharing @chan's ident.  First verifies that no channel in the
 * group is still pending accept (in which case sending is postponed),
 * then builds and sends a single response covering all of them.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	/* ident 0 means no request is outstanding for this channel. */
	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	/* result > 0: some channel still pending accept, try again later. */
	if (result > 0)
		return;

	/* result < 0: at least one channel was refused. */
	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
3887
3888
/* Send the deferred BR/EDR Connection Response (success) for @chan and,
 * if we have not already done so, kick off configuration by sending the
 * first Configure Request.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128]; /* scratch for the outgoing Configure Request */
	u8 rsp_code;

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* test_and_set makes the Configure Request a one-shot. */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
	chan->num_conf_req++;
}
3912
3913
/* Extract the final RFC (and extended window size) values from a
 * successful Configure Response and apply them to @chan's ERTM/streaming
 * parameters.  No-op for basic-mode channels.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control the EWS option carries the window;
		 * otherwise it comes from the RFC option.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3968
3969
/* Handle an incoming L2CAP Command Reject.  If it rejects our pending
 * Information Request (command not understood), finish feature-mask
 * discovery and start the queued channels anyway.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	/* Only react if the reject matches our outstanding info request. */
	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3993
3994
/* Handle an incoming BR/EDR Connection Request: locate a listening
 * channel for the requested PSM, run security and CID validity checks,
 * create the new child channel and send the Connection Response
 * (@rsp_code).  May also trigger feature-mask discovery and the first
 * Configure Request.
 */
static void l2cap_connect(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
			  u8 *data, u8 rsp_code)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    (!hci_conn_check_link_mode(conn->hcon) ||
	    !l2cap_check_enc_key_size(conn->hcon, pchan))) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Wait for user-space to accept. */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress. */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet: answer pending and ask. */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* No parent channel found: nothing is locked, just return. */
	if (!pchan)
		return;

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);
}
4124
4125
/* Handle an incoming L2CAP_CONN_REQ: validate the command length and
 * delegate the actual connection setup to l2cap_connect().
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	/* Truncated request: signal a protocol error to the caller. */
	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP);
	return 0;
}
4134
4135
/* Handle an incoming Connection Response for a connection we initiated.
 * On success, records the peer's DCID, moves the channel to BT_CONFIG
 * and sends the first Configure Request; on PEND just flags the channel;
 * any other result tears the channel down.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128]; /* scratch for the outgoing Configure Request */
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* A successful response must carry a DCID in the dynamic range. */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	/* Look up by our SCID if echoed, otherwise by command ident. */
	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EBADSLT;
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EBADSLT;
	}

	/* Pin the channel so it cannot be freed while we work on it. */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Peer must not hand out a DCID already in use. */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return err;
}
4212
4213
static inline void set_default_fcs(struct l2cap_chan *chan)
4214
{
4215
/* FCS is enabled only in ERTM or streaming mode, if one or both
4216
* sides request it.
4217
*/
4218
if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4219
chan->fcs = L2CAP_FCS_NONE;
4220
else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4221
chan->fcs = L2CAP_FCS_CRC16;
4222
}
4223
4224
/* Send the final (success) Configure Response for an EFS negotiation,
 * clearing the local "configuration pending" state and marking output
 * configuration as done.  @data is caller-provided scratch space.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
		       l2cap_build_conf_rsp(chan, data,
					    L2CAP_CONF_SUCCESS, flags), data);
}
4239
4240
/* Send a Command Reject (invalid CID) naming the offending source and
 * destination CID pair.
 */
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej = {
		.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID),
		.scid   = __cpu_to_le16(scid),
		.dcid   = __cpu_to_le16(dcid),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
4251
4252
/* Handle an incoming Configure Request.  Accumulates continuation
 * fragments in chan->conf_req; once the request is complete, parses it,
 * sends our Configure Response, and — when both directions are done —
 * finishes channel setup (ERTM init / chan ready).
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64]; /* scratch for the outgoing response */
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel locked and with a reference held. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: finish channel setup. */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4360
4361
/* Handle an incoming Configure Response.  Depending on the result code,
 * applies the negotiated values, re-sends an adjusted Configure Request
 * (UNACCEPT/UNKNOWN), or disconnects.  Completes channel setup once both
 * input and output configuration are done.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked and with a reference held. */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, 0);
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Retry with adjusted values, up to the round limit. */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	/* More fragments of the response to come. */
	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4467
4468
/* Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, then shut down and delete the channel.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's DCID is our SCID; returns the channel locked + held. */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	l2cap_chan_del(chan, ECONNRESET);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4506
4507
/* Handle an incoming Disconnection Response to a disconnect we sent:
 * if the channel is still in BT_DISCONN, delete and close it.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Returns the channel locked and with a reference held. */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		return 0;
	}

	/* Ignore stale responses for channels no longer disconnecting. */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return 0;
	}

	l2cap_chan_del(chan, 0);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4543
4544
/* Handle an incoming Information Request and answer it: feature mask,
 * fixed-channel map, or "not supported" for anything else.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS only when ERTM is enabled. */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		/* Remaining 7 octets of the fixed-channel map are zero. */
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
	}

	return 0;
}
4591
4592
/* Handle an L2CAP_INFO_RSP for an information request we sent.
 *
 * Drives the two-step discovery done at connection setup: first the
 * feature mask is fetched, then (if the peer supports it) the fixed
 * channel map.  Once both steps are done - or the peer reports failure -
 * L2CAP_INFO_FEAT_MASK_REQ_DONE is set and pending channels are started
 * via l2cap_conn_start().
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: give up on discovery and proceed */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Chain the second discovery step: query the
			 * peer's fixed channel map with a fresh ident.
			 */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		/* Discovery complete */
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4654
4655
/* Handle an LE L2CAP_CONN_PARAM_UPDATE_REQ from a peripheral.
 *
 * Only valid when we are the central (HCI_ROLE_MASTER).  The requested
 * parameters are validated, a response is sent either way, and on
 * acceptance the controller is asked to update the connection and the
 * new parameters are reported to the management interface.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Spec only allows the peripheral to request updates; a request
	 * received while we are peripheral triggers a command reject.
	 */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
4704
4705
/* Handle an LE credit-based connection response (L2CAP_LE_CONN_RSP).
 *
 * Matches the response to our pending request via cmd->ident.  On
 * success the channel is completed with the peer's DCID/MTU/MPS/credits;
 * on an authentication/encryption failure the security level is raised
 * and SMP is kicked so a new connect request can be sent later; any
 * other result tears the channel down.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* On success the spec requires MTU/MPS >= 23 and a DCID inside
	 * the LE dynamic range; anything else is a protocol violation.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					      dcid < L2CAP_CID_DYN_START ||
					      dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		return -EBADSLT;

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Reject a DCID that is already bound to another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

	return err;
}
4784
4785
/* Dispatch one BR/EDR signaling command to its handler.
 *
 * A non-zero return tells the caller (l2cap_sig_channel) to send an
 * L2CAP_COMMAND_REJ for this command; handlers whose errors should not
 * trigger a reject are deliberately called without capturing err.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
4843
4844
/* Handle an incoming LE credit-based connection request
 * (L2CAP_LE_CONN_REQ).
 *
 * Validates the request, finds a listening channel for the PSM, checks
 * security and CID constraints, and either creates a new channel or
 * prepares a failure response.  Unless setup is deferred to userspace
 * (L2CAP_CR_PEND), a response is always sent before returning.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* Spec-mandated minimums for LE credit-based channels */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		/* Tell the peer which kind of security is missing */
		result = pchan->sec_level == BT_SECURITY_MEDIUM ?
			L2CAP_CR_LE_ENCRYPTION : L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

	/* Deferred setup: the response is sent later from userspace */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
4981
4982
/* Handle an L2CAP_LE_CREDITS packet granting us more TX credits.
 *
 * Credits are added to the channel; if the grant would overflow the
 * allowed maximum the peer is misbehaving and the channel is
 * disconnected.  Sending is resumed when credits become available.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* Total credits must never exceed LE_FLOWCTL_MAX_CREDITS */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5028
5029
/* Handle an Enhanced Credit Based connection request
 * (L2CAP_ECRED_CONN_REQ), which may open up to L2CAP_ECRED_MAX_CID
 * channels at once.
 *
 * Each requested SCID is validated and answered individually in the
 * response's dcid array (0 marks a refused channel); `result` reflects
 * the last failure seen, or success.  If any channel defers setup to
 * userspace, the response is postponed and sent later.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	DEFINE_RAW_FLEX(struct l2cap_ecred_conn_rsp, pdu, dcid, L2CAP_ECRED_MAX_CID);
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* The payload after the fixed header must be a whole number of
	 * 16-bit SCIDs.
	 */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > L2CAP_ECRED_MAX_CID) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	memset(pdu, 0, sizeof(*pdu));

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* Default each slot to "refused"; overwritten on success */
		pdu->dcid[i] = 0x0000;
		len += sizeof(*pdu->dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response */
		if (!pdu->credits) {
			pdu->mtu = cpu_to_le16(chan->imtu);
			pdu->mps = cpu_to_le16(chan->mps);
			pdu->credits = cpu_to_le16(chan->rx_credits);
		}

		pdu->dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	l2cap_chan_put(pchan);

response:
	pdu->result = cpu_to_le16(result);

	/* Deferred setup: the response is sent later from userspace */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(*pdu) + len, pdu);

	return 0;
}
5179
5180
/* Handle an Enhanced Credit Based connection response
 * (L2CAP_ECRED_CONN_RSP) covering all channels opened by one request.
 *
 * Walks every channel still pending under cmd->ident and consumes one
 * DCID from the response per channel, completing, retrying (after a
 * security upgrade) or deleting each one according to `result` and the
 * assigned DCID.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	/* cmd_len now counts the remaining bytes of the dcid array */
	cmd_len -= sizeof(*rsp);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			/* Also tear down the pre-existing holder of dcid */
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	return err;
}
5289
5290
static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
5291
struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5292
u8 *data)
5293
{
5294
struct l2cap_ecred_reconf_req *req = (void *) data;
5295
struct l2cap_ecred_reconf_rsp rsp;
5296
u16 mtu, mps, result;
5297
struct l2cap_chan *chan;
5298
int i, num_scid;
5299
5300
if (!enable_ecred)
5301
return -EINVAL;
5302
5303
if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
5304
result = L2CAP_CR_LE_INVALID_PARAMS;
5305
goto respond;
5306
}
5307
5308
mtu = __le16_to_cpu(req->mtu);
5309
mps = __le16_to_cpu(req->mps);
5310
5311
BT_DBG("mtu %u mps %u", mtu, mps);
5312
5313
if (mtu < L2CAP_ECRED_MIN_MTU) {
5314
result = L2CAP_RECONF_INVALID_MTU;
5315
goto respond;
5316
}
5317
5318
if (mps < L2CAP_ECRED_MIN_MPS) {
5319
result = L2CAP_RECONF_INVALID_MPS;
5320
goto respond;
5321
}
5322
5323
cmd_len -= sizeof(*req);
5324
num_scid = cmd_len / sizeof(u16);
5325
result = L2CAP_RECONF_SUCCESS;
5326
5327
for (i = 0; i < num_scid; i++) {
5328
u16 scid;
5329
5330
scid = __le16_to_cpu(req->scid[i]);
5331
if (!scid)
5332
return -EPROTO;
5333
5334
chan = __l2cap_get_chan_by_dcid(conn, scid);
5335
if (!chan)
5336
continue;
5337
5338
/* If the MTU value is decreased for any of the included
5339
* channels, then the receiver shall disconnect all
5340
* included channels.
5341
*/
5342
if (chan->omtu > mtu) {
5343
BT_ERR("chan %p decreased MTU %u -> %u", chan,
5344
chan->omtu, mtu);
5345
result = L2CAP_RECONF_INVALID_MTU;
5346
}
5347
5348
chan->omtu = mtu;
5349
chan->remote_mps = mps;
5350
}
5351
5352
respond:
5353
rsp.result = cpu_to_le16(result);
5354
5355
l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
5356
&rsp);
5357
5358
return 0;
5359
}
5360
5361
static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
5362
struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5363
u8 *data)
5364
{
5365
struct l2cap_chan *chan, *tmp;
5366
struct l2cap_ecred_conn_rsp *rsp = (void *) data;
5367
u16 result;
5368
5369
if (cmd_len < sizeof(*rsp))
5370
return -EPROTO;
5371
5372
result = __le16_to_cpu(rsp->result);
5373
5374
BT_DBG("result 0x%4.4x", rsp->result);
5375
5376
if (!result)
5377
return 0;
5378
5379
list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
5380
if (chan->ident != cmd->ident)
5381
continue;
5382
5383
l2cap_chan_del(chan, ECONNRESET);
5384
}
5385
5386
return 0;
5387
}
5388
5389
/* Handle an L2CAP_COMMAND_REJ received on an LE link.
 *
 * A reject for a command we sent means the pending channel (matched by
 * ident) will never be completed, so it is deleted.  Always returns 0:
 * rejecting a reject would only ping-pong.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	/* Take a reference only if the channel is not already dying */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

done:
	return 0;
}
5415
5416
/* Dispatch one LE signaling command to its handler.
 *
 * A non-zero return makes the caller (l2cap_le_sig_channel) answer with
 * an L2CAP_COMMAND_REJ.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5478
5479
/* Process one PDU from the LE signaling channel.
 *
 * Unlike BR/EDR, an LE signaling PDU carries exactly one command, so
 * the declared command length must match the remaining skb exactly.
 * Consumes the skb in all cases.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* Exactly one command per LE signaling PDU; ident 0 is reserved */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
5519
5520
/* Send an L2CAP_COMMAND_REJ with reason "command not understood" for
 * the signaling command identified by @ident.
 */
static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
{
	struct l2cap_cmd_rej_unk rej = {
		.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
5527
5528
/* Process one PDU from the BR/EDR signaling channel.
 *
 * A BR/EDR signaling PDU may concatenate several commands; they are
 * parsed in sequence.  A corrupted command is answered with a command
 * reject and parsing resynchronises on the next command where possible.
 * Consumes the skb in all cases.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Mirror raw signaling traffic to any raw sockets first */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Declared length overruns the PDU, or reserved ident 0:
		 * reject and skip what we can of the bogus payload.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		skb_pull(skb, len);
	}

	/* Trailing bytes too short for a command header */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}
5575
5576
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5577
{
5578
u16 our_fcs, rcv_fcs;
5579
int hdr_size;
5580
5581
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5582
hdr_size = L2CAP_EXT_HDR_SIZE;
5583
else
5584
hdr_size = L2CAP_ENH_HDR_SIZE;
5585
5586
if (chan->fcs == L2CAP_FCS_CRC16) {
5587
skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5588
rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5589
our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5590
5591
if (our_fcs != rcv_fcs)
5592
return -EBADMSG;
5593
}
5594
return 0;
5595
}
5596
5597
/* Acknowledge the peer's poll with an F-bit carried in an I-frame, an
 * RR s-frame, or an RNR s-frame, depending on local state.
 *
 * If we are locally busy an RNR is sent immediately; otherwise pending
 * I-frames are flushed first (one of them carries the F-bit), and only
 * if none did is a bare RR sent.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer is no longer busy: restart retransmissions if frames are
	 * still unacknowledged.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5630
5631
/* Append @new_frag to @skb's fragment list and update the caller's
 * last-fragment cursor.
 *
 * Note: on the very first fragment, *last_frag aliases @skb itself, so
 * the (*last_frag)->next assignment is what links the head; the
 * frag_list pointer is set in the same step.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	/* Account the new fragment in the head skb's totals */
	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5649
5650
/* Reassemble a (possibly segmented) SDU from one received ERTM frame.
 *
 * Ownership: on success (err == 0) the skb has been consumed, either
 * delivered via chan->ops->recv() or absorbed into the in-progress SDU
 * (signalled by setting the local skb pointer to NULL).  On error both
 * the frame and any partial SDU are freed.  The default err of -EINVAL
 * covers every "break on bad SAR state" path.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		/* The first segment carries the total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A "start" segment must not already hold the full SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb now owned by chan->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Continuation must not complete or overrun the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The final segment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* kfree_skb(NULL) is a no-op, so consumed skbs are safe */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5734
5735
/* Re-segment queued outgoing data after an MPS change.
 *
 * Not implemented yet: this stub unconditionally reports success so the
 * surrounding ERTM state machine can proceed.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5740
5741
void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5742
{
5743
u8 event;
5744
5745
if (chan->mode != L2CAP_MODE_ERTM)
5746
return;
5747
5748
event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5749
l2cap_tx(chan, NULL, NULL, event);
5750
}
5751
5752
/* Drain in-sequence I-frames from the SREJ hold queue.
 *
 * Frames buffered out of order during selective-reject recovery are
 * handed to l2cap_reassemble_sdu() in txseq order until a gap remains.
 * Once the queue is empty, normal reception resumes and an ack is sent.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	/* Recovery finished: leave SREJ_SENT state and acknowledge */
	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5785
5786
/* Handle a received SREJ (selective reject) s-frame.
 *
 * Validates reqseq, enforces the retransmission limit, then
 * retransmits the single requested I-frame.  The P/F bit handling and
 * CONN_SREJ_ACT bookkeeping follow the ERTM state tables: a poll forces
 * an F-bit response, while a duplicate SREJ for the same reqseq (after
 * the SREJ-actioned flag was set) must not retransmit again.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq asks for a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* Poll demands an F-bit in our reply */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Only retransmit if this final SREJ was not
			 * already actioned for the same reqseq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5843
5844
/* Handle an incoming REJ S-frame in ERTM mode: the peer rejects all
 * I-frames from control->reqseq onward, so every unacked frame must be
 * retransmitted (go-back-N recovery).
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A frame numbered next_tx_seq was never sent - invalid request */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions; the skb may be
	 * absent if the frame was already acked and freed.
	 */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this F=1 does not close out a REJ
		 * we already acted upon.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
5880
5881
/* Classify the txseq of a received I-frame relative to the current rx
 * state and window, returning one of the L2CAP_TXSEQ_* values that
 * drive the rx state machines: expected, duplicate, unexpected (gap),
 * one of the SREJ-recovery classifications, or invalid.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		/* Head of the SREJ list is the retransmission we are
		 * waiting for next.
		 */
		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than expected_tx_seq (mod window) was already
	 * received.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
5966
5967
/* RECV-state handler of the ERTM rx state machine: process one event
 * (I-frame or S-frame) while no SREJ recovery is in progress.  Takes
 * ownership of skb: it is either queued/consumed (tracked by
 * skb_in_use) or freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 * l2cap_reassemble_sdu
			 * chan->ops->recv == l2cap_sock_recv_cb
			 * __sock_queue_rcv_skb
			 * Another thread calls:
			 * bt_sock_recvmsg
			 * skb_recv_datagram
			 * skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; still pass reqseq/F info to tx */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote just left busy: restart the retransmit
			 * timer if frames are still outstanding.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Free any skb that was not queued above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6118
6119
/* SREJ_SENT-state handler of the ERTM rx state machine: SREJs are
 * outstanding, so in-order and retransmitted frames are parked on
 * srej_q until the sequence gaps are filled.  Takes ownership of skb
 * (queued or freed before returning).
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we were waiting for arrived;
			 * drop it from the pending-SREJ list and try to
			 * flush the queued out-of-order frames upward.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;
			/* NOTE(review): the conditional break above is
			 * redundant with this unconditional one.
			 */
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll with F=1 and re-request the
			 * most recent outstanding SREJ.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Acknowledge the RNR with a plain RR carrying
			 * the current buffer_seq.
			 */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Free any skb that was not queued above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6261
6262
static int l2cap_finish_move(struct l2cap_chan *chan)
6263
{
6264
BT_DBG("chan %p", chan);
6265
6266
chan->rx_state = L2CAP_RX_STATE_RECV;
6267
chan->conn->mtu = chan->conn->hcon->mtu;
6268
6269
return l2cap_resegment(chan);
6270
}
6271
6272
/* WAIT_P-state handler: we are waiting for a poll (P=1) frame from the
 * remote, used as part of channel-move completion (see
 * l2cap_finish_move).  Anything without P=1 is a protocol error.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not acceptable as the poll in this state */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Re-dispatch the S-frame event through the RECV-state handler */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6309
6310
/* WAIT_F-state handler: we are waiting for a final (F=1) frame from the
 * remote before resuming normal operation after a move; the tx queue is
 * rewound to the peer's reqseq and data is resegmented for the current
 * link MTU.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;
	chan->conn->mtu = chan->conn->hcon->mtu;

	err = l2cap_resegment(chan);

	/* Hand the frame itself to the normal RECV-state handler */
	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6343
6344
/* Make sure reqseq is for a packet that has been sent but not acked:
 * its (modular) distance back from next_tx_seq must not exceed the
 * span of currently unacknowledged frames.
 */
static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	u16 unacked_span = __seq_offset(chan, chan->next_tx_seq,
					chan->expected_ack_seq);

	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked_span;
}
6352
6353
/* Dispatch one received ERTM event to the handler for the channel's
 * current rx state, after validating the frame's reqseq.  An invalid
 * reqseq tears the channel down.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (!__valid_reqseq(chan, control->reqseq)) {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return err;
	}

	switch (chan->rx_state) {
	case L2CAP_RX_STATE_RECV:
		err = l2cap_rx_state_recv(chan, control, skb, event);
		break;
	case L2CAP_RX_STATE_SREJ_SENT:
		err = l2cap_rx_state_srej_sent(chan, control, skb,
					       event);
		break;
	case L2CAP_RX_STATE_WAIT_P:
		err = l2cap_rx_state_wait_p(chan, control, skb, event);
		break;
	case L2CAP_RX_STATE_WAIT_F:
		err = l2cap_rx_state_wait_f(chan, control, skb, event);
		break;
	default:
		/* shut it down */
		break;
	}

	return err;
}
6389
6390
/* Streaming-mode rx: frames are never retransmitted, so an unexpected
 * txseq discards any partially reassembled SDU and resynchronizes to
 * the received sequence number.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 * l2cap_reassemble_sdu
	 * chan->ops->recv == l2cap_sock_recv_cb
	 * __sock_queue_rcv_skb
	 * Another thread calls:
	 * bt_sock_recvmsg
	 * skb_recv_datagram
	 * skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence: drop the partial SDU and this frame */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resync: treat txseq as acked and expect its successor next */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
6441
6442
/* Entry point for ERTM/streaming data frames: unpack the control field,
 * validate FCS, payload length and F/P bits, then dispatch I-frames and
 * S-frames into the rx state machine.  Always consumes skb (directly or
 * via the handlers) and returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude SDU-length and FCS overhead from the payload length
	 * compared against the negotiated MPS.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the S-frame super field to an rx event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6534
6535
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6536
{
6537
struct l2cap_conn *conn = chan->conn;
6538
struct l2cap_le_credits pkt;
6539
u16 return_credits = l2cap_le_rx_credits(chan);
6540
6541
if (chan->rx_credits >= return_credits)
6542
return;
6543
6544
return_credits -= chan->rx_credits;
6545
6546
BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6547
6548
chan->rx_credits += return_credits;
6549
6550
pkt.cid = cpu_to_le16(chan->scid);
6551
pkt.credits = cpu_to_le16(return_credits);
6552
6553
chan->ident = l2cap_get_ident(conn);
6554
6555
l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6556
}
6557
6558
/* Record how much receive buffer space the channel's owner currently
 * has, and replenish the peer's credits if the channel is connected.
 */
void l2cap_chan_rx_avail(struct l2cap_chan *chan, ssize_t rx_avail)
{
	/* Ignore no-op updates to avoid needless credit traffic */
	if (rx_avail == chan->rx_avail)
		return;

	BT_DBG("chan %p has %zd bytes avail for rx", chan, rx_avail);

	chan->rx_avail = rx_avail;

	/* Credits may only be issued on an established channel */
	if (chan->state == BT_CONNECTED)
		l2cap_chan_le_send_credits(chan);
}
6570
6571
static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6572
{
6573
int err;
6574
6575
BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6576
6577
/* Wait recv to confirm reception before updating the credits */
6578
err = chan->ops->recv(chan, skb);
6579
6580
if (err < 0 && chan->rx_avail != -1) {
6581
BT_ERR("Queueing received LE L2CAP data failed");
6582
l2cap_send_disconn_req(chan, ECONNRESET);
6583
return err;
6584
}
6585
6586
/* Update credits whenever an SDU is received */
6587
l2cap_chan_le_send_credits(chan);
6588
6589
return err;
6590
}
6591
6592
/* Credit-based flow-control (LE / enhanced) rx path: account one credit
 * for the PDU, then reassemble the SDU - the first fragment carries a
 * 2-byte SDU length prefix.  Takes ownership of skb; on internal errors
 * it frees everything itself and returns 0 so the caller does not
 * double-free.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	/* Negative return: caller (l2cap_data_channel) frees the skb */
	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("chan %p: rx_credits %u -> %u",
	       chan, chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First fragment: pull the SDU length prefix */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Complete SDU in a single PDU - deliver immediately */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* Fragment is now owned by the partial SDU */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6696
6697
/* Route an incoming data PDU to the channel identified by cid and feed
 * it to the mode-specific rx path.  Consumes skb on every path.
 * NOTE(review): the unlock/put at 'done' indicates l2cap_get_chan_by_scid
 * returns with the channel locked and a reference held - confirm against
 * its definition.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		/* Drop packet and return */
		kfree_skb(skb);
		return;
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv returning 0 means the callee took ownership of skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv always consumes skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
6762
6763
/* Deliver a connectionless (UCD) PDU to the global channel bound to the
 * given PSM on a BR/EDR link.  Consumes skb on every path; the remote
 * address and PSM are stashed in the skb cb for recvmsg's msg_name.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Connectionless data is only defined for ACL (BR/EDR) links */
	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	l2cap_chan_lock(chan);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv returning 0 means the callee took ownership of skb */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6803
6804
/* Top-level demultiplexer for a complete L2CAP frame: parse the basic
 * header and route by CID to the signaling, connectionless, LE
 * signaling, or data channel handlers.  Frames arriving before the HCI
 * link is fully up are parked on pending_rx (drained by
 * process_pending_rx).  Consumes skb on every path.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6858
6859
static void process_pending_rx(struct work_struct *work)
6860
{
6861
struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6862
pending_rx_work);
6863
struct sk_buff *skb;
6864
6865
BT_DBG("");
6866
6867
mutex_lock(&conn->lock);
6868
6869
while ((skb = skb_dequeue(&conn->pending_rx)))
6870
l2cap_recv_frame(conn, skb);
6871
6872
mutex_unlock(&conn->lock);
6873
}
6874
6875
/* Create (or return the existing) l2cap_conn for an HCI connection:
 * allocate the connection state, attach an HCI channel, and initialize
 * locks, lists, timers and the pending-rx machinery.  Returns NULL on
 * allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up for this hcon - reuse it */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	conn->mtu = hcon->mtu;
	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise SMP over BR/EDR only when the controller supports it */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
6926
6927
/* Check whether a PSM is well-formed for the destination address type:
 * LE PSMs fit in the low byte; BR/EDR PSMs must be odd with the lsb of
 * the upper byte clear.  Zero is never valid.
 */
static bool is_valid_psm(u16 psm, u8 dst_type)
{
	if (psm == 0x0000)
		return false;

	if (bdaddr_type_is_le(dst_type))
		return psm <= 0x00ff;

	/* PSM must be odd and lsb of upper byte must be 0 */
	return (psm & 0x0101) == 0x0001;
}
6938
6939
/* Context for l2cap_chan_by_pid(): counts channels that match a given
 * channel's PSM and peer PID.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel being compared against */
	struct pid *pid;		/* peer pid to match */
	int count;			/* matching channels found so far */
};
6944
6945
/* Per-channel iteration callback: bump data->count for every other
 * deferred-setup, EXT_FLOWCTL channel in BT_CONNECT state that shares
 * the reference channel's PSM and peer PID and has no request in
 * flight (ident == 0).
 */
static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
{
	struct l2cap_chan_data *d = data;
	struct pid *pid;

	/* Skip the channel we are counting matches for */
	if (chan == d->chan)
		return;

	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only count deferred channels with the same PID/PSM */
	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	d->count++;
}
6965
6966
/* Initiate an outgoing L2CAP channel connection.
 *
 * @chan:     channel to connect (state BT_OPEN or BT_BOUND)
 * @psm:      destination PSM (little endian), for connection-oriented channels
 * @cid:      destination CID, for fixed channels
 * @dst:      destination address
 * @dst_type: L2CAP (BDADDR_*) address type of @dst
 * @timeout:  connection timeout handed to the HCI layer
 *
 * Resolves the route, creates (or reuses) the underlying ACL/LE hci_conn and
 * attaches @chan to its l2cap_conn.  Returns 0 on success or when a connect
 * is already in progress, a negative errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type, u16 timeout)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	/* hci_get_route() returns a held hdev reference; dropped in done: */
	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Raw channels are exempt from PSM/CID validation */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels require a PSM ... */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* ... and fixed channels require a CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes disabled by module parameters */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we are slave-only, so connect directly;
		 * otherwise go through the background-scan connect path.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level, timeout,
					      HCI_ROLE_SLAVE, 0, 0);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level, timeout,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN, timeout);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* ECRED limits how many SCIDs a single connect request may carry;
	 * refuse when the same owner already has too many pending connects.
	 */
	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->lock);
	l2cap_chan_lock(chan);

	/* The requested destination CID must not already be in use */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* The ACL/LE link may already be up; start channel setup right away */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7150
7151
static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
7152
{
7153
struct l2cap_conn *conn = chan->conn;
7154
DEFINE_RAW_FLEX(struct l2cap_ecred_reconf_req, pdu, scid, 1);
7155
7156
pdu->mtu = cpu_to_le16(chan->imtu);
7157
pdu->mps = cpu_to_le16(chan->mps);
7158
pdu->scid[0] = cpu_to_le16(chan->scid);
7159
7160
chan->ident = l2cap_get_ident(conn);
7161
7162
l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
7163
sizeof(pdu), &pdu);
7164
}
7165
7166
/* Reconfigure the incoming MTU of an enhanced-credit (ECRED) channel.
 * The MTU may only grow once configured; shrinking is rejected.
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	/* Never allow the MTU to be reduced */
	if (mtu < chan->imtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;

	/* Announce the new value to the remote side */
	l2cap_ecred_reconfigure(chan);

	return 0;
}
7179
7180
/* ---- L2CAP interface with lower layer (HCI) ---- */
7181
7182
/* Incoming BR/EDR connection indication: report whether any listening
 * channel would accept a connection from @bdaddr, as HCI link-mode bits
 * (HCI_LM_ACCEPT, optionally HCI_LM_MASTER).
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct l2cap_chan *c;
	int lm_exact = 0, lm_any = 0;
	int exact = 0;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Scan listening channels and collect their link-mode flags */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		int lm;

		if (c->state != BT_LISTEN)
			continue;

		lm = HCI_LM_ACCEPT;
		if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
			lm |= HCI_LM_MASTER;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			/* Bound to this adapter's own address */
			lm_exact |= lm;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			/* Wildcard listener */
			lm_any |= lm;
		}
	}
	read_unlock(&chan_list_lock);

	/* Exact-address listeners take precedence over wildcard ones */
	return exact ? lm_exact : lm_any;
}
7210
7211
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * Returns the next matching channel with a reference held (released by
 * the caller via l2cap_chan_put()), or NULL when the list is exhausted.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume right after the previous hit, or start at the list head */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Match channels bound to this adapter or to any address */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Grab a ref under the list lock so the channel stays valid
		 * after we drop it; may fail (NULL) if it is being freed.
		 */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7246
7247
/* HCI connect-complete callback: on success set up the l2cap_conn and
 * offer the new link to every listening fixed channel; on failure tear
 * down any L2CAP state attached to @hcon.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	/* L2CAP only runs over ACL (BR/EDR) and LE links */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		/* Link setup failed: destroy any associated L2CAP state */
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping the reference keeping pchan alive */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7307
7308
int l2cap_disconn_ind(struct hci_conn *hcon)
7309
{
7310
struct l2cap_conn *conn = hcon->l2cap_data;
7311
7312
BT_DBG("hcon %p", hcon);
7313
7314
if (!conn)
7315
return HCI_ERROR_REMOTE_USER_TERM;
7316
return conn->disc_reason;
7317
}
7318
7319
/* HCI disconnect-complete callback: destroy all L2CAP state for @hcon. */
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	/* L2CAP only runs over ACL (BR/EDR) and LE links */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
7328
7329
/* React to a change of link encryption on a connection-oriented channel:
 * medium security tolerates a temporary loss of encryption (guarded by a
 * timer), while high/FIPS security requires the channel to be closed.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	switch (chan->sec_level) {
	case BT_SECURITY_MEDIUM:
		if (!encrypt)
			/* Give the link a chance to re-encrypt */
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		else
			__clear_chan_timer(chan);
		break;
	case BT_SECURITY_HIGH:
	case BT_SECURITY_FIPS:
		/* Unencrypted link is not acceptable at these levels */
		if (!encrypt)
			l2cap_chan_close(chan, ECONNREFUSED);
		break;
	default:
		break;
	}
}
7345
7346
/* HCI security (authentication/encryption) event callback.
 *
 * Walks every channel on the connection and advances its state machine
 * according to the outcome: resume connected channels, (re)start pending
 * connects, and answer held-back BR/EDR connect requests (BT_CONNECT2).
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* Successful encryption upgrades the effective security level */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Channels with a connect request in flight are handled when
		 * the response arrives, not here.
		 */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security done: send the pending connect request,
			 * or schedule disconnection on failure/short key.
			 */
			if (!status && l2cap_check_enc_key_size(hcon, chan))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			/* BR/EDR connect request held back for security:
			 * answer it now.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon, chan)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Wait for userspace authorization */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On acceptance, immediately start configuration */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->lock);
}
7432
7433
/* Append fragment into frame respecting the maximum len of rx_skb.
 *
 * Allocates conn->rx_skb on the first fragment (sized @len) and copies as
 * much of @skb as fits, consuming the copied bytes from @skb.  Returns the
 * number of bytes consumed, or -ENOMEM on allocation failure.
 */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Init rx_len */
		conn->rx_len = len;

		/* Carry over the arrival timestamp of the first fragment */
		skb_set_delivery_time(conn->rx_skb, skb->tstamp,
				      skb->tstamp_type);
	}

	/* Copy as much as the rx_skb can hold */
	len = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len);
	skb_pull(skb, len);
	conn->rx_len -= len;

	return len;
}
7457
7458
/* Complete the 16-bit L2CAP length field of a frame whose start fragment
 * was shorter than the length field, then make sure rx_skb is large enough
 * for the whole frame (reallocating it when the provisional allocation is
 * too small).  Returns bytes consumed, or a negative error.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length: with rx_skb
	 * detached, l2cap_recv_frag() allocates a fresh buffer and copies
	 * the already-received bytes from the old one.
	 */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
7492
7493
/* Discard any partially reassembled frame and reset reassembly state. */
static void l2cap_recv_reset(struct l2cap_conn *conn)
{
	kfree_skb(conn->rx_skb);
	conn->rx_skb = NULL;
	conn->rx_len = 0;
}
7499
7500
struct l2cap_conn *l2cap_conn_hold_unless_zero(struct l2cap_conn *c)
7501
{
7502
if (!c)
7503
return NULL;
7504
7505
BT_DBG("conn %p orig refcnt %u", c, kref_read(&c->ref));
7506
7507
if (!kref_get_unless_zero(&c->ref))
7508
return NULL;
7509
7510
return c;
7511
}
7512
7513
/* Entry point for inbound ACL data from the HCI layer.
 *
 * Reassembles HCI ACL fragments (ACL_START*/ACL_CONT) into complete L2CAP
 * frames in conn->rx_skb and hands finished frames to l2cap_recv_frame().
 * Takes ownership of @skb in all cases.
 */
int l2cap_recv_acldata(struct hci_dev *hdev, u16 handle,
		       struct sk_buff *skb, u16 flags)
{
	struct hci_conn *hcon;
	struct l2cap_conn *conn;
	int len;

	/* Lock hdev for hci_conn, and race on l2cap_data vs. l2cap_conn_del */
	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		kfree_skb(skb);
		return -ENOENT;
	}

	hci_conn_enter_active_mode(hcon, BT_POWER_FORCE_ACTIVE_OFF);

	conn = hcon->l2cap_data;

	/* First data on this link: create the l2cap_conn lazily */
	if (!conn)
		conn = l2cap_conn_add(hcon);

	/* Hold a conn ref for the duration; hcon must not be used after the
	 * hdev lock is dropped, so poison the local pointer.
	 */
	conn = l2cap_conn_hold_unless_zero(conn);
	hcon = NULL;

	hci_dev_unlock(hdev);

	if (!conn) {
		kfree_skb(skb);
		return -EINVAL;
	}

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	mutex_lock(&conn->lock);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while reassembly is in progress means the
		 * previous frame was never completed: drop it.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			goto unlock;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			/* PTS test cases L2CAP/COS/CED/BI-14-C and BI-15-C
			 * (Multiple Signaling Command in one PDU, Data
			 * Truncated, BR/EDR) send a C-frame to the IUT with
			 * PDU Length set to 8 and Channel ID set to the
			 * correct signaling channel for the logical link.
			 * The Information payload contains one L2CAP_ECHO_REQ
			 * packet with Data Length set to 0 with 0 octets of
			 * echo data and one invalid command packet due to
			 * data truncated in PDU but present in HCI packet.
			 *
			 * Shorter the socket buffer to the PDU length to
			 * allow to process valid commands from the PDU before
			 * setting the socket unreliable.
			 */
			skb->len = len;
			l2cap_recv_frame(conn, skb);
			l2cap_conn_unreliable(conn, ECOMM);
			goto unlock;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
unlock:
	mutex_unlock(&conn->lock);
	l2cap_conn_put(conn);
	return 0;
}
7660
7661
/* Callbacks registered with the HCI core for link lifecycle and security
 * events; registered in l2cap_init(), removed in l2cap_exit().
 */
static struct hci_cb l2cap_cb = {
	.name = "L2CAP",
	.connect_cfm = l2cap_connect_cfm,
	.disconn_cfm = l2cap_disconn_cfm,
	.security_cfm = l2cap_security_cfm,
};
7667
7668
/* debugfs "l2cap" file: dump one line of state per global L2CAP channel. */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &c->src, c->src_type, &c->dst, c->dst_type,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}
7686
7687
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* Handle of the debugfs file, removed again in l2cap_exit() */
static struct dentry *l2cap_debugfs;
7690
7691
/* Module initialization: register the socket layer, hook into the HCI
 * core and (when available) expose channel state via debugfs.
 */
int __init l2cap_init(void)
{
	int err = l2cap_init_sockets();

	if (err < 0)
		return err;

	hci_register_cb(&l2cap_cb);

	/* debugfs support is optional; skip the file when unavailable */
	if (!IS_ERR_OR_NULL(bt_debugfs))
		l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
						    NULL, &l2cap_debugfs_fops);

	return 0;
}
7709
7710
/* Module teardown: undo l2cap_init() in reverse order. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
7716
7717
/* Runtime-tunable module parameters (also visible under sysfs) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
7722
7723