GitHub Repository: torvalds/linux
Path: blob/master/net/bluetooth/6lowpan.c
// SPDX-License-Identifier: GPL-2.0-only
/*
   Copyright (c) 2013-2014 Intel Corp.

*/

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/debugfs.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/netdev_lock.h>
#include <net/pkt_sched.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include <net/6lowpan.h> /* for the compression support */

#define VERSION "0.1"

static struct dentry *lowpan_enable_debugfs;
static struct dentry *lowpan_control_debugfs;

#define IFACE_NAME_TEMPLATE "bt%d"

struct skb_cb {
	struct in6_addr addr;
	struct in6_addr gw;
	struct l2cap_chan *chan;
};
#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb))

/* The devices list contains the devices for which we are acting
 * as a proxy. The BT 6LoWPAN device is a virtual device that
 * connects to the Bluetooth LE device. The real connection to the
 * BT device is done via the l2cap layer. There is one virtual
 * device per BT 6LoWPAN network (= hciX device).
 * The list contains struct lowpan_btle_dev elements.
 */
static LIST_HEAD(bt_6lowpan_devices);
static DEFINE_SPINLOCK(devices_lock);

static bool enable_6lowpan;

/* We listen for incoming connections via this channel
 */
static struct l2cap_chan *listen_chan;
static DEFINE_MUTEX(set_lock);

struct lowpan_peer {
	struct list_head list;
	struct rcu_head rcu;
	struct l2cap_chan *chan;

	/* peer addresses in various formats */
	unsigned char lladdr[ETH_ALEN];
	struct in6_addr peer_addr;
};

struct lowpan_btle_dev {
	struct list_head list;

	struct hci_dev *hdev;
	struct net_device *netdev;
	struct list_head peers;
	atomic_t peer_count; /* number of items in peers list */

	struct work_struct delete_netdev;
	struct delayed_work notify_peers;
};

static inline struct lowpan_btle_dev *
lowpan_btle_dev(const struct net_device *netdev)
{
	return (struct lowpan_btle_dev *)lowpan_dev(netdev)->priv;
}

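/* Peer list mutators; both are called with devices_lock held, while
 * readers traverse the peers list under RCU.
 */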
static inline void peer_add(struct lowpan_btle_dev *dev,
			    struct lowpan_peer *peer)
{
	list_add_rcu(&peer->list, &dev->peers);
	atomic_inc(&dev->peer_count);
}

static inline bool peer_del(struct lowpan_btle_dev *dev,
			    struct lowpan_peer *peer)
{
	list_del_rcu(&peer->list);
	kfree_rcu(peer, rcu);

	module_put(THIS_MODULE);

	if (atomic_dec_and_test(&dev->peer_count)) {
		BT_DBG("last peer");
		return true;
	}

	return false;
}

static inline struct lowpan_peer *
__peer_lookup_chan(struct lowpan_btle_dev *dev, struct l2cap_chan *chan)
{
	struct lowpan_peer *peer;

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		if (peer->chan == chan)
			return peer;
	}

	return NULL;
}

static inline struct lowpan_peer *
__peer_lookup_conn(struct lowpan_btle_dev *dev, struct l2cap_conn *conn)
{
	struct lowpan_peer *peer;

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		if (peer->chan->conn == conn)
			return peer;
	}

	return NULL;
}

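/* Map an outgoing skb to the peer behind its IPv6 next hop: resolve the
 * next hop from the routing info (or a previously remembered gateway),
 * then match peers first by IPv6 address and, for SLAAC-assigned
 * addresses, by the link-layer address from the neighbour cache.
 */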
static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
						  struct in6_addr *daddr,
						  struct sk_buff *skb)
{
	struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
	int count = atomic_read(&dev->peer_count);
	const struct in6_addr *nexthop;
	struct lowpan_peer *peer;
	struct neighbour *neigh;

	BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt);

	if (!rt) {
		if (ipv6_addr_any(&lowpan_cb(skb)->gw)) {
			/* There is neither route nor gateway,
			 * probably the destination is a direct peer.
			 */
			nexthop = daddr;
		} else {
			/* There is a known gateway
			 */
			nexthop = &lowpan_cb(skb)->gw;
		}
	} else {
		nexthop = rt6_nexthop(rt, daddr);

		/* We need to remember the address because it is needed
		 * by bt_xmit() when sending the packet. In bt_xmit(), the
		 * destination routing info is not set.
		 */
		memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr));
	}

	BT_DBG("gw %pI6c", nexthop);

	rcu_read_lock();

	list_for_each_entry_rcu(peer, &dev->peers, list) {
		BT_DBG("dst addr %pMR dst type %u ip %pI6c",
		       &peer->chan->dst, peer->chan->dst_type,
		       &peer->peer_addr);

		if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) {
			rcu_read_unlock();
			return peer;
		}
	}

	/* use the neighbour cache for matching addresses assigned by SLAAC */
	neigh = __ipv6_neigh_lookup(dev->netdev, nexthop);
	if (neigh) {
		list_for_each_entry_rcu(peer, &dev->peers, list) {
			if (!memcmp(neigh->ha, peer->lladdr, ETH_ALEN)) {
				neigh_release(neigh);
				rcu_read_unlock();
				return peer;
			}
		}
		neigh_release(neigh);
	}

	rcu_read_unlock();

	return NULL;
}

static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		peer = __peer_lookup_conn(entry, conn);
		if (peer)
			break;
	}

	rcu_read_unlock();

	return peer;
}

static struct lowpan_btle_dev *lookup_dev(struct l2cap_conn *conn)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_btle_dev *dev = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		if (conn->hcon->hdev == entry->hdev) {
			dev = entry;
			break;
		}
	}

	rcu_read_unlock();

	return dev;
}

static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *skb_cp;

	skb_cp = skb_copy(skb, GFP_ATOMIC);
	if (!skb_cp)
		return NET_RX_DROP;

	return netif_rx(skb_cp);
}

static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
			   struct lowpan_peer *peer)
{
	const u8 *saddr;

	saddr = peer->lladdr;

	return lowpan_header_decompress(skb, netdev, netdev->dev_addr, saddr);
}

static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
		    struct lowpan_peer *peer)
{
	struct sk_buff *local_skb;
	int ret;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_6LOWPAN || !skb->len)
		goto drop;

	skb_reset_network_header(skb);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto drop;

	/* check that it's our buffer */
	if (lowpan_is_ipv6(*skb_network_header(skb))) {
		/* Pull off the 1-byte 6LoWPAN header. */
		skb_pull(skb, 1);

		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;
		local_skb->dev = dev;

		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

		if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		consume_skb(local_skb);
		consume_skb(skb);
	} else if (lowpan_is_iphc(*skb_network_header(skb))) {
		local_skb = skb_clone(skb, GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->dev = dev;

		ret = iphc_decompress(local_skb, dev, peer);
		if (ret < 0) {
			BT_DBG("iphc_decompress failed: %d", ret);
			kfree_skb(local_skb);
			goto drop;
		}

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		if (give_skb_to_upper(local_skb, dev)
		    != NET_RX_SUCCESS) {
			kfree_skb(local_skb);
			goto drop;
		}

		dev->stats.rx_bytes += skb->len;
		dev->stats.rx_packets++;

		consume_skb(local_skb);
		consume_skb(skb);
	} else {
		BT_DBG("unknown packet type");
		goto drop;
	}

	return NET_RX_SUCCESS;

drop:
	dev->stats.rx_dropped++;
	return NET_RX_DROP;
}

/* Packet from BT LE device */
static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct lowpan_btle_dev *dev;
	struct lowpan_peer *peer;
	int err;

	peer = lookup_peer(chan->conn);
	if (!peer)
		return -ENOENT;

	dev = lookup_dev(chan->conn);
	if (!dev || !dev->netdev)
		return -ENOENT;

	err = recv_pkt(skb, dev->netdev, peer);
	if (err) {
		BT_DBG("recv pkt %d", err);
		err = -EAGAIN;
	}

	return err;
}

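/* Compress the IPv6 header and fill in the peer's link-layer address.
 * Returns a negative errno on error, 0 for a multicast destination (no
 * single peer) and 1 for a unicast destination with peer info set.
 */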
static int setup_header(struct sk_buff *skb, struct net_device *netdev,
			bdaddr_t *peer_addr, u8 *peer_addr_type)
{
	struct in6_addr ipv6_daddr;
	struct ipv6hdr *hdr;
	struct lowpan_btle_dev *dev;
	struct lowpan_peer *peer;
	u8 *daddr;
	int err, status = 0;

	hdr = ipv6_hdr(skb);

	dev = lowpan_btle_dev(netdev);

	memcpy(&ipv6_daddr, &hdr->daddr, sizeof(ipv6_daddr));

	if (ipv6_addr_is_multicast(&ipv6_daddr)) {
		lowpan_cb(skb)->chan = NULL;
		daddr = NULL;
	} else {
		BT_DBG("dest IP %pI6c", &ipv6_daddr);

		/* The packet might have been routed to the 6lowpan
		 * interface (either via the default route or a
		 * user-set route), so look up the peer by the
		 * destination address.
		 */
		peer = peer_lookup_dst(dev, &ipv6_daddr, skb);
		if (!peer) {
			BT_DBG("no such peer");
			return -ENOENT;
		}

		daddr = peer->lladdr;
		*peer_addr = peer->chan->dst;
		*peer_addr_type = peer->chan->dst_type;
		lowpan_cb(skb)->chan = peer->chan;

		status = 1;
	}

	lowpan_header_compress(skb, netdev, daddr, dev->netdev->dev_addr);

	err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
	if (err < 0)
		return err;

	return status;
}

static int header_create(struct sk_buff *skb, struct net_device *netdev,
			 unsigned short type, const void *_daddr,
			 const void *_saddr, unsigned int len)
{
	if (type != ETH_P_IPV6)
		return -EINVAL;

	return 0;
}

/* Packet to BT LE device */
static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
		    struct net_device *netdev)
{
	struct msghdr msg;
	struct kvec iv;
	int err;

	/* Remember the skb so that we can send EAGAIN to the caller if
	 * we run out of credits.
	 */
	chan->data = skb;

	iv.iov_base = skb->data;
	iv.iov_len = skb->len;

	memset(&msg, 0, sizeof(msg));
	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, skb->len);

	err = l2cap_chan_send(chan, &msg, skb->len, NULL);
	if (err > 0) {
		netdev->stats.tx_bytes += err;
		netdev->stats.tx_packets++;
		return 0;
	}

	if (err < 0)
		netdev->stats.tx_errors++;

	return err;
}

static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
{
	struct sk_buff *local_skb;
	struct lowpan_btle_dev *entry;
	int err = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		struct lowpan_peer *pentry;
		struct lowpan_btle_dev *dev;

		if (entry->netdev != netdev)
			continue;

		dev = lowpan_btle_dev(entry->netdev);

		list_for_each_entry_rcu(pentry, &dev->peers, list) {
			int ret;

			local_skb = skb_clone(skb, GFP_ATOMIC);

			BT_DBG("xmit %s to %pMR type %u IP %pI6c chan %p",
			       netdev->name,
			       &pentry->chan->dst, pentry->chan->dst_type,
			       &pentry->peer_addr, pentry->chan);
			ret = send_pkt(pentry->chan, local_skb, netdev);
			if (ret < 0)
				err = ret;

			kfree_skb(local_skb);
		}
	}

	rcu_read_unlock();

	return err;
}

static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int err = 0;
	bdaddr_t addr;
	u8 addr_type;

	/* We must take a copy of the skb before we modify/replace the ipv6
	 * header as the header could be used elsewhere.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return NET_XMIT_DROP;

	/* Return values from setup_header()
	 * <0 - error, packet is dropped
	 *  0 - this is a multicast packet
	 *  1 - this is a unicast packet
	 */
	err = setup_header(skb, netdev, &addr, &addr_type);
	if (err < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	if (err) {
		if (lowpan_cb(skb)->chan) {
			BT_DBG("xmit %s to %pMR type %u IP %pI6c chan %p",
			       netdev->name, &addr, addr_type,
			       &lowpan_cb(skb)->addr, lowpan_cb(skb)->chan);
			err = send_pkt(lowpan_cb(skb)->chan, skb, netdev);
		} else {
			err = -ENOENT;
		}
	} else {
		/* We need to send the packet to every device behind this
		 * interface.
		 */
		err = send_mcast_pkt(skb, netdev);
	}

	dev_kfree_skb(skb);

	if (err)
		BT_DBG("ERROR: xmit failed (%d)", err);

	return err < 0 ? NET_XMIT_DROP : err;
}

static int bt_dev_init(struct net_device *dev)
{
	netdev_lockdep_set_classes(dev);

	return 0;
}

static const struct net_device_ops netdev_ops = {
	.ndo_init		= bt_dev_init,
	.ndo_start_xmit		= bt_xmit,
};

static const struct header_ops header_ops = {
	.create	= header_create,
};

static void netdev_setup(struct net_device *dev)
{
	dev->hard_header_len	= 0;
	dev->needed_tailroom	= 0;
	dev->flags		= IFF_RUNNING | IFF_MULTICAST;
	dev->watchdog_timeo	= 0;
	dev->tx_queue_len	= DEFAULT_TX_QUEUE_LEN;

	dev->netdev_ops		= &netdev_ops;
	dev->header_ops		= &header_ops;
	dev->needs_free_netdev	= true;
}

static const struct device_type bt_type = {
	.name	= "bluetooth",
};

static void ifup(struct net_device *netdev)
{
	int err;

	rtnl_lock();
	err = dev_open(netdev, NULL);
	if (err < 0)
		BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
	rtnl_unlock();
}

static void ifdown(struct net_device *netdev)
{
	rtnl_lock();
	dev_close(netdev);
	rtnl_unlock();
}

static void do_notify_peers(struct work_struct *work)
{
	struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev,
						   notify_peers.work);

	netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */
}

static bool is_bt_6lowpan(struct hci_conn *hcon)
{
	if (hcon->type != LE_LINK)
		return false;

	if (!enable_6lowpan)
		return false;

	return true;
}

static struct l2cap_chan *chan_create(void)
{
	struct l2cap_chan *chan;

	chan = l2cap_chan_create();
	if (!chan)
		return NULL;

	l2cap_chan_set_defaults(chan);

	chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
	chan->mode = L2CAP_MODE_LE_FLOWCTL;
	chan->imtu = 1280;

	return chan;
}

static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
					struct lowpan_btle_dev *dev,
					bool new_netdev)
{
	struct lowpan_peer *peer;

	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
	if (!peer)
		return NULL;

	peer->chan = chan;

	baswap((void *)peer->lladdr, &chan->dst);

	lowpan_iphc_uncompress_eui48_lladdr(&peer->peer_addr, peer->lladdr);

	spin_lock(&devices_lock);
	INIT_LIST_HEAD(&peer->list);
	peer_add(dev, peer);
	spin_unlock(&devices_lock);

	/* Notifying peers about us needs to be done without locks held */
	if (new_netdev)
		INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers);
	schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100));

	return peer->chan;
}

static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev)
{
	struct net_device *netdev;
	bdaddr_t addr;
	int err;

	netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)),
			      IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN,
			      netdev_setup);
	if (!netdev)
		return -ENOMEM;

	netdev->addr_assign_type = NET_ADDR_PERM;
	baswap(&addr, &chan->src);
	__dev_addr_set(netdev, &addr, sizeof(addr));

	netdev->netdev_ops = &netdev_ops;
	SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
	SET_NETDEV_DEVTYPE(netdev, &bt_type);

	*dev = lowpan_btle_dev(netdev);
	(*dev)->netdev = netdev;
	(*dev)->hdev = chan->conn->hcon->hdev;
	INIT_LIST_HEAD(&(*dev)->peers);

	spin_lock(&devices_lock);
	INIT_LIST_HEAD(&(*dev)->list);
	list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
	spin_unlock(&devices_lock);

	err = lowpan_register_netdev(netdev, LOWPAN_LLTYPE_BTLE);
	if (err < 0) {
		BT_INFO("register_netdev failed %d", err);
		spin_lock(&devices_lock);
		list_del_rcu(&(*dev)->list);
		spin_unlock(&devices_lock);
		free_netdev(netdev);
		goto out;
	}

	BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d",
	       netdev->ifindex, &chan->dst, chan->dst_type,
	       &chan->src, chan->src_type);
	set_bit(__LINK_STATE_PRESENT, &netdev->state);

	return 0;

out:
	return err;
}

static inline void chan_ready_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *dev;
	bool new_netdev = false;

	dev = lookup_dev(chan->conn);

	BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev);

	if (!dev) {
		if (setup_netdev(chan, &dev) < 0) {
			l2cap_chan_del(chan, -ENOENT);
			return;
		}
		new_netdev = true;
	}

	if (!try_module_get(THIS_MODULE))
		return;

	add_peer_chan(chan, dev, new_netdev);
	ifup(dev->netdev);
}

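/* Called by L2CAP when a remote device connects to our listening
 * channel: allocate a new channel that will carry this connection.
 */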
static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan)
{
	struct l2cap_chan *chan;

	chan = chan_create();
	if (!chan)
		return NULL;

	chan->ops = pchan->ops;

	BT_DBG("chan %p pchan %p", chan, pchan);

	return chan;
}

static void delete_netdev(struct work_struct *work)
{
	struct lowpan_btle_dev *entry = container_of(work,
						     struct lowpan_btle_dev,
						     delete_netdev);

	lowpan_unregister_netdev(entry->netdev);

	/* The entry pointer is deleted by the netdev destructor. */
}

static void chan_close_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_btle_dev *dev = NULL;
	struct lowpan_peer *peer;
	int err = -ENOENT;
	bool last = false, remove = true;

	BT_DBG("chan %p conn %p", chan, chan->conn);

	if (chan->conn && chan->conn->hcon) {
		if (!is_bt_6lowpan(chan->conn->hcon))
			return;

		/* If conn is set, then the netdev is also there and we should
		 * not remove it.
		 */
		remove = false;
	}

	spin_lock(&devices_lock);

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		dev = lowpan_btle_dev(entry->netdev);
		peer = __peer_lookup_chan(dev, chan);
		if (peer) {
			last = peer_del(dev, peer);
			err = 0;

			BT_DBG("dev %p removing %speer %p", dev,
			       last ? "last " : "1 ", peer);
			BT_DBG("chan %p orig refcnt %u", chan,
			       kref_read(&chan->kref));

			l2cap_chan_put(chan);
			break;
		}
	}

	if (!err && last && dev && !atomic_read(&dev->peer_count)) {
		spin_unlock(&devices_lock);

		cancel_delayed_work_sync(&dev->notify_peers);

		ifdown(dev->netdev);

		if (remove) {
			INIT_WORK(&entry->delete_netdev, delete_netdev);
			schedule_work(&entry->delete_netdev);
		}
	} else {
		spin_unlock(&devices_lock);
	}
}

static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err)
{
	BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn,
	       state_to_string(state), err);
}

static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan,
					 unsigned long hdr_len,
					 unsigned long len, int nb)
{
	struct sk_buff *skb;

	/* Note that we must allocate using GFP_ATOMIC here as
	 * this function is ultimately called from the netdev hard
	 * xmit function, which runs in atomic context.
	 */
	skb = bt_skb_alloc(hdr_len + len, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	return skb;
}

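/* L2CAP flow control: pause the netdev transmit queue while the channel
 * is suspended (out of credits) and wake it again on resume.
 */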
static void chan_suspend_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *dev;

	BT_DBG("chan %p suspend", chan);

	dev = lookup_dev(chan->conn);
	if (!dev || !dev->netdev)
		return;

	netif_stop_queue(dev->netdev);
}

static void chan_resume_cb(struct l2cap_chan *chan)
{
	struct lowpan_btle_dev *dev;

	BT_DBG("chan %p resume", chan);

	dev = lookup_dev(chan->conn);
	if (!dev || !dev->netdev)
		return;

	netif_wake_queue(dev->netdev);
}

static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
{
	return L2CAP_CONN_TIMEOUT;
}

static const struct l2cap_ops bt_6lowpan_chan_ops = {
	.name			= "L2CAP 6LoWPAN channel",
	.new_connection		= chan_new_conn_cb,
	.recv			= chan_recv_cb,
	.close			= chan_close_cb,
	.state_change		= chan_state_change_cb,
	.ready			= chan_ready_cb,
	.resume			= chan_resume_cb,
	.suspend		= chan_suspend_cb,
	.get_sndtimeo		= chan_get_sndtimeo_cb,
	.alloc_skb		= chan_alloc_skb_cb,

	.teardown		= l2cap_chan_no_teardown,
	.defer			= l2cap_chan_no_defer,
	.set_shutdown		= l2cap_chan_no_set_shutdown,
};

static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type)
{
	struct l2cap_chan *chan;
	int err;

	chan = chan_create();
	if (!chan)
		return -EINVAL;

	chan->ops = &bt_6lowpan_chan_ops;

	err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0,
				 addr, dst_type, L2CAP_CONN_TIMEOUT);

	BT_DBG("chan %p err %d", chan, err);
	if (err < 0)
		l2cap_chan_put(chan);

	return err;
}

static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type)
{
	struct lowpan_peer *peer;

	BT_DBG("conn %p dst type %u", conn, dst_type);

	peer = lookup_peer(conn);
	if (!peer)
		return -ENOENT;

	BT_DBG("peer %p chan %p", peer, peer->chan);

	l2cap_chan_close(peer->chan, ENOENT);

	return 0;
}

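/* Set up the channel that listens on the IPSP PSM for incoming
 * 6LoWPAN connections.
 */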
static struct l2cap_chan *bt_6lowpan_listen(void)
{
	bdaddr_t *addr = BDADDR_ANY;
	struct l2cap_chan *chan;
	int err;

	if (!enable_6lowpan)
		return NULL;

	chan = chan_create();
	if (!chan)
		return NULL;

	chan->ops = &bt_6lowpan_chan_ops;
	chan->state = BT_LISTEN;
	chan->src_type = BDADDR_LE_PUBLIC;

	atomic_set(&chan->nesting, L2CAP_NESTING_PARENT);

	BT_DBG("chan %p src type %u", chan, chan->src_type);

	err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP));
	if (err) {
		l2cap_chan_put(chan);
		BT_ERR("psm cannot be added err %d", err);
		return NULL;
	}

	return chan;
}

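/* Parse "<bdaddr> <addr_type>" (e.g. "00:11:22:33:44:55 1") as written
 * to the debugfs control file and look up the matching LE connection.
 */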
static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
			  struct l2cap_conn **conn)
{
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int n;

	n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
		   &addr->b[5], &addr->b[4], &addr->b[3],
		   &addr->b[2], &addr->b[1], &addr->b[0],
		   addr_type);

	if (n < 7)
		return -EINVAL;

	/* The LE_PUBLIC address type is ignored because of BDADDR_ANY */
	hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC);
	if (!hdev)
		return -ENOENT;

	hci_dev_lock(hdev);
	hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	if (!hcon)
		return -ENOENT;

	*conn = (struct l2cap_conn *)hcon->l2cap_data;

	BT_DBG("conn %p dst %pMR type %u", *conn, &hcon->dst, hcon->dst_type);

	return 0;
}

static void disconnect_all_peers(void)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer, *tmp_peer, *new_peer;
	struct list_head peers;

	INIT_LIST_HEAD(&peers);

	/* We make a separate list of peers because close_cb() will
	 * modify the device's peers list, so we must not walk and
	 * modify the same list at the same time.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		list_for_each_entry_rcu(peer, &entry->peers, list) {
			new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC);
			if (!new_peer)
				break;

			new_peer->chan = peer->chan;
			INIT_LIST_HEAD(&new_peer->list);

			list_add(&new_peer->list, &peers);
		}
	}

	rcu_read_unlock();

	spin_lock(&devices_lock);
	list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
		l2cap_chan_close(peer->chan, ENOENT);

		list_del_rcu(&peer->list);
		kfree_rcu(peer, rcu);
	}
	spin_unlock(&devices_lock);
}

struct set_enable {
	struct work_struct work;
	bool flag;
};

static void do_enable_set(struct work_struct *work)
{
	struct set_enable *set_enable = container_of(work,
						     struct set_enable, work);

	if (!set_enable->flag || enable_6lowpan != set_enable->flag)
		/* Disconnect existing connections if 6lowpan is
		 * disabled
		 */
		disconnect_all_peers();

	enable_6lowpan = set_enable->flag;

	mutex_lock(&set_lock);
	if (listen_chan) {
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_put(listen_chan);
	}

	listen_chan = bt_6lowpan_listen();
	mutex_unlock(&set_lock);

	kfree(set_enable);
}

static int lowpan_enable_set(void *data, u64 val)
{
	struct set_enable *set_enable;

	set_enable = kzalloc(sizeof(*set_enable), GFP_KERNEL);
	if (!set_enable)
		return -ENOMEM;

	set_enable->flag = !!val;
	INIT_WORK(&set_enable->work, do_enable_set);

	schedule_work(&set_enable->work);

	return 0;
}

static int lowpan_enable_get(void *data, u64 *val)
{
	*val = enable_6lowpan;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get,
			 lowpan_enable_set, "%llu\n");

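/* debugfs control file: accepts "connect <bdaddr> <type>" and
 * "disconnect <bdaddr> <type>" commands.
 */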
static ssize_t lowpan_control_write(struct file *fp,
				    const char __user *user_buffer,
				    size_t count,
				    loff_t *position)
{
	char buf[32];
	size_t buf_size = min(count, sizeof(buf) - 1);
	int ret;
	bdaddr_t addr;
	u8 addr_type;
	struct l2cap_conn *conn = NULL;

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (memcmp(buf, "connect ", 8) == 0) {
		ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn);
		if (ret == -EINVAL)
			return ret;

		mutex_lock(&set_lock);
		if (listen_chan) {
			l2cap_chan_close(listen_chan, 0);
			l2cap_chan_put(listen_chan);
			listen_chan = NULL;
		}
		mutex_unlock(&set_lock);

		if (conn) {
			struct lowpan_peer *peer;

			if (!is_bt_6lowpan(conn->hcon))
				return -EINVAL;

			peer = lookup_peer(conn);
			if (peer) {
				BT_DBG("6LoWPAN connection already exists");
				return -EALREADY;
			}

			BT_DBG("conn %p dst %pMR type %d user %u", conn,
			       &conn->hcon->dst, conn->hcon->dst_type,
			       addr_type);
		}

		ret = bt_6lowpan_connect(&addr, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	if (memcmp(buf, "disconnect ", 11) == 0) {
		ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn);
		if (ret < 0)
			return ret;

		ret = bt_6lowpan_disconnect(conn, addr_type);
		if (ret < 0)
			return ret;

		return count;
	}

	return count;
}

static int lowpan_control_show(struct seq_file *f, void *ptr)
{
	struct lowpan_btle_dev *entry;
	struct lowpan_peer *peer;

	spin_lock(&devices_lock);

	list_for_each_entry(entry, &bt_6lowpan_devices, list) {
		list_for_each_entry(peer, &entry->peers, list)
			seq_printf(f, "%pMR (type %u)\n",
				   &peer->chan->dst, peer->chan->dst_type);
	}

	spin_unlock(&devices_lock);

	return 0;
}

static int lowpan_control_open(struct inode *inode, struct file *file)
{
	return single_open(file, lowpan_control_show, inode->i_private);
}

static const struct file_operations lowpan_control_fops = {
	.open		= lowpan_control_open,
	.read		= seq_read,
	.write		= lowpan_control_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void disconnect_devices(void)
{
	struct lowpan_btle_dev *entry, *tmp, *new_dev;
	struct list_head devices;

	INIT_LIST_HEAD(&devices);

	/* We make a separate list of devices because unregister_netdev()
	 * will call device_event(), which will also want to modify the
	 * same devices list.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) {
		new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC);
		if (!new_dev)
			break;

		new_dev->netdev = entry->netdev;
		INIT_LIST_HEAD(&new_dev->list);

		list_add_rcu(&new_dev->list, &devices);
	}

	rcu_read_unlock();

	list_for_each_entry_safe(entry, tmp, &devices, list) {
		ifdown(entry->netdev);
		BT_DBG("Unregistering netdev %s %p",
		       entry->netdev->name, entry->netdev);
		lowpan_unregister_netdev(entry->netdev);
		kfree(entry);
	}
}

static int device_event(struct notifier_block *unused,
			unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct lowpan_btle_dev *entry;

	if (netdev->type != ARPHRD_6LOWPAN)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UNREGISTER:
		spin_lock(&devices_lock);
		list_for_each_entry(entry, &bt_6lowpan_devices, list) {
			if (entry->netdev == netdev) {
				BT_DBG("Unregistered netdev %s %p",
				       netdev->name, netdev);
				list_del(&entry->list);
				break;
			}
		}
		spin_unlock(&devices_lock);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block bt_6lowpan_dev_notifier = {
	.notifier_call = device_event,
};

static int __init bt_6lowpan_init(void)
{
	lowpan_enable_debugfs = debugfs_create_file_unsafe("6lowpan_enable",
							   0644, bt_debugfs,
							   NULL,
							   &lowpan_enable_fops);
	lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644,
						     bt_debugfs, NULL,
						     &lowpan_control_fops);

	return register_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

static void __exit bt_6lowpan_exit(void)
{
	debugfs_remove(lowpan_enable_debugfs);
	debugfs_remove(lowpan_control_debugfs);

	if (listen_chan) {
		l2cap_chan_close(listen_chan, 0);
		l2cap_chan_put(listen_chan);
	}

	disconnect_devices();

	unregister_netdevice_notifier(&bt_6lowpan_dev_notifier);
}

module_init(bt_6lowpan_init);
module_exit(bt_6lowpan_exit);

MODULE_AUTHOR("Jukka Rissanen <jukka.rissanen@linux.intel.com>");
MODULE_DESCRIPTION("Bluetooth 6LoWPAN");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");