/*
 * L2TP core.
 *
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 *
 * This file contains some code of the original L2TPv2 pppol2tp
 * driver, which has the following copyright:
 *
 * Authors:	Martijn van Oosterhout <[email protected]>
 *		James Chapman ([email protected])
 * Contributors:
 *		Michal Ostrowski <[email protected]>
 *		Arnaldo Carvalho de Melo <[email protected]>
 *		David S. Miller ([email protected])
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/l2tp.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/file.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/protocol.h>

#include <asm/byteorder.h>
#include <asm/atomic.h>

#include "l2tp_core.h"

#define L2TP_DRV_VERSION	"V2.0"

/* L2TP header constants */
#define L2TP_HDRFLAG_T		0x8000
#define L2TP_HDRFLAG_L		0x4000
#define L2TP_HDRFLAG_S		0x0800
#define L2TP_HDRFLAG_O		0x0200
#define L2TP_HDRFLAG_P		0x0100

#define L2TP_HDR_VER_MASK	0x000F
#define L2TP_HDR_VER_2		0x0002
#define L2TP_HDR_VER_3		0x0003

/* L2TPv3 default L2-specific sublayer */
#define L2TP_SLFLAG_S		0x40000000
#define L2TP_SL_SEQ_MASK	0x00ffffff

#define L2TP_HDR_SIZE_SEQ	10
#define L2TP_HDR_SIZE_NOSEQ	6

/* Default trace flags */
#define L2TP_DEFAULT_DEBUG_FLAGS	0

#define PRINTK(_mask, _type, _lvl, _fmt, args...)			\
	do {								\
		if ((_mask) & (_type))					\
			printk(_lvl "L2TP: " _fmt, ##args);		\
	} while (0)

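/* Usage sketch (illustrative only, not part of the original driver): a
 * message is emitted only when the corresponding L2TP_MSG_* bit is set in
 * the supplied debug mask, so a session created with L2TP_MSG_SEQ enabled
 * would log sequence-number updates while others stay silent.  The
 * variable names below are hypothetical.
 *
 *	PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
 *	       "%s: ns=%u nr=%u\n", session->name, session->ns, session->nr);
 */
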
/* Private data stored for received packets in the skb.
 */
struct l2tp_skb_cb {
	u32			ns;
	u16			has_seq;
	u16			length;
	unsigned long		expires;
};

#define L2TP_SKB_CB(skb)	((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])

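/* Note (added for clarity, a sketch only): l2tp_skb_cb is placed in
 * skb->cb[] immediately after the IP layer's struct inet_skb_parm, so the
 * two structures together must fit within the 48-byte cb[] area.  A
 * compile-time assertion along these lines would make that assumption
 * explicit:
 *
 *	BUILD_BUG_ON(sizeof(struct inet_skb_parm) + sizeof(struct l2tp_skb_cb) >
 *		     sizeof(((struct sk_buff *)0)->cb));
 */
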
static atomic_t l2tp_tunnel_count;
static atomic_t l2tp_session_count;

/* per-net private data for this module */
static unsigned int l2tp_net_id;
struct l2tp_net {
	struct list_head l2tp_tunnel_list;
	spinlock_t l2tp_tunnel_list_lock;
	struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
	spinlock_t l2tp_session_hlist_lock;
};

static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);

static inline struct l2tp_net *l2tp_pernet(struct net *net)
{
	BUG_ON(!net);

	return net_generic(net, l2tp_net_id);
}


/* Tunnel reference counts. Incremented per session that is added to
 * the tunnel.
 */
static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
{
	atomic_inc(&tunnel->ref_count);
}

static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
{
	if (atomic_dec_and_test(&tunnel->ref_count))
		l2tp_tunnel_free(tunnel);
}
#ifdef L2TP_REFCNT_DEBUG
#define l2tp_tunnel_inc_refcount(_t) do { \
		printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
		l2tp_tunnel_inc_refcount_1(_t);	\
	} while (0)
#define l2tp_tunnel_dec_refcount(_t) do { \
		printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
		l2tp_tunnel_dec_refcount_1(_t);	\
	} while (0)
#else
#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
#endif

/* Session hash global list for L2TPv3.
 * The session_id SHOULD be random according to RFC3931, but several
 * L2TP implementations use incrementing session_ids.  So we do a real
 * hash on the session_id, rather than a simple bitmask.
 */
static inline struct hlist_head *
l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
{
	return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];

}

/* Lookup a session by id in the global session list
 */
static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct hlist_head *session_list =
		l2tp_session_id_hash_2(pn, session_id);
	struct l2tp_session *session;
	struct hlist_node *walk;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) {
		if (session->session_id == session_id) {
			rcu_read_unlock_bh();
			return session;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}

/* Session hash list.
 * The session_id SHOULD be random according to RFC2661, but several
 * L2TP implementations (Cisco and Microsoft) use incrementing
 * session_ids.  So we do a real hash on the session_id, rather than a
 * simple bitmask.
 */
static inline struct hlist_head *
l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
{
	return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
}

/* Lookup a session by id
 */
struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id)
{
	struct hlist_head *session_list;
	struct l2tp_session *session;
	struct hlist_node *walk;

	/* In L2TPv3, session_ids are unique over all tunnels and we
	 * sometimes need to look them up before we know the
	 * tunnel.
	 */
	if (tunnel == NULL)
		return l2tp_session_find_2(net, session_id);

	session_list = l2tp_session_id_hash(tunnel, session_id);
	read_lock_bh(&tunnel->hlist_lock);
	hlist_for_each_entry(session, walk, session_list, hlist) {
		if (session->session_id == session_id) {
			read_unlock_bh(&tunnel->hlist_lock);
			return session;
		}
	}
	read_unlock_bh(&tunnel->hlist_lock);

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_find);

struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
{
	int hash;
	struct hlist_node *walk;
	struct l2tp_session *session;
	int count = 0;

	read_lock_bh(&tunnel->hlist_lock);
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
		hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) {
			if (++count > nth) {
				read_unlock_bh(&tunnel->hlist_lock);
				return session;
			}
		}
	}

	read_unlock_bh(&tunnel->hlist_lock);

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_find_nth);

/* Lookup a session by interface name.
 * This is very inefficient but is only used by management interfaces.
 */
struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	int hash;
	struct hlist_node *walk;
	struct l2tp_session *session;

	rcu_read_lock_bh();
	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
		hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) {
			if (!strcmp(session->ifname, ifname)) {
				rcu_read_unlock_bh();
				return session;
			}
		}
	}

	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);

/* Lookup a tunnel by id
 */
struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
{
	struct l2tp_tunnel *tunnel;
	struct l2tp_net *pn = l2tp_pernet(net);

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		if (tunnel->tunnel_id == tunnel_id) {
			rcu_read_unlock_bh();
			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_find);

struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel;
	int count = 0;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
		if (++count > nth) {
			rcu_read_unlock_bh();
			return tunnel;
		}
	}

	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);

/*****************************************************************************
 * Receive data handling
 *****************************************************************************/

/* Queue a skb in order. We come here only if the skb has an L2TP sequence
 * number.
 */
static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct sk_buff *skbp;
	struct sk_buff *tmp;
	u32 ns = L2TP_SKB_CB(skb)->ns;

	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
		if (L2TP_SKB_CB(skbp)->ns > ns) {
			__skb_queue_before(&session->reorder_q, skbp, skb);
			PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
			       "%s: pkt %u, inserted before %u, reorder_q len=%d\n",
			       session->name, ns, L2TP_SKB_CB(skbp)->ns,
			       skb_queue_len(&session->reorder_q));
			session->stats.rx_oos_packets++;
			goto out;
		}
	}

	__skb_queue_tail(&session->reorder_q, skb);

out:
	spin_unlock_bh(&session->reorder_q.lock);
}

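/* Worked example (illustrative, not from the original source): with
 * reordering enabled and session->nr == 3, data packets arriving as ns=3,
 * ns=5, ns=4 are handled as follows.  ns=3 matches nr and is delivered
 * straight away by l2tp_recv_dequeue(), which advances nr to 4.  ns=5 is
 * queued but held because nr==4 is still missing.  When ns=4 arrives,
 * l2tp_recv_queue_skb() inserts it before ns=5 and both are then
 * delivered in order.  If ns=4 never arrives, ns=5 is discarded once it
 * has sat in reorder_q for longer than reorder_timeout.
 */
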
/* Dequeue a single skb.
 */
static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int length = L2TP_SKB_CB(skb)->length;

	/* We're about to requeue the skb, so return resources
	 * to its current owner (a socket receive buffer).
	 */
	skb_orphan(skb);

	tunnel->stats.rx_packets++;
	tunnel->stats.rx_bytes += length;
	session->stats.rx_packets++;
	session->stats.rx_bytes += length;

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Bump our Nr */
		session->nr++;
		if (tunnel->version == L2TP_HDR_VER_2)
			session->nr &= 0xffff;
		else
			session->nr &= 0xffffff;

		PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
		       "%s: updated nr to %u\n", session->name, session->nr);
	}

	/* call private receive handler */
	if (session->recv_skb != NULL)
		(*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
	else
		kfree_skb(skb);

	if (session->deref)
		(*session->deref)(session);
}

/* Dequeue skbs from the session's reorder_q, subject to packet order.
 * Skbs that have been in the queue for too long are simply discarded.
 */
static void l2tp_recv_dequeue(struct l2tp_session *session)
{
	struct sk_buff *skb;
	struct sk_buff *tmp;

	/* If the pkt at the head of the queue has the nr that we
	 * expect to send up next, dequeue it and any other
	 * in-sequence packets behind it.
	 */
	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
		if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
			session->stats.rx_seq_discards++;
			session->stats.rx_errors++;
			PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
			       "%s: oos pkt %u len %d discarded (too old), "
			       "waiting for %u, reorder_q_len=%d\n",
			       session->name, L2TP_SKB_CB(skb)->ns,
			       L2TP_SKB_CB(skb)->length, session->nr,
			       skb_queue_len(&session->reorder_q));
			__skb_unlink(skb, &session->reorder_q);
			kfree_skb(skb);
			if (session->deref)
				(*session->deref)(session);
			continue;
		}

		if (L2TP_SKB_CB(skb)->has_seq) {
			if (L2TP_SKB_CB(skb)->ns != session->nr) {
				PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
				       "%s: holding oos pkt %u len %d, "
				       "waiting for %u, reorder_q_len=%d\n",
				       session->name, L2TP_SKB_CB(skb)->ns,
				       L2TP_SKB_CB(skb)->length, session->nr,
				       skb_queue_len(&session->reorder_q));
				goto out;
			}
		}
		__skb_unlink(skb, &session->reorder_q);

		/* Process the skb. We release the queue lock while we
		 * do so to let other contexts process the queue.
		 */
		spin_unlock_bh(&session->reorder_q.lock);
		l2tp_recv_dequeue_skb(session, skb);
		spin_lock_bh(&session->reorder_q.lock);
	}

out:
	spin_unlock_bh(&session->reorder_q.lock);
}

static inline int l2tp_verify_udp_checksum(struct sock *sk,
					   struct sk_buff *skb)
{
	struct udphdr *uh = udp_hdr(skb);
	u16 ulen = ntohs(uh->len);
	struct inet_sock *inet;
	__wsum psum;

	if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check)
		return 0;

	inet = inet_sk(sk);
	psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen,
				  IPPROTO_UDP, 0);

	if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/* Do receive processing of L2TP data frames. We handle both L2TPv2
 * and L2TPv3 data frames here.
 *
 * L2TPv2 Data Message Header
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |           Tunnel ID           |           Session ID          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |             Ns (opt)          |             Nr (opt)          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |      Offset Size (opt)        |    Offset pad... (opt)
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Data frames are marked by T=0. All other fields are the same as
 * those in L2TP control frames.
 *
 * L2TPv3 Data Message Header
 *
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      L2TP Session Header                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     L2-Specific Sublayer                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                        Tunnel Payload                      ...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 Session Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           Session ID                          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Cookie (optional, maximum 64 bits)...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *                                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 L2-Specific Sublayer Format
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |x|S|x|x|x|x|x|x|              Sequence Number                  |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Cookie value, sublayer format and offset (pad) are negotiated with
 * the peer when the session is set up. Unlike L2TPv2, we do not need
 * to parse the packet header to determine if optional fields are
 * present.
 *
 * Caller must already have parsed the frame and determined that it is
 * a data (not control) frame before coming here. Fields up to the
 * session-id have already been parsed and ptr points to the data
 * after the session-id.
 */
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
		      int length, int (*payload_hook)(struct sk_buff *skb))
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int offset;
	u32 ns, nr;

	/* The ref count is increased since we now hold a pointer to
	 * the session. Take care to decrement the refcnt when exiting
	 * this function from now on...
	 */
	l2tp_session_inc_refcount(session);
	if (session->ref)
		(*session->ref)(session);

	/* Parse and check optional cookie */
	if (session->peer_cookie_len > 0) {
		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
			PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
			       "%s: cookie mismatch (%u/%u). Discarding.\n",
			       tunnel->name, tunnel->tunnel_id, session->session_id);
			session->stats.rx_cookie_discards++;
			goto discard;
		}
		ptr += session->peer_cookie_len;
	}

	/* Handle the optional sequence numbers. Sequence numbers are
	 * in different places for L2TPv2 and L2TPv3.
	 *
	 * If we are the LAC, enable/disable sequence numbers under
	 * the control of the LNS. If no sequence numbers present but
	 * we were expecting them, discard frame.
	 */
	ns = nr = 0;
	L2TP_SKB_CB(skb)->has_seq = 0;
	if (tunnel->version == L2TP_HDR_VER_2) {
		if (hdrflags & L2TP_HDRFLAG_S) {
			ns = ntohs(*(__be16 *) ptr);
			ptr += 2;
			nr = ntohs(*(__be16 *) ptr);
			ptr += 2;

			/* Store L2TP info in the skb */
			L2TP_SKB_CB(skb)->ns = ns;
			L2TP_SKB_CB(skb)->has_seq = 1;

			PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
			       "%s: recv data ns=%u, nr=%u, session nr=%u\n",
			       session->name, ns, nr, session->nr);
		}
	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		u32 l2h = ntohl(*(__be32 *) ptr);

		if (l2h & 0x40000000) {
			ns = l2h & 0x00ffffff;

			/* Store L2TP info in the skb */
			L2TP_SKB_CB(skb)->ns = ns;
			L2TP_SKB_CB(skb)->has_seq = 1;

			PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
			       "%s: recv data ns=%u, session nr=%u\n",
			       session->name, ns, session->nr);
		}
	}

	/* Advance past L2-specific header, if present */
	ptr += session->l2specific_len;

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Received a packet with sequence numbers. If we're the LNS,
		 * check if we are sending sequence numbers and if not,
		 * configure it so.
		 */
		if ((!session->lns_mode) && (!session->send_seq)) {
			PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO,
			       "%s: requested to enable seq numbers by LNS\n",
			       session->name);
			session->send_seq = -1;
			l2tp_session_set_header_len(session, tunnel->version);
		}
	} else {
		/* No sequence numbers.
		 * If user has configured mandatory sequence numbers, discard.
		 */
		if (session->recv_seq) {
			PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
			       "%s: recv data has no seq numbers when required. "
			       "Discarding\n", session->name);
			session->stats.rx_seq_discards++;
			goto discard;
		}

		/* If we're the LAC and we're sending sequence numbers, the
		 * LNS has requested that we no longer send sequence numbers.
		 * If we're the LNS and we're sending sequence numbers, the
		 * LAC is broken. Discard the frame.
		 */
		if ((!session->lns_mode) && (session->send_seq)) {
			PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO,
			       "%s: requested to disable seq numbers by LNS\n",
			       session->name);
			session->send_seq = 0;
			l2tp_session_set_header_len(session, tunnel->version);
		} else if (session->send_seq) {
			PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
			       "%s: recv data has no seq numbers when required. "
			       "Discarding\n", session->name);
			session->stats.rx_seq_discards++;
			goto discard;
		}
	}

	/* Session data offset is handled differently for L2TPv2 and
	 * L2TPv3. For L2TPv2, there is an optional 16-bit value in
	 * the header. For L2TPv3, the offset is negotiated using AVPs
	 * in the session setup control protocol.
	 */
	if (tunnel->version == L2TP_HDR_VER_2) {
		/* If offset bit set, skip it. */
		if (hdrflags & L2TP_HDRFLAG_O) {
			offset = ntohs(*(__be16 *)ptr);
			ptr += 2 + offset;
		}
	} else
		ptr += session->offset;

	offset = ptr - optr;
	if (!pskb_may_pull(skb, offset))
		goto discard;

	__skb_pull(skb, offset);

	/* If caller wants to process the payload before we queue the
	 * packet, do so now.
	 */
	if (payload_hook)
		if ((*payload_hook)(skb))
			goto discard;

	/* Prepare skb for adding to the session's reorder_q. Hold
	 * packets for max reorder_timeout or 1 second if not
	 * reordering.
	 */
	L2TP_SKB_CB(skb)->length = length;
	L2TP_SKB_CB(skb)->expires = jiffies +
		(session->reorder_timeout ? session->reorder_timeout : HZ);

	/* Add packet to the session's receive queue. Reordering is done here, if
	 * enabled. Saved L2TP protocol info is stored in skb->cb[].
	 */
	if (L2TP_SKB_CB(skb)->has_seq) {
		if (session->reorder_timeout != 0) {
			/* Packet reordering enabled. Add skb to session's
			 * reorder queue, in order of ns.
			 */
			l2tp_recv_queue_skb(session, skb);
		} else {
			/* Packet reordering disabled. Discard out-of-sequence
			 * packets
			 */
			if (L2TP_SKB_CB(skb)->ns != session->nr) {
				session->stats.rx_seq_discards++;
				PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
				       "%s: oos pkt %u len %d discarded, "
				       "waiting for %u, reorder_q_len=%d\n",
				       session->name, L2TP_SKB_CB(skb)->ns,
				       L2TP_SKB_CB(skb)->length, session->nr,
				       skb_queue_len(&session->reorder_q));
				goto discard;
			}
			skb_queue_tail(&session->reorder_q, skb);
		}
	} else {
		/* No sequence numbers. Add the skb to the tail of the
		 * reorder queue. This ensures that it will be
		 * delivered after all previous sequenced skbs.
		 */
		skb_queue_tail(&session->reorder_q, skb);
	}

	/* Try to dequeue as many skbs from reorder_q as we can. */
	l2tp_recv_dequeue(session);

	l2tp_session_dec_refcount(session);

	return;

discard:
	session->stats.rx_errors++;
	kfree_skb(skb);

	if (session->deref)
		(*session->deref)(session);

	l2tp_session_dec_refcount(session);
}
EXPORT_SYMBOL(l2tp_recv_common);

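/* A pseudowire driver receives the decapsulated payload through the
 * session->recv_skb hook called above.  Hedged sketch of such a callback
 * (the function and helper names are hypothetical; the in-tree users are
 * the PPP and Ethernet pseudowire drivers):
 *
 *	static void example_recv_skb(struct l2tp_session *session,
 *				     struct sk_buff *skb, int data_len)
 *	{
 *		struct net_device *dev = example_session_to_dev(session);
 *
 *		skb->dev = dev;
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 *
 * The callback owns the skb and must either pass it up the stack or free
 * it; l2tp_recv_dequeue_skb() frees it only when no recv_skb hook is set.
 * example_session_to_dev() stands in for however the driver keeps its
 * per-session private data (l2tp_session_create() allocates priv_size
 * bytes after the session structure for exactly this purpose).
 */
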
/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
 * here. The skb is not on a list when we get here.
 * Returns 0 if the packet was a data packet and was successfully passed on.
 * Returns 1 if the packet was not a good data packet and could not be
 * forwarded.  All such packets are passed up to userspace to deal with.
 */
static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
			      int (*payload_hook)(struct sk_buff *skb))
{
	struct l2tp_session *session = NULL;
	unsigned char *ptr, *optr;
	u16 hdrflags;
	u32 tunnel_id, session_id;
	int offset;
	u16 version;
	int length;

	if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
		goto discard_bad_csum;

	/* UDP always verifies the packet length. */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Short packet? */
	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
		PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
		       "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
		goto error;
	}

	/* Point to L2TP header */
	optr = ptr = skb->data;

	/* Trace packet contents, if enabled */
	if (tunnel->debug & L2TP_MSG_DATA) {
		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto error;

		printk(KERN_DEBUG "%s: recv: ", tunnel->name);

		offset = 0;
		do {
			printk(" %02X", ptr[offset]);
		} while (++offset < length);

		printk("\n");
	}

	/* Get L2TP header flags */
	hdrflags = ntohs(*(__be16 *) ptr);

	/* Check protocol version */
	version = hdrflags & L2TP_HDR_VER_MASK;
	if (version != tunnel->version) {
		PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
		       "%s: recv protocol version mismatch: got %d expected %d\n",
		       tunnel->name, version, tunnel->version);
		goto error;
	}

	/* Get length of L2TP packet */
	length = skb->len;

	/* If type is control packet, it is handled by userspace. */
	if (hdrflags & L2TP_HDRFLAG_T) {
		PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
		       "%s: recv control packet, len=%d\n", tunnel->name, length);
		goto error;
	}

	/* Skip flags */
	ptr += 2;

	if (tunnel->version == L2TP_HDR_VER_2) {
		/* If length is present, skip it */
		if (hdrflags & L2TP_HDRFLAG_L)
			ptr += 2;

		/* Extract tunnel and session ID */
		tunnel_id = ntohs(*(__be16 *) ptr);
		ptr += 2;
		session_id = ntohs(*(__be16 *) ptr);
		ptr += 2;
	} else {
		ptr += 2;	/* skip reserved bits */
		tunnel_id = tunnel->tunnel_id;
		session_id = ntohl(*(__be32 *) ptr);
		ptr += 4;
	}

	/* Find the session context */
	session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
	if (!session || !session->recv_skb) {
		/* Not found? Pass to userspace to deal with */
		PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
		       "%s: no session found (%u/%u). Passing up.\n",
		       tunnel->name, tunnel_id, session_id);
		goto error;
	}

	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);

	return 0;

discard_bad_csum:
	LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
	UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
	tunnel->stats.rx_errors++;
	kfree_skb(skb);

	return 0;

error:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
}

/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 * Return codes:
 * 0 : success.
 * <0: error
 * >0: skb should be passed up to userspace as UDP.
 */
int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct l2tp_tunnel *tunnel;

	tunnel = l2tp_sock_to_tunnel(sk);
	if (tunnel == NULL)
		goto pass_up;

	PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
	       "%s: received %d bytes\n", tunnel->name, skb->len);

	if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
		goto pass_up_put;

	sock_put(sk);
	return 0;

pass_up_put:
	sock_put(sk);
pass_up:
	return 1;
}
EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);

/************************************************************************
 * Transmit handling
 ***********************************************************************/

/* Build an L2TP header for the session into the buffer provided.
 */
static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	__be16 *bufp = buf;
	__be16 *optr = buf;
	u16 flags = L2TP_HDR_VER_2;
	u32 tunnel_id = tunnel->peer_tunnel_id;
	u32 session_id = session->peer_session_id;

	if (session->send_seq)
		flags |= L2TP_HDRFLAG_S;

	/* Setup L2TP header. */
	*bufp++ = htons(flags);
	*bufp++ = htons(tunnel_id);
	*bufp++ = htons(session_id);
	if (session->send_seq) {
		*bufp++ = htons(session->ns);
		*bufp++ = 0;
		session->ns++;
		session->ns &= 0xffff;
		PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
		       "%s: updated ns to %u\n", session->name, session->ns);
	}

	return bufp - optr;
}

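/* Worked example (illustrative only): for a session with send_seq
 * enabled, peer_tunnel_id 5, peer_session_id 7 and ns 1, the code above
 * emits the 10-byte L2TPv2 data header
 *
 *	08 02 00 05 00 07 00 01 00 00
 *
 * i.e. flags 0x0802 (S bit plus version 2), tunnel id 5, session id 7,
 * Ns 1 and Nr 0.  Without send_seq the header is 6 bytes and the flags
 * word is just 0x0002.
 */
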
static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	char *bufp = buf;
	char *optr = bufp;

	/* Setup L2TP header. The header differs slightly for UDP and
	 * IP encapsulations. For UDP, there is 4 bytes of flags.
	 */
	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		u16 flags = L2TP_HDR_VER_3;
		*((__be16 *) bufp) = htons(flags);
		bufp += 2;
		*((__be16 *) bufp) = 0;
		bufp += 2;
	}

	*((__be32 *) bufp) = htonl(session->peer_session_id);
	bufp += 4;
	if (session->cookie_len) {
		memcpy(bufp, &session->cookie[0], session->cookie_len);
		bufp += session->cookie_len;
	}
	if (session->l2specific_len) {
		if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
			u32 l2h = 0;
			if (session->send_seq) {
				l2h = 0x40000000 | session->ns;
				session->ns++;
				session->ns &= 0xffffff;
				PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
				       "%s: updated ns to %u\n", session->name, session->ns);
			}

			*((__be32 *) bufp) = htonl(l2h);
		}
		bufp += session->l2specific_len;
	}
	if (session->offset)
		bufp += session->offset;

	return bufp - optr;
}

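/* Worked example (illustrative only): for a UDP-encapsulated L2TPv3
 * session using the default L2-specific sublayer, send_seq enabled,
 * peer_session_id 0x11223344, no cookie, offset 0 and ns 9, the code
 * above emits the 12-byte header
 *
 *	00 03 00 00 11 22 33 44 40 00 00 09
 *
 * i.e. a 4-byte flags/version word (version 3), the 32-bit session id,
 * and a 4-byte L2-specific sublayer carrying the S bit (0x40000000) and
 * a 24-bit sequence number.  For IP encapsulation the leading 4 flag
 * bytes are omitted.
 */
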
static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
			  struct flowi *fl, size_t data_len)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	unsigned int len = skb->len;
	int error;

	/* Debug */
	if (session->send_seq)
		PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG,
		       "%s: send %Zd bytes, ns=%u\n", session->name,
		       data_len, session->ns - 1);
	else
		PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG,
		       "%s: send %Zd bytes\n", session->name, data_len);

	if (session->debug & L2TP_MSG_DATA) {
		int i;
		int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
		unsigned char *datap = skb->data + uhlen;

		printk(KERN_DEBUG "%s: xmit:", session->name);
		for (i = 0; i < (len - uhlen); i++) {
			printk(" %02X", *datap++);
			if (i == 31) {
				printk(" ...");
				break;
			}
		}
		printk("\n");
	}

	/* Queue the packet to IP for output */
	skb->local_df = 1;
	error = ip_queue_xmit(skb, fl);

	/* Update stats */
	if (error >= 0) {
		tunnel->stats.tx_packets++;
		tunnel->stats.tx_bytes += len;
		session->stats.tx_packets++;
		session->stats.tx_bytes += len;
	} else {
		tunnel->stats.tx_errors++;
		session->stats.tx_errors++;
	}

	return 0;
}

/* Automatically called when the skb is freed.
 */
static void l2tp_sock_wfree(struct sk_buff *skb)
{
	sock_put(skb->sk);
}

/* For data skbs that we transmit, we associate with the tunnel socket
 * but don't do accounting.
 */
static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = l2tp_sock_wfree;
}

/* If caller requires the skb to have a ppp header, the header must be
 * inserted in the skb data before calling this function.
 */
int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
{
	int data_len = skb->len;
	struct l2tp_tunnel *tunnel = session->tunnel;
	struct sock *sk = tunnel->sock;
	struct flowi *fl;
	struct udphdr *uh;
	struct inet_sock *inet;
	__wsum csum;
	int old_headroom;
	int new_headroom;
	int headroom;
	int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
	int udp_len;

	/* Check that there's enough headroom in the skb to insert IP,
	 * UDP and L2TP headers. If not enough, expand it to
	 * make room. Adjust truesize.
	 */
	headroom = NET_SKB_PAD + sizeof(struct iphdr) +
		uhlen + hdr_len;
	old_headroom = skb_headroom(skb);
	if (skb_cow_head(skb, headroom))
		goto abort;

	new_headroom = skb_headroom(skb);
	skb_orphan(skb);
	skb->truesize += new_headroom - old_headroom;

	/* Setup L2TP header */
	session->build_header(session, __skb_push(skb, hdr_len));

	/* Reset skb netfilter state */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	nf_reset(skb);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	/* Get routing info from the tunnel socket */
	skb_dst_drop(skb);
	skb_dst_set(skb, dst_clone(__sk_dst_get(sk)));

	inet = inet_sk(sk);
	fl = &inet->cork.fl;
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* Setup UDP header */
		__skb_push(skb, sizeof(*uh));
		skb_reset_transport_header(skb);
		uh = udp_hdr(skb);
		uh->source = inet->inet_sport;
		uh->dest = inet->inet_dport;
		udp_len = uhlen + hdr_len + data_len;
		uh->len = htons(udp_len);
		uh->check = 0;

		/* Calculate UDP checksum if configured to do so */
		if (sk->sk_no_check == UDP_CSUM_NOXMIT)
			skb->ip_summed = CHECKSUM_NONE;
		else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
			 (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			csum = skb_checksum(skb, 0, udp_len, 0);
			uh->check = csum_tcpudp_magic(inet->inet_saddr,
						      inet->inet_daddr,
						      udp_len, IPPROTO_UDP, csum);
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		} else {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = offsetof(struct udphdr, check);
			uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
						       inet->inet_daddr,
						       udp_len, IPPROTO_UDP, 0);
		}
		break;

	case L2TP_ENCAPTYPE_IP:
		break;
	}

	l2tp_skb_set_owner_w(skb, sk);

	l2tp_xmit_core(session, skb, fl, data_len);
out_unlock:
	bh_unlock_sock(sk);

abort:
	return 0;
}
EXPORT_SYMBOL_GPL(l2tp_xmit_skb);

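/* Hedged usage sketch (not part of the original code): a pseudowire
 * driver transmits a frame by making sure there is headroom for the
 * encapsulation, then handing the skb to l2tp_xmit_skb() together with
 * the session's precomputed header length.  The function name below is
 * illustrative.
 *
 *	static int example_xmit(struct sk_buff *skb, struct l2tp_session *session)
 *	{
 *		if (skb_cow_head(skb, session->hdr_len)) {
 *			kfree_skb(skb);
 *			return NET_XMIT_DROP;
 *		}
 *		return l2tp_xmit_skb(session, skb, session->hdr_len);
 *	}
 */
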
/*****************************************************************************
 * Tunnel and session create/destroy.
 *****************************************************************************/

/* Tunnel socket destruct hook.
 * The tunnel context is deleted only when all session sockets have been
 * closed.
 */
static void l2tp_tunnel_destruct(struct sock *sk)
{
	struct l2tp_tunnel *tunnel;

	tunnel = sk->sk_user_data;
	if (tunnel == NULL)
		goto end;

	PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
	       "%s: closing...\n", tunnel->name);

	/* Close all sessions */
	l2tp_tunnel_closeall(tunnel);

	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* No longer an encapsulation socket. See net/ipv4/udp.c */
		(udp_sk(sk))->encap_type = 0;
		(udp_sk(sk))->encap_rcv = NULL;
		break;
	case L2TP_ENCAPTYPE_IP:
		break;
	}

	/* Remove hooks into tunnel socket */
	tunnel->sock = NULL;
	sk->sk_destruct = tunnel->old_sk_destruct;
	sk->sk_user_data = NULL;

	/* Call the original destructor */
	if (sk->sk_destruct)
		(*sk->sk_destruct)(sk);

	/* We're finished with the socket */
	l2tp_tunnel_dec_refcount(tunnel);

end:
	return;
}

/* When the tunnel is closed, all the attached sessions need to go too.
 */
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
	int hash;
	struct hlist_node *walk;
	struct hlist_node *tmp;
	struct l2tp_session *session;

	BUG_ON(tunnel == NULL);

	PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
	       "%s: closing all sessions...\n", tunnel->name);

	write_lock_bh(&tunnel->hlist_lock);
	for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
again:
		hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
			session = hlist_entry(walk, struct l2tp_session, hlist);

			PRINTK(session->debug, L2TP_MSG_CONTROL, KERN_INFO,
			       "%s: closing session\n", session->name);

			hlist_del_init(&session->hlist);

			/* Since we should hold the sock lock while
			 * doing any unbinding, we need to release the
			 * lock we're holding before taking that lock.
			 * Hold a reference to the sock so it doesn't
			 * disappear as we're jumping between locks.
			 */
			if (session->ref != NULL)
				(*session->ref)(session);

			write_unlock_bh(&tunnel->hlist_lock);

			if (tunnel->version != L2TP_HDR_VER_2) {
				struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

				spin_lock_bh(&pn->l2tp_session_hlist_lock);
				hlist_del_init_rcu(&session->global_hlist);
				spin_unlock_bh(&pn->l2tp_session_hlist_lock);
				synchronize_rcu();
			}

			if (session->session_close != NULL)
				(*session->session_close)(session);

			if (session->deref != NULL)
				(*session->deref)(session);

			write_lock_bh(&tunnel->hlist_lock);

			/* Now restart from the beginning of this hash
			 * chain. We always remove a session from the
			 * list so we are guaranteed to make forward
			 * progress.
			 */
			goto again;
		}
	}
	write_unlock_bh(&tunnel->hlist_lock);
}

/* Really kill the tunnel.
 * Come here only when all sessions have been cleared from the tunnel.
 */
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
	struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

	BUG_ON(atomic_read(&tunnel->ref_count) != 0);
	BUG_ON(tunnel->sock != NULL);

	PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
	       "%s: free...\n", tunnel->name);

	/* Remove from tunnel list */
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_del_rcu(&tunnel->list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
	synchronize_rcu();

	atomic_dec(&l2tp_tunnel_count);
	kfree(tunnel);
}

/* Create a socket for the tunnel, if one isn't set up by
 * userspace. This is used for static tunnels where there is no
 * managing L2TP daemon.
 */
static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct socket **sockp)
{
	int err = -EINVAL;
	struct sockaddr_in udp_addr;
	struct sockaddr_l2tpip ip_addr;
	struct socket *sock = NULL;

	switch (cfg->encap) {
	case L2TP_ENCAPTYPE_UDP:
		err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp);
		if (err < 0)
			goto out;

		sock = *sockp;

		memset(&udp_addr, 0, sizeof(udp_addr));
		udp_addr.sin_family = AF_INET;
		udp_addr.sin_addr = cfg->local_ip;
		udp_addr.sin_port = htons(cfg->local_udp_port);
		err = kernel_bind(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr));
		if (err < 0)
			goto out;

		udp_addr.sin_family = AF_INET;
		udp_addr.sin_addr = cfg->peer_ip;
		udp_addr.sin_port = htons(cfg->peer_udp_port);
		err = kernel_connect(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr), 0);
		if (err < 0)
			goto out;

		if (!cfg->use_udp_checksums)
			sock->sk->sk_no_check = UDP_CSUM_NOXMIT;

		break;

	case L2TP_ENCAPTYPE_IP:
		err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, sockp);
		if (err < 0)
			goto out;

		sock = *sockp;

		memset(&ip_addr, 0, sizeof(ip_addr));
		ip_addr.l2tp_family = AF_INET;
		ip_addr.l2tp_addr = cfg->local_ip;
		ip_addr.l2tp_conn_id = tunnel_id;
		err = kernel_bind(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr));
		if (err < 0)
			goto out;

		ip_addr.l2tp_family = AF_INET;
		ip_addr.l2tp_addr = cfg->peer_ip;
		ip_addr.l2tp_conn_id = peer_tunnel_id;
		err = kernel_connect(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr), 0);
		if (err < 0)
			goto out;

		break;

	default:
		goto out;
	}

out:
	if ((err < 0) && sock) {
		sock_release(sock);
		*sockp = NULL;
	}

	return err;
}

int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
{
	struct l2tp_tunnel *tunnel = NULL;
	int err;
	struct socket *sock = NULL;
	struct sock *sk = NULL;
	struct l2tp_net *pn;
	enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;

	/* Get the tunnel socket from the fd, which was opened by
	 * the userspace L2TP daemon. If not specified, create a
	 * kernel socket.
	 */
	if (fd < 0) {
		err = l2tp_tunnel_sock_create(tunnel_id, peer_tunnel_id, cfg, &sock);
		if (err < 0)
			goto err;
	} else {
		err = -EBADF;
		sock = sockfd_lookup(fd, &err);
		if (!sock) {
			printk(KERN_ERR "tunl %u: sockfd_lookup(fd=%d) returned %d\n",
			       tunnel_id, fd, err);
			goto err;
		}
	}

	sk = sock->sk;

	if (cfg != NULL)
		encap = cfg->encap;

	/* Quick sanity checks */
	switch (encap) {
	case L2TP_ENCAPTYPE_UDP:
		err = -EPROTONOSUPPORT;
		if (sk->sk_protocol != IPPROTO_UDP) {
			printk(KERN_ERR "tunl %u: fd %d wrong protocol, got %d, expected %d\n",
			       tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
			goto err;
		}
		break;
	case L2TP_ENCAPTYPE_IP:
		err = -EPROTONOSUPPORT;
		if (sk->sk_protocol != IPPROTO_L2TP) {
			printk(KERN_ERR "tunl %u: fd %d wrong protocol, got %d, expected %d\n",
			       tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
			goto err;
		}
		break;
	}

	/* Check if this socket has already been prepped */
	tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
	if (tunnel != NULL) {
		/* This socket has already been prepped */
		err = -EBUSY;
		goto err;
	}

	tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
	if (tunnel == NULL) {
		err = -ENOMEM;
		goto err;
	}

	tunnel->version = version;
	tunnel->tunnel_id = tunnel_id;
	tunnel->peer_tunnel_id = peer_tunnel_id;
	tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;

	tunnel->magic = L2TP_TUNNEL_MAGIC;
	sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
	rwlock_init(&tunnel->hlist_lock);

	/* The net we belong to */
	tunnel->l2tp_net = net;
	pn = l2tp_pernet(net);

	if (cfg != NULL)
		tunnel->debug = cfg->debug;

	/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
	tunnel->encap = encap;
	if (encap == L2TP_ENCAPTYPE_UDP) {
		/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
		udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
		udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
	}

	sk->sk_user_data = tunnel;

	/* Hook on the tunnel socket destructor so that we can cleanup
	 * if the tunnel socket goes away.
	 */
	tunnel->old_sk_destruct = sk->sk_destruct;
	sk->sk_destruct = &l2tp_tunnel_destruct;
	tunnel->sock = sk;
	sk->sk_allocation = GFP_ATOMIC;

	/* Add tunnel to our list */
	INIT_LIST_HEAD(&tunnel->list);
	atomic_inc(&l2tp_tunnel_count);

	/* Bump the reference count. The tunnel context is deleted
	 * only when this drops to zero. Must be done before list insertion
	 */
	l2tp_tunnel_inc_refcount(tunnel);
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);

	err = 0;
err:
	if (tunnelp)
		*tunnelp = tunnel;

	/* If tunnel's socket was created by the kernel, it doesn't
	 * have a file.
	 */
	if (sock && sock->file)
		sockfd_put(sock);

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_create);

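/* Hedged usage sketch (illustrative, not part of the original code):
 * creating an unmanaged L2TPv3 tunnel over UDP on a kernel-owned socket.
 * The address, port and id values are made up.
 *
 *	struct l2tp_tunnel_cfg cfg = {
 *		.encap			= L2TP_ENCAPTYPE_UDP,
 *		.local_ip.s_addr	= htonl(INADDR_ANY),
 *		.peer_ip.s_addr		= htonl(0xc0a80001),
 *		.local_udp_port		= 1701,
 *		.peer_udp_port		= 1701,
 *		.use_udp_checksums	= 1,
 *	};
 *	struct l2tp_tunnel *tunnel;
 *	int err;
 *
 *	err = l2tp_tunnel_create(net, -1, 3, 42, 43, &cfg, &tunnel);
 *
 * Passing fd == -1 makes l2tp_tunnel_sock_create() open, bind and
 * connect the socket itself; a managing daemon would instead pass the
 * fd of its own UDP or L2TPIP socket.
 */
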
/* This function is used by the netlink TUNNEL_DELETE command.
 */
int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
	int err = 0;
	struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL;

	/* Force the tunnel socket to close. This will eventually
	 * cause the tunnel to be deleted via the normal socket close
	 * mechanisms when userspace closes the tunnel socket.
	 */
	if (sock != NULL) {
		err = inet_shutdown(sock, 2);

		/* If the tunnel's socket was created by the kernel,
		 * close the socket here since the socket was not
		 * created by userspace.
		 */
		if (sock->file == NULL)
			err = inet_release(sock);
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);

/* Really kill the session.
 */
void l2tp_session_free(struct l2tp_session *session)
{
	struct l2tp_tunnel *tunnel;

	BUG_ON(atomic_read(&session->ref_count) != 0);

	tunnel = session->tunnel;
	if (tunnel != NULL) {
		BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);

		/* Delete the session from the hash */
		write_lock_bh(&tunnel->hlist_lock);
		hlist_del_init(&session->hlist);
		write_unlock_bh(&tunnel->hlist_lock);

		/* Unlink from the global hash if not L2TPv2 */
		if (tunnel->version != L2TP_HDR_VER_2) {
			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

			spin_lock_bh(&pn->l2tp_session_hlist_lock);
			hlist_del_init_rcu(&session->global_hlist);
			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
			synchronize_rcu();
		}

		if (session->session_id != 0)
			atomic_dec(&l2tp_session_count);

		sock_put(tunnel->sock);

		/* This will delete the tunnel context if this
		 * is the last session on the tunnel.
		 */
		session->tunnel = NULL;
		l2tp_tunnel_dec_refcount(tunnel);
	}

	kfree(session);

	return;
}
EXPORT_SYMBOL_GPL(l2tp_session_free);

/* This function is used by the netlink SESSION_DELETE command and by
   pseudowire modules.
 */
int l2tp_session_delete(struct l2tp_session *session)
{
	if (session->session_close != NULL)
		(*session->session_close)(session);

	l2tp_session_dec_refcount(session);

	return 0;
}
EXPORT_SYMBOL_GPL(l2tp_session_delete);


/* We come here whenever a session's send_seq, cookie_len or
 * l2specific_len parameters are set.
 */
static void l2tp_session_set_header_len(struct l2tp_session *session, int version)
{
	if (version == L2TP_HDR_VER_2) {
		session->hdr_len = 6;
		if (session->send_seq)
			session->hdr_len += 4;
	} else {
		session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
		if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
			session->hdr_len += 4;
	}

}

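/* Worked example (for illustration): an L2TPv2 session with send_seq
 * enabled gets hdr_len = 6 + 4 = 10 (flags, tunnel id, session id, Ns,
 * Nr), matching L2TP_HDR_SIZE_SEQ.  An L2TPv3 session over UDP with a
 * 4-byte cookie, the 4-byte default L2-specific sublayer and no offset
 * gets hdr_len = 4 + 4 + 4 + 0 + 4 = 16 (session id, cookie, sublayer,
 * plus the 4-byte UDP flags/version word).
 */
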
struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
	struct l2tp_session *session;

	session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
	if (session != NULL) {
		session->magic = L2TP_SESSION_MAGIC;
		session->tunnel = tunnel;

		session->session_id = session_id;
		session->peer_session_id = peer_session_id;
		session->nr = 1;

		sprintf(&session->name[0], "sess %u/%u",
			tunnel->tunnel_id, session->session_id);

		skb_queue_head_init(&session->reorder_q);

		INIT_HLIST_NODE(&session->hlist);
		INIT_HLIST_NODE(&session->global_hlist);

		/* Inherit debug options from tunnel */
		session->debug = tunnel->debug;

		if (cfg) {
			session->pwtype = cfg->pw_type;
			session->debug = cfg->debug;
			session->mtu = cfg->mtu;
			session->mru = cfg->mru;
			session->send_seq = cfg->send_seq;
			session->recv_seq = cfg->recv_seq;
			session->lns_mode = cfg->lns_mode;
			session->reorder_timeout = cfg->reorder_timeout;
			session->offset = cfg->offset;
			session->l2specific_type = cfg->l2specific_type;
			session->l2specific_len = cfg->l2specific_len;
			session->cookie_len = cfg->cookie_len;
			memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
			session->peer_cookie_len = cfg->peer_cookie_len;
			memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
		}

		if (tunnel->version == L2TP_HDR_VER_2)
			session->build_header = l2tp_build_l2tpv2_header;
		else
			session->build_header = l2tp_build_l2tpv3_header;

		l2tp_session_set_header_len(session, tunnel->version);

		/* Bump the reference count. The session context is deleted
		 * only when this drops to zero.
		 */
		l2tp_session_inc_refcount(session);
		l2tp_tunnel_inc_refcount(tunnel);

		/* Ensure tunnel socket isn't deleted */
		sock_hold(tunnel->sock);

		/* Add session to the tunnel's hash list */
		write_lock_bh(&tunnel->hlist_lock);
		hlist_add_head(&session->hlist,
			       l2tp_session_id_hash(tunnel, session_id));
		write_unlock_bh(&tunnel->hlist_lock);

		/* And to the global session list if L2TPv3 */
		if (tunnel->version != L2TP_HDR_VER_2) {
			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);

			spin_lock_bh(&pn->l2tp_session_hlist_lock);
			hlist_add_head_rcu(&session->global_hlist,
					   l2tp_session_id_hash_2(pn, session_id));
			spin_unlock_bh(&pn->l2tp_session_hlist_lock);
		}

		/* Ignore management session in session count value */
		if (session->session_id != 0)
			atomic_inc(&l2tp_session_count);
	}

	return session;
}
EXPORT_SYMBOL_GPL(l2tp_session_create);

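/* Hedged usage sketch (illustrative only): adding a data session to an
 * existing tunnel, with sequence numbers and reordering enabled.  The
 * configuration values are made up and struct example_priv is
 * hypothetical; the real callers are the netlink interface and the
 * pseudowire drivers.
 *
 *	struct l2tp_session_cfg scfg = {
 *		.pw_type		= L2TP_PWTYPE_ETH,
 *		.send_seq		= 1,
 *		.recv_seq		= 1,
 *		.reorder_timeout	= HZ / 10,
 *		.l2specific_type	= L2TP_L2SPECTYPE_DEFAULT,
 *		.l2specific_len		= 4,
 *		.mtu			= 1460,
 *	};
 *	struct l2tp_session *session;
 *
 *	session = l2tp_session_create(sizeof(struct example_priv),
 *				      tunnel, 100, 200, &scfg);
 *	if (session == NULL)
 *		return -ENOMEM;
 *
 * The priv_size bytes allocated after the session structure hold the
 * caller's per-session private data.
 */
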
/*****************************************************************************
 * Init and cleanup
 *****************************************************************************/

static __net_init int l2tp_init_net(struct net *net)
{
	struct l2tp_net *pn = net_generic(net, l2tp_net_id);
	int hash;

	INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
	spin_lock_init(&pn->l2tp_tunnel_list_lock);

	for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
		INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);

	spin_lock_init(&pn->l2tp_session_hlist_lock);

	return 0;
}

static struct pernet_operations l2tp_net_ops = {
	.init = l2tp_init_net,
	.id   = &l2tp_net_id,
	.size = sizeof(struct l2tp_net),
};

static int __init l2tp_init(void)
{
	int rc = 0;

	rc = register_pernet_device(&l2tp_net_ops);
	if (rc)
		goto out;

	printk(KERN_INFO "L2TP core driver, %s\n", L2TP_DRV_VERSION);

out:
	return rc;
}

static void __exit l2tp_exit(void)
{
	unregister_pernet_device(&l2tp_net_ops);
}

module_init(l2tp_init);
module_exit(l2tp_exit);

MODULE_AUTHOR("James Chapman <[email protected]>");
MODULE_DESCRIPTION("L2TP core");
MODULE_LICENSE("GPL");
MODULE_VERSION(L2TP_DRV_VERSION);