// SPDX-License-Identifier: GPL-2.0-only
/* L2TP core.
 *
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 *
 * This file contains some code of the original L2TPv2 pppol2tp
 * driver, which has the following copyright:
 *
 * Authors:	Martijn van Oosterhout <[email protected]>
 *		James Chapman ([email protected])
 * Contributors:
 *		Michal Ostrowski <[email protected]>
 *		Arnaldo Carvalho de Melo <[email protected]>
 *		David S. Miller ([email protected])
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/l2tp.h>
#include <linux/sort.h>
#include <linux/file.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/protocol.h>
#include <net/inet6_connection_sock.h>
#include <net/inet_ecn.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>

#include <asm/byteorder.h>
#include <linux/atomic.h>

#include "l2tp_core.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#define L2TP_DRV_VERSION	"V2.0"

/* L2TP header constants */
#define L2TP_HDRFLAG_T		0x8000
#define L2TP_HDRFLAG_L		0x4000
#define L2TP_HDRFLAG_S		0x0800
#define L2TP_HDRFLAG_O		0x0200
#define L2TP_HDRFLAG_P		0x0100

#define L2TP_HDR_VER_MASK	0x000F
#define L2TP_HDR_VER_2		0x0002
#define L2TP_HDR_VER_3		0x0003

/* L2TPv3 default L2-specific sublayer */
#define L2TP_SLFLAG_S		0x40000000
#define L2TP_SL_SEQ_MASK	0x00ffffff

#define L2TP_HDR_SIZE_MAX	14
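
/* Note: 14 bytes is the worst case for the initial parse: an L2TPv2 header
 * with the L, S and O bits set carries flags(2) + length(2) + tunnel-id(2) +
 * session-id(2) + Ns(2) + Nr(2) + offset-size(2) fields. L2TPv3 optional
 * fields (cookie, L2-specific sublayer) are pulled in separately via
 * l2tp_v3_ensure_opt_in_linear().
 */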

/* Default trace flags */
#define L2TP_DEFAULT_DEBUG_FLAGS	0

#define L2TP_DEPTH_NESTING		2
#if L2TP_DEPTH_NESTING == SINGLE_DEPTH_NESTING
#error "L2TP requires its own lockdep subclass"
#endif

/* Private data stored for received packets in the skb.
 */
struct l2tp_skb_cb {
	u32 ns;
	u16 has_seq;
	u16 length;
	unsigned long expires;
};

#define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *)&(skb)->cb[sizeof(struct inet_skb_parm)])
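
/* The private L2TP data sits in skb->cb[] just after the inet layer's own
 * control block; sizeof(struct inet_skb_parm) + sizeof(struct l2tp_skb_cb)
 * must fit within the 48 bytes of skb->cb[].
 */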

static struct workqueue_struct *l2tp_wq;

/* per-net private data for this module */
static unsigned int l2tp_net_id;
struct l2tp_net {
	/* Lock for write access to l2tp_tunnel_idr */
	spinlock_t l2tp_tunnel_idr_lock;
	struct idr l2tp_tunnel_idr;
	/* Lock for write access to l2tp_v[23]_session_idr/htable */
	spinlock_t l2tp_session_idr_lock;
	struct idr l2tp_v2_session_idr;
	struct idr l2tp_v3_session_idr;
	struct hlist_head l2tp_v3_session_htable[16];
};
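
/* Sessions live in per-net IDRs: L2TPv2 sessions are keyed by
 * (tunnel_id << 16 | session_id), since v2 IDs are 16-bit, while L2TPv3
 * sessions are keyed by session ID alone, with l2tp_v3_session_htable
 * resolving the case where the same ID is used in more than one tunnel.
 */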

static u32 l2tp_v2_session_key(u16 tunnel_id, u16 session_id)
{
	return ((u32)tunnel_id) << 16 | session_id;
}
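
/* l2tp_v2_session_key() example: tunnel_id 5, session_id 7 packs to
 * key 0x00050007.
 */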

static unsigned long l2tp_v3_session_hashkey(struct sock *sk, u32 session_id)
{
	return ((unsigned long)sk) + session_id;
}
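
/* Folding the tunnel socket pointer into the hash key distinguishes L2TPv3
 * sessions which share a session ID but belong to different tunnels; see the
 * l2tp_v3_session_htable collision handling below.
 */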

#if IS_ENABLED(CONFIG_IPV6)
static bool l2tp_sk_is_v6(struct sock *sk)
{
	return sk->sk_family == PF_INET6 &&
	       !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
}
#endif

static struct l2tp_net *l2tp_pernet(const struct net *net)
{
	return net_generic(net, l2tp_net_id);
}

static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
	struct sock *sk = tunnel->sock;

	trace_free_tunnel(tunnel);

	if (sk) {
		/* Disable udp encapsulation */
		switch (tunnel->encap) {
		case L2TP_ENCAPTYPE_UDP:
			/* No longer an encapsulation socket. See net/ipv4/udp.c */
			WRITE_ONCE(udp_sk(sk)->encap_type, 0);
			udp_sk(sk)->encap_rcv = NULL;
			udp_sk(sk)->encap_destroy = NULL;
			break;
		case L2TP_ENCAPTYPE_IP:
			break;
		}

		tunnel->sock = NULL;
		sock_put(sk);
	}

	kfree_rcu(tunnel, rcu);
}

static void l2tp_session_free(struct l2tp_session *session)
{
	trace_free_session(session);
	if (session->tunnel)
		l2tp_tunnel_put(session->tunnel);
	kfree_rcu(session, rcu);
}

struct l2tp_tunnel *l2tp_sk_to_tunnel(const struct sock *sk)
{
	const struct net *net = sock_net(sk);
	unsigned long tunnel_id, tmp;
	struct l2tp_tunnel *tunnel;
	struct l2tp_net *pn;

	rcu_read_lock_bh();
	pn = l2tp_pernet(net);
	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
		if (tunnel &&
		    tunnel->sock == sk &&
		    refcount_inc_not_zero(&tunnel->ref_count)) {
			rcu_read_unlock_bh();
			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_sk_to_tunnel);

void l2tp_tunnel_put(struct l2tp_tunnel *tunnel)
{
	if (refcount_dec_and_test(&tunnel->ref_count))
		l2tp_tunnel_free(tunnel);
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_put);

void l2tp_session_put(struct l2tp_session *session)
{
	if (refcount_dec_and_test(&session->ref_count))
		l2tp_session_free(session);
}
EXPORT_SYMBOL_GPL(l2tp_session_put);

/* Lookup a tunnel. A new reference is held on the returned tunnel. */
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
{
	const struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel;

	rcu_read_lock_bh();
	tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id);
	if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) {
		rcu_read_unlock_bh();
		return tunnel;
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get);

struct l2tp_tunnel *l2tp_tunnel_get_next(const struct net *net, unsigned long *key)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel = NULL;

	rcu_read_lock_bh();
again:
	tunnel = idr_get_next_ul(&pn->l2tp_tunnel_idr, key);
	if (tunnel) {
		if (refcount_inc_not_zero(&tunnel->ref_count)) {
			rcu_read_unlock_bh();
			return tunnel;
		}
		(*key)++;
		goto again;
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get_next);

struct l2tp_session *l2tp_v3_session_get(const struct net *net, struct sock *sk, u32 session_id)
{
	const struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_session *session;

	rcu_read_lock_bh();
	session = idr_find(&pn->l2tp_v3_session_idr, session_id);
	if (session && !hash_hashed(&session->hlist) &&
	    refcount_inc_not_zero(&session->ref_count)) {
		rcu_read_unlock_bh();
		return session;
	}

	/* If we get here and session is non-NULL, the session_id
	 * collides with one in another tunnel. If sk is non-NULL,
	 * find the session matching sk.
	 */
	if (session && sk) {
		unsigned long key = l2tp_v3_session_hashkey(sk, session->session_id);

		hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session,
					   hlist, key) {
			/* session->tunnel may be NULL if another thread is in
			 * l2tp_session_register and has added an item to
			 * l2tp_v3_session_htable but hasn't yet added the
			 * session to its tunnel's session_list.
			 */
			struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);

			if (session->session_id == session_id &&
			    tunnel && tunnel->sock == sk &&
			    refcount_inc_not_zero(&session->ref_count)) {
				rcu_read_unlock_bh();
				return session;
			}
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_v3_session_get);

struct l2tp_session *l2tp_v2_session_get(const struct net *net, u16 tunnel_id, u16 session_id)
{
	u32 session_key = l2tp_v2_session_key(tunnel_id, session_id);
	const struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_session *session;

	rcu_read_lock_bh();
	session = idr_find(&pn->l2tp_v2_session_idr, session_key);
	if (session && refcount_inc_not_zero(&session->ref_count)) {
		rcu_read_unlock_bh();
		return session;
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_v2_session_get);

struct l2tp_session *l2tp_session_get(const struct net *net, struct sock *sk, int pver,
				      u32 tunnel_id, u32 session_id)
{
	if (pver == L2TP_HDR_VER_2)
		return l2tp_v2_session_get(net, tunnel_id, session_id);
	else
		return l2tp_v3_session_get(net, sk, session_id);
}
EXPORT_SYMBOL_GPL(l2tp_session_get);

static struct l2tp_session *l2tp_v2_session_get_next(const struct net *net,
						     u16 tid,
						     unsigned long *key)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_session *session = NULL;

	/* Start searching within the range of the tid */
	if (*key == 0)
		*key = l2tp_v2_session_key(tid, 0);

	rcu_read_lock_bh();
again:
	session = idr_get_next_ul(&pn->l2tp_v2_session_idr, key);
	if (session) {
		struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);

		/* ignore sessions with id 0 as they are internal for pppol2tp */
		if (session->session_id == 0) {
			(*key)++;
			goto again;
		}

		if (tunnel->tunnel_id == tid &&
		    refcount_inc_not_zero(&session->ref_count)) {
			rcu_read_unlock_bh();
			return session;
		}

		(*key)++;
		if (tunnel->tunnel_id == tid)
			goto again;
	}
	rcu_read_unlock_bh();

	return NULL;
}

static struct l2tp_session *l2tp_v3_session_get_next(const struct net *net,
						     u32 tid, struct sock *sk,
						     unsigned long *key)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_session *session = NULL;

	rcu_read_lock_bh();
again:
	session = idr_get_next_ul(&pn->l2tp_v3_session_idr, key);
	if (session && !hash_hashed(&session->hlist)) {
		struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);

		if (tunnel && tunnel->tunnel_id == tid &&
		    refcount_inc_not_zero(&session->ref_count)) {
			rcu_read_unlock_bh();
			return session;
		}

		(*key)++;
		goto again;
	}

	/* If we get here and session is non-NULL, the IDR entry may be one
	 * where the session_id collides with one in another tunnel. Check
	 * session_htable for a match. There can only be one session of a given
	 * ID per tunnel so we can return as soon as a match is found.
	 */
	if (session && hash_hashed(&session->hlist)) {
		unsigned long hkey = l2tp_v3_session_hashkey(sk, session->session_id);
		u32 sid = session->session_id;

		hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session,
					   hlist, hkey) {
			struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);

			if (session->session_id == sid &&
			    tunnel && tunnel->tunnel_id == tid &&
			    refcount_inc_not_zero(&session->ref_count)) {
				rcu_read_unlock_bh();
				return session;
			}
		}

		/* If no match found, the colliding session ID isn't in our
		 * tunnel so try the next session ID.
		 */
		(*key)++;
		goto again;
	}

	rcu_read_unlock_bh();

	return NULL;
}

struct l2tp_session *l2tp_session_get_next(const struct net *net, struct sock *sk, int pver,
					   u32 tunnel_id, unsigned long *key)
{
	if (pver == L2TP_HDR_VER_2)
		return l2tp_v2_session_get_next(net, tunnel_id, key);
	else
		return l2tp_v3_session_get_next(net, tunnel_id, sk, key);
}
EXPORT_SYMBOL_GPL(l2tp_session_get_next);

/* Lookup a session by interface name.
 * This is very inefficient but is only used by management interfaces.
 */
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
						const char *ifname)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	unsigned long tunnel_id, tmp;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel;

	rcu_read_lock_bh();
	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
		if (tunnel) {
			list_for_each_entry_rcu(session, &tunnel->session_list, list) {
				if (!strcmp(session->ifname, ifname)) {
					refcount_inc(&session->ref_count);
					rcu_read_unlock_bh();

					return session;
				}
			}
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);

static void l2tp_session_coll_list_add(struct l2tp_session_coll_list *clist,
				       struct l2tp_session *session)
{
	refcount_inc(&session->ref_count);
	WARN_ON_ONCE(session->coll_list);
	session->coll_list = clist;
	spin_lock(&clist->lock);
	list_add(&session->clist, &clist->list);
	spin_unlock(&clist->lock);
}

static int l2tp_session_collision_add(struct l2tp_net *pn,
				      struct l2tp_session *session1,
				      struct l2tp_session *session2)
{
	struct l2tp_session_coll_list *clist;

	lockdep_assert_held(&pn->l2tp_session_idr_lock);

	if (!session2)
		return -EEXIST;

	/* If existing session is in IP-encap tunnel, refuse new session */
	if (session2->tunnel->encap == L2TP_ENCAPTYPE_IP)
		return -EEXIST;

	clist = session2->coll_list;
	if (!clist) {
		/* First collision. Allocate list to manage the collided sessions
		 * and add the existing session to the list.
		 */
		clist = kmalloc(sizeof(*clist), GFP_ATOMIC);
		if (!clist)
			return -ENOMEM;

		spin_lock_init(&clist->lock);
		INIT_LIST_HEAD(&clist->list);
		refcount_set(&clist->ref_count, 1);
		l2tp_session_coll_list_add(clist, session2);
	}

	/* If existing session isn't already in the session hlist, add it. */
	if (!hash_hashed(&session2->hlist))
		hash_add_rcu(pn->l2tp_v3_session_htable, &session2->hlist,
			     session2->hlist_key);

	/* Add new session to the hlist and collision list */
	hash_add_rcu(pn->l2tp_v3_session_htable, &session1->hlist,
		     session1->hlist_key);
	refcount_inc(&clist->ref_count);
	l2tp_session_coll_list_add(clist, session1);

	return 0;
}

static void l2tp_session_collision_del(struct l2tp_net *pn,
				       struct l2tp_session *session)
{
	struct l2tp_session_coll_list *clist = session->coll_list;
	unsigned long session_key = session->session_id;
	struct l2tp_session *session2;

	lockdep_assert_held(&pn->l2tp_session_idr_lock);

	hash_del_rcu(&session->hlist);

	if (clist) {
		/* Remove session from its collision list. If there
		 * are other sessions with the same ID, replace this
		 * session's IDR entry with that session, otherwise
		 * remove the IDR entry. If this is the last session,
		 * the collision list data is freed.
		 */
		spin_lock(&clist->lock);
		list_del_init(&session->clist);
		session2 = list_first_entry_or_null(&clist->list, struct l2tp_session, clist);
		if (session2) {
			void *old = idr_replace(&pn->l2tp_v3_session_idr, session2, session_key);

			WARN_ON_ONCE(IS_ERR_VALUE(old));
		} else {
			void *removed = idr_remove(&pn->l2tp_v3_session_idr, session_key);

			WARN_ON_ONCE(removed != session);
		}
		session->coll_list = NULL;
		spin_unlock(&clist->lock);
		if (refcount_dec_and_test(&clist->ref_count))
			kfree(clist);
		l2tp_session_put(session);
	}
}

int l2tp_session_register(struct l2tp_session *session,
			  struct l2tp_tunnel *tunnel)
{
	struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
	struct l2tp_session *other_session = NULL;
	void *old = NULL;
	u32 session_key;
	int err;

	spin_lock_bh(&tunnel->list_lock);
	spin_lock_bh(&pn->l2tp_session_idr_lock);

	if (!tunnel->acpt_newsess) {
		err = -ENODEV;
		goto out;
	}

	if (tunnel->version == L2TP_HDR_VER_3) {
		session_key = session->session_id;
		err = idr_alloc_u32(&pn->l2tp_v3_session_idr, NULL,
				    &session_key, session_key, GFP_ATOMIC);
		/* IP encap expects session IDs to be globally unique, while
		 * UDP encap doesn't. This isn't per the RFC, which says that
		 * sessions are identified only by the session ID, but is to
		 * support existing userspace which depends on it.
		 */
		if (err == -ENOSPC && tunnel->encap == L2TP_ENCAPTYPE_UDP) {
			other_session = idr_find(&pn->l2tp_v3_session_idr,
						 session_key);
			err = l2tp_session_collision_add(pn, session,
							 other_session);
		}
	} else {
		session_key = l2tp_v2_session_key(tunnel->tunnel_id,
						  session->session_id);
		err = idr_alloc_u32(&pn->l2tp_v2_session_idr, NULL,
				    &session_key, session_key, GFP_ATOMIC);
	}

	if (err) {
		if (err == -ENOSPC)
			err = -EEXIST;
		goto out;
	}

	refcount_inc(&tunnel->ref_count);
	WRITE_ONCE(session->tunnel, tunnel);
	list_add_rcu(&session->list, &tunnel->session_list);

	/* this makes session available to lockless getters */
	if (tunnel->version == L2TP_HDR_VER_3) {
		if (!other_session)
			old = idr_replace(&pn->l2tp_v3_session_idr, session, session_key);
	} else {
		old = idr_replace(&pn->l2tp_v2_session_idr, session, session_key);
	}

	/* old should be NULL, unless something removed or modified
	 * the IDR entry after our idr_alloc_u32 above (which shouldn't
	 * happen).
	 */
	WARN_ON_ONCE(old);
out:
	spin_unlock_bh(&pn->l2tp_session_idr_lock);
	spin_unlock_bh(&tunnel->list_lock);

	if (!err)
		trace_register_session(session);

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_session_register);

/*****************************************************************************
 * Receive data handling
 *****************************************************************************/

/* Queue a skb in order. We come here only if the skb has an L2TP sequence
 * number.
 */
static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct sk_buff *skbp;
	struct sk_buff *tmp;
	u32 ns = L2TP_SKB_CB(skb)->ns;

	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
		if (L2TP_SKB_CB(skbp)->ns > ns) {
			__skb_queue_before(&session->reorder_q, skbp, skb);
			atomic_long_inc(&session->stats.rx_oos_packets);
			goto out;
		}
	}

	__skb_queue_tail(&session->reorder_q, skb);

out:
	spin_unlock_bh(&session->reorder_q.lock);
}

/* Dequeue a single skb.
 */
static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int length = L2TP_SKB_CB(skb)->length;

	/* We're about to requeue the skb, so return resources
	 * to its current owner (a socket receive buffer).
	 */
	skb_orphan(skb);

	atomic_long_inc(&tunnel->stats.rx_packets);
	atomic_long_add(length, &tunnel->stats.rx_bytes);
	atomic_long_inc(&session->stats.rx_packets);
	atomic_long_add(length, &session->stats.rx_bytes);

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Bump our Nr */
		session->nr++;
		session->nr &= session->nr_max;
		trace_session_seqnum_update(session);
	}

	/* call private receive handler */
	if (session->recv_skb)
		(*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
	else
		kfree_skb(skb);
}

/* Dequeue skbs from the session's reorder_q, subject to packet order.
 * Skbs that have been in the queue for too long are simply discarded.
 */
static void l2tp_recv_dequeue(struct l2tp_session *session)
{
	struct sk_buff *skb;
	struct sk_buff *tmp;

	/* If the pkt at the head of the queue has the nr that we
	 * expect to send up next, dequeue it and any other
	 * in-sequence packets behind it.
	 */
start:
	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
		struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);

		/* If the packet has been pending on the queue for too long, discard it */
		if (time_after(jiffies, cb->expires)) {
			atomic_long_inc(&session->stats.rx_seq_discards);
			atomic_long_inc(&session->stats.rx_errors);
			trace_session_pkt_expired(session, cb->ns);
			session->reorder_skip = 1;
			__skb_unlink(skb, &session->reorder_q);
			kfree_skb(skb);
			continue;
		}

		if (cb->has_seq) {
			if (session->reorder_skip) {
				session->reorder_skip = 0;
				session->nr = cb->ns;
				trace_session_seqnum_reset(session);
			}
			if (cb->ns != session->nr)
				goto out;
		}
		__skb_unlink(skb, &session->reorder_q);

		/* Process the skb. We release the queue lock while we
		 * do so to let other contexts process the queue.
		 */
		spin_unlock_bh(&session->reorder_q.lock);
		l2tp_recv_dequeue_skb(session, skb);
		goto start;
	}

out:
	spin_unlock_bh(&session->reorder_q.lock);
}

static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
{
	u32 nws;

	if (nr >= session->nr)
		nws = nr - session->nr;
	else
		nws = (session->nr_max + 1) - (session->nr - nr);

	return nws < session->nr_window_size;
}
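
/* Worked example for L2TPv2 (nr_max 0xffff, default window nr_max / 2):
 * if session->nr is 0xfffe and nr is 1, nws = 0x10000 - (0xfffe - 1) = 3,
 * which is inside the window, so a wrapped sequence number is accepted.
 */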

/* If packet has sequence numbers, queue it if acceptable. Returns 0 if
 * acceptable, else non-zero.
 */
static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
{
	struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);

	if (!l2tp_seq_check_rx_window(session, cb->ns)) {
		/* Packet sequence number is outside allowed window.
		 * Discard it.
		 */
		trace_session_pkt_outside_rx_window(session, cb->ns);
		goto discard;
	}

	if (session->reorder_timeout != 0) {
		/* Packet reordering enabled. Add skb to session's
		 * reorder queue, in order of ns.
		 */
		l2tp_recv_queue_skb(session, skb);
		goto out;
	}

	/* Packet reordering disabled. Discard out-of-sequence packets, while
	 * tracking the number of in-sequence packets after the first OOS packet
	 * is seen. After nr_oos_count_max in-sequence packets, reset the
	 * sequence number to re-enable packet reception.
	 */
	if (cb->ns == session->nr) {
		skb_queue_tail(&session->reorder_q, skb);
	} else {
		u32 nr_oos = cb->ns;
		u32 nr_next = (session->nr_oos + 1) & session->nr_max;

		if (nr_oos == nr_next)
			session->nr_oos_count++;
		else
			session->nr_oos_count = 0;

		session->nr_oos = nr_oos;
		if (session->nr_oos_count > session->nr_oos_count_max) {
			session->reorder_skip = 1;
		}
		if (!session->reorder_skip) {
			atomic_long_inc(&session->stats.rx_seq_discards);
			trace_session_pkt_oos(session, cb->ns);
			goto discard;
		}
		skb_queue_tail(&session->reorder_q, skb);
	}

out:
	return 0;

discard:
	return 1;
}

/* Do receive processing of L2TP data frames. We handle both L2TPv2
 * and L2TPv3 data frames here.
 *
 * L2TPv2 Data Message Header
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |           Tunnel ID           |           Session ID          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |             Ns (opt)          |             Nr (opt)          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |      Offset Size (opt)        |    Offset pad... (opt)
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Data frames are marked by T=0. All other fields are the same as
 * those in L2TP control frames.
 *
 * L2TPv3 Data Message Header
 *
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      L2TP Session Header                      |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      L2-Specific Sublayer                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                        Tunnel Payload                      ...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 Session Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           Session ID                          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Cookie (optional, maximum 64 bits)...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *                                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 L2-Specific Sublayer Format
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |x|S|x|x|x|x|x|x|              Sequence Number                  |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Cookie value and sublayer format are negotiated with the peer when
 * the session is set up. Unlike L2TPv2, we do not need to parse the
 * packet header to determine if optional fields are present.
 *
 * Caller must already have parsed the frame and determined that it is
 * a data (not control) frame before coming here. Fields up to the
 * session-id have already been parsed and ptr points to the data
 * after the session-id.
 */
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
		      int length)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int offset;

	/* Parse and check optional cookie */
	if (session->peer_cookie_len > 0) {
		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
			pr_debug_ratelimited("%s: cookie mismatch (%u/%u). Discarding.\n",
					     tunnel->name, tunnel->tunnel_id,
					     session->session_id);
			atomic_long_inc(&session->stats.rx_cookie_discards);
			goto discard;
		}
		ptr += session->peer_cookie_len;
	}

	/* Handle the optional sequence numbers. Sequence numbers are
	 * in different places for L2TPv2 and L2TPv3.
	 *
	 * If we are the LAC, enable/disable sequence numbers under
	 * the control of the LNS. If no sequence numbers present but
	 * we were expecting them, discard frame.
	 */
	L2TP_SKB_CB(skb)->has_seq = 0;
	if (tunnel->version == L2TP_HDR_VER_2) {
		if (hdrflags & L2TP_HDRFLAG_S) {
			/* Store L2TP info in the skb */
			L2TP_SKB_CB(skb)->ns = ntohs(*(__be16 *)ptr);
			L2TP_SKB_CB(skb)->has_seq = 1;
			ptr += 2;
			/* Skip past nr in the header */
			ptr += 2;

		}
	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		u32 l2h = ntohl(*(__be32 *)ptr);

		if (l2h & 0x40000000) {
			/* Store L2TP info in the skb */
			L2TP_SKB_CB(skb)->ns = l2h & 0x00ffffff;
			L2TP_SKB_CB(skb)->has_seq = 1;
		}
		ptr += 4;
	}

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Received a packet with sequence numbers. If we're the LAC,
		 * check if we are sending sequence numbers and if not,
		 * configure it so.
		 */
		if (!session->lns_mode && !session->send_seq) {
			trace_session_seqnum_lns_enable(session);
			session->send_seq = 1;
			l2tp_session_set_header_len(session, tunnel->version,
						    tunnel->encap);
		}
	} else {
		/* No sequence numbers.
		 * If user has configured mandatory sequence numbers, discard.
		 */
		if (session->recv_seq) {
			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
					     session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}

		/* If we're the LAC and we're sending sequence numbers, the
		 * LNS has requested that we no longer send sequence numbers.
		 * If we're the LNS and we're sending sequence numbers, the
		 * LAC is broken. Discard the frame.
		 */
		if (!session->lns_mode && session->send_seq) {
			trace_session_seqnum_lns_disable(session);
			session->send_seq = 0;
			l2tp_session_set_header_len(session, tunnel->version,
						    tunnel->encap);
		} else if (session->send_seq) {
			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
					     session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}
	}

	/* Session data offset is defined only for L2TPv2 and is
	 * indicated by an optional 16-bit value in the header.
	 */
	if (tunnel->version == L2TP_HDR_VER_2) {
		/* If offset bit set, skip it. */
		if (hdrflags & L2TP_HDRFLAG_O) {
			offset = ntohs(*(__be16 *)ptr);
			ptr += 2 + offset;
		}
	}

	offset = ptr - optr;
	if (!pskb_may_pull(skb, offset))
		goto discard;

	__skb_pull(skb, offset);

	/* Prepare skb for adding to the session's reorder_q. Hold
	 * packets for max reorder_timeout or 1 second if not
	 * reordering.
	 */
	L2TP_SKB_CB(skb)->length = length;
	L2TP_SKB_CB(skb)->expires = jiffies +
		(session->reorder_timeout ? session->reorder_timeout : HZ);

	/* Add packet to the session's receive queue. Reordering is done here, if
	 * enabled. Saved L2TP protocol info is stored in skb->cb[].
	 */
	if (L2TP_SKB_CB(skb)->has_seq) {
		if (l2tp_recv_data_seq(session, skb))
			goto discard;
	} else {
		/* No sequence numbers. Add the skb to the tail of the
		 * reorder queue. This ensures that it will be
		 * delivered after all previous sequenced skbs.
		 */
		skb_queue_tail(&session->reorder_q, skb);
	}

	/* Try to dequeue as many skbs from reorder_q as we can. */
	l2tp_recv_dequeue(session);

	return;

discard:
	atomic_long_inc(&session->stats.rx_errors);
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(l2tp_recv_common);

/* Drop skbs from the session's reorder_q
 */
static void l2tp_session_queue_purge(struct l2tp_session *session)
{
	struct sk_buff *skb = NULL;

	while ((skb = skb_dequeue(&session->reorder_q))) {
		atomic_long_inc(&session->stats.rx_errors);
		kfree_skb(skb);
	}
}

/* UDP encapsulation receive handler. See net/ipv4/udp.c for details. */
int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct l2tp_session *session = NULL;
	struct l2tp_tunnel *tunnel = NULL;
	struct net *net = sock_net(sk);
	unsigned char *ptr, *optr;
	u16 hdrflags;
	u16 version;
	int length;

	/* UDP has verified checksum */

	/* UDP always verifies the packet length. */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Short packet? */
	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX))
		goto pass;

	/* Point to L2TP header */
	optr = skb->data;
	ptr = skb->data;

	/* Get L2TP header flags */
	hdrflags = ntohs(*(__be16 *)ptr);

	/* Get protocol version */
	version = hdrflags & L2TP_HDR_VER_MASK;

	/* Get length of L2TP packet */
	length = skb->len;

	/* If type is control packet, it is handled by userspace. */
	if (hdrflags & L2TP_HDRFLAG_T)
		goto pass;

	/* Skip flags */
	ptr += 2;

	if (version == L2TP_HDR_VER_2) {
		u16 tunnel_id, session_id;

		/* If length is present, skip it */
		if (hdrflags & L2TP_HDRFLAG_L)
			ptr += 2;

		/* Extract tunnel and session ID */
		tunnel_id = ntohs(*(__be16 *)ptr);
		ptr += 2;
		session_id = ntohs(*(__be16 *)ptr);
		ptr += 2;

		session = l2tp_v2_session_get(net, tunnel_id, session_id);
	} else {
		u32 session_id;

		ptr += 2;	/* skip reserved bits */
		session_id = ntohl(*(__be32 *)ptr);
		ptr += 4;

		session = l2tp_v3_session_get(net, sk, session_id);
	}

	if (!session || !session->recv_skb) {
		if (session)
			l2tp_session_put(session);

		/* Not found? Pass to userspace to deal with */
		goto pass;
	}

	tunnel = session->tunnel;

	/* Check protocol version */
	if (version != tunnel->version) {
		l2tp_session_put(session);
		goto invalid;
	}

	if (version == L2TP_HDR_VER_3 &&
	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
		l2tp_session_put(session);
		goto invalid;
	}

	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
	l2tp_session_put(session);

	return 0;

invalid:
	atomic_long_inc(&tunnel->stats.rx_invalid);

pass:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
}
EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);

/* UDP encapsulation receive error handler. See net/ipv4/udp.c for details. */
static void l2tp_udp_encap_err_recv(struct sock *sk, struct sk_buff *skb, int err,
				    __be16 port, u32 info, u8 *payload)
{
	sk->sk_err = err;
	sk_error_report(sk);

	if (ip_hdr(skb)->version == IPVERSION) {
		if (inet_test_bit(RECVERR, sk))
			return ip_icmp_error(sk, skb, err, port, info, payload);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		if (inet6_test_bit(RECVERR6, sk))
			return ipv6_icmp_error(sk, skb, err, port, info, payload);
#endif
	}
}

/************************************************************************
 * Transmit handling
 ***********************************************************************/
1137
1138
/* Build an L2TP header for the session into the buffer provided.
1139
*/
1140
static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
1141
{
1142
struct l2tp_tunnel *tunnel = session->tunnel;
1143
__be16 *bufp = buf;
1144
__be16 *optr = buf;
1145
u16 flags = L2TP_HDR_VER_2;
1146
u32 tunnel_id = tunnel->peer_tunnel_id;
1147
u32 session_id = session->peer_session_id;
1148
1149
if (session->send_seq)
1150
flags |= L2TP_HDRFLAG_S;
1151
1152
/* Setup L2TP header. */
1153
*bufp++ = htons(flags);
1154
*bufp++ = htons(tunnel_id);
1155
*bufp++ = htons(session_id);
1156
if (session->send_seq) {
1157
*bufp++ = htons(session->ns);
1158
*bufp++ = 0;
1159
session->ns++;
1160
session->ns &= 0xffff;
1161
trace_session_seqnum_update(session);
1162
}
1163
1164
return bufp - optr;
1165
}
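
/* The header written above is 6 bytes (flags, tunnel ID, session ID), or
 * 10 bytes when the Ns/Nr words are included; this matches the lengths
 * computed by l2tp_session_set_header_len() below.
 */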

static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	char *bufp = buf;
	char *optr = bufp;

	/* Setup L2TP header. The header differs slightly for UDP and
	 * IP encapsulations. For UDP, there are 4 bytes of flags.
	 */
	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		u16 flags = L2TP_HDR_VER_3;
		*((__be16 *)bufp) = htons(flags);
		bufp += 2;
		*((__be16 *)bufp) = 0;
		bufp += 2;
	}

	*((__be32 *)bufp) = htonl(session->peer_session_id);
	bufp += 4;
	if (session->cookie_len) {
		memcpy(bufp, &session->cookie[0], session->cookie_len);
		bufp += session->cookie_len;
	}
	if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		u32 l2h = 0;

		if (session->send_seq) {
			l2h = 0x40000000 | session->ns;
			session->ns++;
			session->ns &= 0xffffff;
			trace_session_seqnum_update(session);
		}

		*((__be32 *)bufp) = htonl(l2h);
		bufp += 4;
	}

	return bufp - optr;
}

/* Queue the packet to IP for output: tunnel socket lock must be held */
static int l2tp_xmit_queue(struct l2tp_tunnel *tunnel, struct sk_buff *skb, struct flowi *fl)
{
	int err;

	skb->ignore_df = 1;
	skb_dst_drop(skb);
#if IS_ENABLED(CONFIG_IPV6)
	if (l2tp_sk_is_v6(tunnel->sock))
		err = inet6_csk_xmit(tunnel->sock, skb, NULL);
	else
#endif
		err = ip_queue_xmit(tunnel->sock, skb, fl);

	return err >= 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, unsigned int *len)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	unsigned int data_len = skb->len;
	struct sock *sk = tunnel->sock;
	int headroom, uhlen, udp_len;
	int ret = NET_XMIT_SUCCESS;
	struct inet_sock *inet;
	struct udphdr *uh;

	/* Check that there's enough headroom in the skb to insert IP,
	 * UDP and L2TP headers. If not enough, expand it to
	 * make room. Adjust truesize.
	 */
	uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(*uh) : 0;
	headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len;
	if (skb_cow_head(skb, headroom)) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	/* Setup L2TP header */
	if (tunnel->version == L2TP_HDR_VER_2)
		l2tp_build_l2tpv2_header(session, __skb_push(skb, session->hdr_len));
	else
		l2tp_build_l2tpv3_header(session, __skb_push(skb, session->hdr_len));

	/* Reset control buffer */
	memset(skb->cb, 0, sizeof(skb->cb));

	nf_reset_ct(skb);

	/* L2TP uses its own lockdep subclass to avoid lockdep splats caused by
	 * nested socket calls on the same lockdep socket class. This can
	 * happen when data from a user socket is routed over l2tp, which uses
	 * another userspace socket.
	 */
	spin_lock_nested(&sk->sk_lock.slock, L2TP_DEPTH_NESTING);

	if (sock_owned_by_user(sk)) {
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* The user-space may change the connection status for the user-space
	 * provided socket at run time: we must check it under the socket lock
	 */
	if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* Report transmitted length before we add encap header, which keeps
	 * statistics consistent for both UDP and IP encap tx/rx paths.
	 */
	*len = skb->len;

	inet = inet_sk(sk);
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* Setup UDP header */
		__skb_push(skb, sizeof(*uh));
		skb_reset_transport_header(skb);
		uh = udp_hdr(skb);
		uh->source = inet->inet_sport;
		uh->dest = inet->inet_dport;
		udp_len = uhlen + session->hdr_len + data_len;
		uh->len = htons(udp_len);

		/* Calculate UDP checksum if configured to do so */
#if IS_ENABLED(CONFIG_IPV6)
		if (l2tp_sk_is_v6(sk))
			udp6_set_csum(udp_get_no_check6_tx(sk),
				      skb, &inet6_sk(sk)->saddr,
				      &sk->sk_v6_daddr, udp_len);
		else
#endif
			udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
				     inet->inet_daddr, udp_len);
		break;

	case L2TP_ENCAPTYPE_IP:
		break;
	}

	ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);

out_unlock:
	spin_unlock(&sk->sk_lock.slock);

	return ret;
}

/* If caller requires the skb to have a ppp header, the header must be
 * inserted in the skb data before calling this function.
 */
int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	unsigned int len = 0;
	int ret;

	ret = l2tp_xmit_core(session, skb, &len);
	if (ret == NET_XMIT_SUCCESS) {
		atomic_long_inc(&session->tunnel->stats.tx_packets);
		atomic_long_add(len, &session->tunnel->stats.tx_bytes);
		atomic_long_inc(&session->stats.tx_packets);
		atomic_long_add(len, &session->stats.tx_bytes);
	} else {
		atomic_long_inc(&session->tunnel->stats.tx_errors);
		atomic_long_inc(&session->stats.tx_errors);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(l2tp_xmit_skb);

/*****************************************************************************
 * Tunnel and session create/destroy.
 *****************************************************************************/

/* Remove an l2tp session from l2tp_core's lists. */
static void l2tp_session_unhash(struct l2tp_session *session)
{
	struct l2tp_tunnel *tunnel = session->tunnel;

	if (tunnel) {
		struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
		struct l2tp_session *removed = session;

		spin_lock_bh(&tunnel->list_lock);
		spin_lock_bh(&pn->l2tp_session_idr_lock);

		/* Remove from the per-tunnel list */
		list_del_init(&session->list);

		/* Remove from per-net IDR */
		if (tunnel->version == L2TP_HDR_VER_3) {
			if (hash_hashed(&session->hlist))
				l2tp_session_collision_del(pn, session);
			else
				removed = idr_remove(&pn->l2tp_v3_session_idr,
						     session->session_id);
		} else {
			u32 session_key = l2tp_v2_session_key(tunnel->tunnel_id,
							      session->session_id);
			removed = idr_remove(&pn->l2tp_v2_session_idr,
					     session_key);
		}
		WARN_ON_ONCE(removed && removed != session);

		spin_unlock_bh(&pn->l2tp_session_idr_lock);
		spin_unlock_bh(&tunnel->list_lock);
	}
}

/* When the tunnel is closed, all the attached sessions need to go too.
 */
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
	struct l2tp_session *session;

	spin_lock_bh(&tunnel->list_lock);
	tunnel->acpt_newsess = false;
	list_for_each_entry(session, &tunnel->session_list, list)
		l2tp_session_delete(session);
	spin_unlock_bh(&tunnel->list_lock);
}

/* Tunnel socket destroy hook for UDP encapsulation */
static void l2tp_udp_encap_destroy(struct sock *sk)
{
	struct l2tp_tunnel *tunnel;

	tunnel = l2tp_sk_to_tunnel(sk);
	if (tunnel) {
		l2tp_tunnel_delete(tunnel);
		l2tp_tunnel_put(tunnel);
	}
}

static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel)
{
	struct l2tp_net *pn = l2tp_pernet(net);

	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
	idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id);
	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
}

/* Workqueue tunnel deletion function */
static void l2tp_tunnel_del_work(struct work_struct *work)
{
	struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
						  del_work);

	l2tp_tunnel_closeall(tunnel);

	/* If the tunnel socket was created within the kernel, use
	 * the sk API to release it here.
	 */
	if (tunnel->fd < 0) {
		struct socket *sock = tunnel->sock->sk_socket;

		if (sock) {
			kernel_sock_shutdown(sock, SHUT_RDWR);
			sock_release(sock);
		}
	}

	l2tp_tunnel_remove(tunnel->l2tp_net, tunnel);
	/* drop initial ref */
	l2tp_tunnel_put(tunnel);

	/* drop workqueue ref */
	l2tp_tunnel_put(tunnel);
}

/* Create a socket for the tunnel, if one isn't set up by
 * userspace. This is used for static tunnels where there is no
 * managing L2TP daemon.
 *
 * Since we don't want these sockets to keep a namespace alive by
 * themselves, we drop the socket's namespace refcount after creation.
 * These sockets are freed when the namespace exits using the pernet
 * exit hook.
 */
static int l2tp_tunnel_sock_create(struct net *net,
				   u32 tunnel_id,
				   u32 peer_tunnel_id,
				   struct l2tp_tunnel_cfg *cfg,
				   struct socket **sockp)
{
	int err = -EINVAL;
	struct socket *sock = NULL;
	struct udp_port_cfg udp_conf;

	switch (cfg->encap) {
	case L2TP_ENCAPTYPE_UDP:
		memset(&udp_conf, 0, sizeof(udp_conf));

#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			udp_conf.family = AF_INET6;
			memcpy(&udp_conf.local_ip6, cfg->local_ip6,
			       sizeof(udp_conf.local_ip6));
			memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
			       sizeof(udp_conf.peer_ip6));
			udp_conf.use_udp6_tx_checksums =
			    !cfg->udp6_zero_tx_checksums;
			udp_conf.use_udp6_rx_checksums =
			    !cfg->udp6_zero_rx_checksums;
		} else
#endif
		{
			udp_conf.family = AF_INET;
			udp_conf.local_ip = cfg->local_ip;
			udp_conf.peer_ip = cfg->peer_ip;
			udp_conf.use_udp_checksums = cfg->use_udp_checksums;
		}

		udp_conf.local_udp_port = htons(cfg->local_udp_port);
		udp_conf.peer_udp_port = htons(cfg->peer_udp_port);

		err = udp_sock_create(net, &udp_conf, &sock);
		if (err < 0)
			goto out;

		break;

	case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			struct sockaddr_l2tpip6 ip6_addr = {0};

			err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr_unsized *)&ip6_addr,
					  sizeof(ip6_addr));
			if (err < 0)
				goto out;

			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock,
					     (struct sockaddr_unsized *)&ip6_addr,
					     sizeof(ip6_addr), 0);
			if (err < 0)
				goto out;
		} else
#endif
		{
			struct sockaddr_l2tpip ip_addr = {0};

			err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->local_ip;
			ip_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr_unsized *)&ip_addr,
					  sizeof(ip_addr));
			if (err < 0)
				goto out;

			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->peer_ip;
			ip_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock, (struct sockaddr_unsized *)&ip_addr,
					     sizeof(ip_addr), 0);
			if (err < 0)
				goto out;
		}
		break;

	default:
		goto out;
	}

out:
	*sockp = sock;
	if (err < 0 && sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sock_release(sock);
		*sockp = NULL;
	}

	return err;
}

int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
		       struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
{
	struct l2tp_tunnel *tunnel = NULL;
	int err;
	enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;

	if (cfg)
		encap = cfg->encap;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel) {
		err = -ENOMEM;
		goto err;
	}

	tunnel->version = version;
	tunnel->tunnel_id = tunnel_id;
	tunnel->peer_tunnel_id = peer_tunnel_id;

	sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
	spin_lock_init(&tunnel->list_lock);
	tunnel->acpt_newsess = true;
	INIT_LIST_HEAD(&tunnel->session_list);

	tunnel->encap = encap;

	refcount_set(&tunnel->ref_count, 1);
	tunnel->fd = fd;

	/* Init delete workqueue struct */
	INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);

	err = 0;
err:
	if (tunnelp)
		*tunnelp = tunnel;

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_create);

static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
				enum l2tp_encap_type encap)
{
	struct l2tp_tunnel *tunnel;

	if (!net_eq(sock_net(sk), net))
		return -EINVAL;

	if (sk->sk_type != SOCK_DGRAM)
		return -EPROTONOSUPPORT;

	if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
		return -EPROTONOSUPPORT;

	if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
	    (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
		return -EPROTONOSUPPORT;

	if (encap == L2TP_ENCAPTYPE_UDP && sk->sk_user_data)
		return -EBUSY;

	tunnel = l2tp_sk_to_tunnel(sk);
	if (tunnel) {
		l2tp_tunnel_put(tunnel);
		return -EBUSY;
	}

	return 0;
}

int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
			 struct l2tp_tunnel_cfg *cfg)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	u32 tunnel_id = tunnel->tunnel_id;
	struct socket *sock;
	struct sock *sk;
	int ret;

	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
	ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id,
			    GFP_ATOMIC);
	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
	if (ret)
		return ret == -ENOSPC ? -EEXIST : ret;

	if (tunnel->fd < 0) {
		ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
					      tunnel->peer_tunnel_id, cfg,
					      &sock);
		if (ret < 0)
			goto err;
	} else {
		sock = sockfd_lookup(tunnel->fd, &ret);
		if (!sock)
			goto err;
	}

	sk = sock->sk;
	lock_sock(sk);
	write_lock_bh(&sk->sk_callback_lock);
	ret = l2tp_validate_socket(sk, net, tunnel->encap);
	if (ret < 0)
		goto err_inval_sock;
	write_unlock_bh(&sk->sk_callback_lock);

	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		struct udp_tunnel_sock_cfg udp_cfg = {
			.encap_type = UDP_ENCAP_L2TPINUDP,
			.encap_rcv = l2tp_udp_encap_recv,
			.encap_err_rcv = l2tp_udp_encap_err_recv,
			.encap_destroy = l2tp_udp_encap_destroy,
		};

		setup_udp_tunnel_sock(net, sock, &udp_cfg);
	}

	sk->sk_allocation = GFP_ATOMIC;
	release_sock(sk);

	sock_hold(sk);
	tunnel->sock = sk;
	tunnel->l2tp_net = net;

	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
	idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id);
	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);

	trace_register_tunnel(tunnel);

	if (tunnel->fd >= 0)
		sockfd_put(sock);

	return 0;

err_inval_sock:
	write_unlock_bh(&sk->sk_callback_lock);
	release_sock(sk);

	if (tunnel->fd < 0)
		sock_release(sock);
	else
		sockfd_put(sock);
err:
	l2tp_tunnel_remove(net, tunnel);
	return ret;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_register);

/* This function is used by the netlink TUNNEL_DELETE command.
 */
void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
	if (!test_and_set_bit(0, &tunnel->dead)) {
		trace_delete_tunnel(tunnel);
		refcount_inc(&tunnel->ref_count);
		queue_work(l2tp_wq, &tunnel->del_work);
	}
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);

void l2tp_session_delete(struct l2tp_session *session)
{
	if (!test_and_set_bit(0, &session->dead)) {
		trace_delete_session(session);
		refcount_inc(&session->ref_count);
		queue_work(l2tp_wq, &session->del_work);
	}
}
EXPORT_SYMBOL_GPL(l2tp_session_delete);

/* Workqueue session deletion function */
static void l2tp_session_del_work(struct work_struct *work)
{
	struct l2tp_session *session = container_of(work, struct l2tp_session,
						    del_work);

	l2tp_session_unhash(session);
	l2tp_session_queue_purge(session);
	if (session->session_close)
		(*session->session_close)(session);

	/* drop initial ref */
	l2tp_session_put(session);

	/* drop workqueue ref */
	l2tp_session_put(session);
}

/* We come here whenever a session's send_seq, cookie_len or
 * l2specific_type parameters are set.
 */
void l2tp_session_set_header_len(struct l2tp_session *session, int version,
				 enum l2tp_encap_type encap)
{
	if (version == L2TP_HDR_VER_2) {
		session->hdr_len = 6;
		if (session->send_seq)
			session->hdr_len += 4;
	} else {
		session->hdr_len = 4 + session->cookie_len;
		session->hdr_len += l2tp_get_l2specific_len(session);
		if (encap == L2TP_ENCAPTYPE_UDP)
			session->hdr_len += 4;
	}
}
EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
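
/* Example: an L2TPv3 session over UDP with a 4-byte cookie and the default
 * L2-specific sublayer uses 4 (flags/reserved) + 4 (session ID) + 4 (cookie)
 * + 4 (sublayer) = 16 bytes of header.
 */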

struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id,
					 u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
	struct l2tp_session *session;

	session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL);
	if (session) {
		session->magic = L2TP_SESSION_MAGIC;

		session->session_id = session_id;
		session->peer_session_id = peer_session_id;
		session->nr = 0;
		if (tunnel->version == L2TP_HDR_VER_2)
			session->nr_max = 0xffff;
		else
			session->nr_max = 0xffffff;
		session->nr_window_size = session->nr_max / 2;
		session->nr_oos_count_max = 4;

		/* Use NR of first received packet */
		session->reorder_skip = 1;

		sprintf(&session->name[0], "sess %u/%u",
			tunnel->tunnel_id, session->session_id);

		skb_queue_head_init(&session->reorder_q);

		session->hlist_key = l2tp_v3_session_hashkey(tunnel->sock, session->session_id);
		INIT_HLIST_NODE(&session->hlist);
		INIT_LIST_HEAD(&session->clist);
		INIT_LIST_HEAD(&session->list);
		INIT_WORK(&session->del_work, l2tp_session_del_work);

		if (cfg) {
			session->pwtype = cfg->pw_type;
			session->send_seq = cfg->send_seq;
			session->recv_seq = cfg->recv_seq;
			session->lns_mode = cfg->lns_mode;
			session->reorder_timeout = cfg->reorder_timeout;
			session->l2specific_type = cfg->l2specific_type;
			session->cookie_len = cfg->cookie_len;
			memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
			session->peer_cookie_len = cfg->peer_cookie_len;
			memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
		}

		l2tp_session_set_header_len(session, tunnel->version, tunnel->encap);

		refcount_set(&session->ref_count, 1);

		return session;
	}

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(l2tp_session_create);

/*****************************************************************************
 * Init and cleanup
 *****************************************************************************/

static __net_init int l2tp_init_net(struct net *net)
{
	struct l2tp_net *pn = net_generic(net, l2tp_net_id);

	idr_init(&pn->l2tp_tunnel_idr);
	spin_lock_init(&pn->l2tp_tunnel_idr_lock);

	idr_init(&pn->l2tp_v2_session_idr);
	idr_init(&pn->l2tp_v3_session_idr);
	spin_lock_init(&pn->l2tp_session_idr_lock);

	return 0;
}

static __net_exit void l2tp_pre_exit_net(struct net *net)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel = NULL;
	unsigned long tunnel_id, tmp;

	rcu_read_lock_bh();
	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
		if (tunnel)
			l2tp_tunnel_delete(tunnel);
	}
	rcu_read_unlock_bh();

	if (l2tp_wq) {
		/* Run all TUNNEL_DELETE work items just queued. */
		__flush_workqueue(l2tp_wq);

		/* Each TUNNEL_DELETE work item will queue a SESSION_DELETE
		 * work item for each session in the tunnel. Flush the
		 * workqueue again to process these.
		 */
		__flush_workqueue(l2tp_wq);
	}
}

static int l2tp_idr_item_unexpected(int id, void *p, void *data)
{
	const char *idr_name = data;

	pr_err("l2tp: %s IDR not empty at net %d exit\n", idr_name, id);
	WARN_ON_ONCE(1);
	return 1;
}

static __net_exit void l2tp_exit_net(struct net *net)
{
	struct l2tp_net *pn = l2tp_pernet(net);

	/* Our per-net IDRs should be empty. Check that is so, to
	 * help catch cleanup races or refcnt leaks.
	 */
	idr_for_each(&pn->l2tp_v2_session_idr, l2tp_idr_item_unexpected,
		     "v2_session");
	idr_for_each(&pn->l2tp_v3_session_idr, l2tp_idr_item_unexpected,
		     "v3_session");
	idr_for_each(&pn->l2tp_tunnel_idr, l2tp_idr_item_unexpected,
		     "tunnel");

	idr_destroy(&pn->l2tp_v2_session_idr);
	idr_destroy(&pn->l2tp_v3_session_idr);
	idr_destroy(&pn->l2tp_tunnel_idr);
}

static struct pernet_operations l2tp_net_ops = {
	.init = l2tp_init_net,
	.exit = l2tp_exit_net,
	.pre_exit = l2tp_pre_exit_net,
	.id   = &l2tp_net_id,
	.size = sizeof(struct l2tp_net),
};

static int __init l2tp_init(void)
{
	int rc = 0;

	rc = register_pernet_device(&l2tp_net_ops);
	if (rc)
		goto out;

	l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
	if (!l2tp_wq) {
		pr_err("alloc_workqueue failed\n");
		unregister_pernet_device(&l2tp_net_ops);
		rc = -ENOMEM;
		goto out;
	}

	pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);

out:
	return rc;
}

static void __exit l2tp_exit(void)
{
	unregister_pernet_device(&l2tp_net_ops);
	if (l2tp_wq) {
		destroy_workqueue(l2tp_wq);
		l2tp_wq = NULL;
	}
}

module_init(l2tp_init);
module_exit(l2tp_exit);

MODULE_AUTHOR("James Chapman <[email protected]>");
MODULE_DESCRIPTION("L2TP core");
MODULE_LICENSE("GPL");
MODULE_VERSION(L2TP_DRV_VERSION);