// SPDX-License-Identifier: GPL-2.0-only
/* L2TP core.
 *
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 *
 * This file contains some code of the original L2TPv2 pppol2tp
 * driver, which has the following copyright:
 *
 * Authors:	Martijn van Oosterhout <[email protected]>
 *		James Chapman ([email protected])
 * Contributors:
 *		Michal Ostrowski <[email protected]>
 *		Arnaldo Carvalho de Melo <[email protected]>
 *		David S. Miller ([email protected])
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

#include <linux/netdevice.h>
#include <linux/net.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/l2tp.h>
#include <linux/sort.h>
#include <linux/file.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/protocol.h>
#include <net/inet6_connection_sock.h>
#include <net/inet_ecn.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>

#include <asm/byteorder.h>
#include <linux/atomic.h>

#include "l2tp_core.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#define L2TP_DRV_VERSION	"V2.0"

/* L2TP header constants */
#define L2TP_HDRFLAG_T	   0x8000
#define L2TP_HDRFLAG_L	   0x4000
#define L2TP_HDRFLAG_S	   0x0800
#define L2TP_HDRFLAG_O	   0x0200
#define L2TP_HDRFLAG_P	   0x0100

#define L2TP_HDR_VER_MASK  0x000F
#define L2TP_HDR_VER_2	   0x0002
#define L2TP_HDR_VER_3	   0x0003

/* L2TPv3 default L2-specific sublayer */
#define L2TP_SLFLAG_S	   0x40000000
#define L2TP_SL_SEQ_MASK   0x00ffffff

#define L2TP_HDR_SIZE_MAX		14

/* Default trace flags */
#define L2TP_DEFAULT_DEBUG_FLAGS	0

#define L2TP_DEPTH_NESTING		2
#if L2TP_DEPTH_NESTING == SINGLE_DEPTH_NESTING
#error "L2TP requires its own lockdep subclass"
#endif

/* Private data stored for received packets in the skb.
 */
struct l2tp_skb_cb {
	u32			ns;
	u16			has_seq;
	u16			length;
	unsigned long		expires;
};

#define L2TP_SKB_CB(skb)	((struct l2tp_skb_cb *)&(skb)->cb[sizeof(struct inet_skb_parm)])
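/* Usage (illustrative): receive-path code reads per-packet state back out
 * of the control block, e.g.
 *
 *	if (L2TP_SKB_CB(skb)->has_seq)
 *		ns = L2TP_SKB_CB(skb)->ns;
 *
 * The L2TP data is placed after struct inet_skb_parm so that it does not
 * clobber IP's own use of skb->cb[].
 */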

static struct workqueue_struct *l2tp_wq;

/* per-net private data for this module */
static unsigned int l2tp_net_id;
struct l2tp_net {
	/* Lock for write access to l2tp_tunnel_idr */
	spinlock_t l2tp_tunnel_idr_lock;
	struct idr l2tp_tunnel_idr;
	/* Lock for write access to l2tp_v[23]_session_idr/htable */
	spinlock_t l2tp_session_idr_lock;
	struct idr l2tp_v2_session_idr;
	struct idr l2tp_v3_session_idr;
	struct hlist_head l2tp_v3_session_htable[16];
};

static u32 l2tp_v2_session_key(u16 tunnel_id, u16 session_id)
{
	return ((u32)tunnel_id) << 16 | session_id;
}

static unsigned long l2tp_v3_session_hashkey(struct sock *sk, u32 session_id)
{
	return ((unsigned long)sk) + session_id;
}
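/* Worked example (illustrative): for L2TPv2, tunnel ID 5 and session ID 2
 * pack into the single 32-bit IDR key 0x00050002. For L2TPv3, the hash key
 * adds the session ID to the tunnel socket pointer, so the same session ID
 * used on two different tunnel sockets yields two different keys.
 */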

#if IS_ENABLED(CONFIG_IPV6)
static bool l2tp_sk_is_v6(struct sock *sk)
{
	return sk->sk_family == PF_INET6 &&
	       !ipv6_addr_v4mapped(&sk->sk_v6_daddr);
}
#endif

static struct l2tp_net *l2tp_pernet(const struct net *net)
{
	return net_generic(net, l2tp_net_id);
}

static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
	struct sock *sk = tunnel->sock;

	trace_free_tunnel(tunnel);

	if (sk) {
		/* Disable udp encapsulation */
		switch (tunnel->encap) {
		case L2TP_ENCAPTYPE_UDP:
			/* No longer an encapsulation socket. See net/ipv4/udp.c */
			WRITE_ONCE(udp_sk(sk)->encap_type, 0);
			udp_sk(sk)->encap_rcv = NULL;
			udp_sk(sk)->encap_destroy = NULL;
			break;
		case L2TP_ENCAPTYPE_IP:
			break;
		}

		tunnel->sock = NULL;
		sock_put(sk);
	}

	kfree_rcu(tunnel, rcu);
}

static void l2tp_session_free(struct l2tp_session *session)
{
	trace_free_session(session);
	if (session->tunnel)
		l2tp_tunnel_put(session->tunnel);
	kfree_rcu(session, rcu);
}

struct l2tp_tunnel *l2tp_sk_to_tunnel(const struct sock *sk)
{
	const struct net *net = sock_net(sk);
	unsigned long tunnel_id, tmp;
	struct l2tp_tunnel *tunnel;
	struct l2tp_net *pn;

	rcu_read_lock_bh();
	pn = l2tp_pernet(net);
	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
		if (tunnel &&
		    tunnel->sock == sk &&
		    refcount_inc_not_zero(&tunnel->ref_count)) {
			rcu_read_unlock_bh();
			return tunnel;
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_sk_to_tunnel);

void l2tp_tunnel_put(struct l2tp_tunnel *tunnel)
{
	if (refcount_dec_and_test(&tunnel->ref_count))
		l2tp_tunnel_free(tunnel);
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_put);

void l2tp_session_put(struct l2tp_session *session)
{
	if (refcount_dec_and_test(&session->ref_count))
		l2tp_session_free(session);
}
EXPORT_SYMBOL_GPL(l2tp_session_put);

/* Lookup a tunnel. A new reference is held on the returned tunnel. */
struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
{
	const struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel;

	rcu_read_lock_bh();
	tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id);
	if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) {
		rcu_read_unlock_bh();
		return tunnel;
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get);

struct l2tp_tunnel *l2tp_tunnel_get_next(const struct net *net, unsigned long *key)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel = NULL;

	rcu_read_lock_bh();
again:
	tunnel = idr_get_next_ul(&pn->l2tp_tunnel_idr, key);
	if (tunnel) {
		if (refcount_inc_not_zero(&tunnel->ref_count)) {
			rcu_read_unlock_bh();
			return tunnel;
		}
		(*key)++;
		goto again;
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_get_next);

struct l2tp_session *l2tp_v3_session_get(const struct net *net, struct sock *sk, u32 session_id)
{
	const struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_session *session;

	rcu_read_lock_bh();
	session = idr_find(&pn->l2tp_v3_session_idr, session_id);
	if (session && !hash_hashed(&session->hlist) &&
	    refcount_inc_not_zero(&session->ref_count)) {
		rcu_read_unlock_bh();
		return session;
	}

	/* If we get here and session is non-NULL, the session_id
	 * collides with one in another tunnel. If sk is non-NULL,
	 * find the session matching sk.
	 */
	if (session && sk) {
		unsigned long key = l2tp_v3_session_hashkey(sk, session->session_id);

		hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session,
					   hlist, key) {
			/* session->tunnel may be NULL if another thread is in
			 * l2tp_session_register and has added an item to
			 * l2tp_v3_session_htable but hasn't yet added the
			 * session to its tunnel's session_list.
			 */
			struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);

			if (session->session_id == session_id &&
			    tunnel && tunnel->sock == sk &&
			    refcount_inc_not_zero(&session->ref_count)) {
				rcu_read_unlock_bh();
				return session;
			}
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_v3_session_get);

struct l2tp_session *l2tp_v2_session_get(const struct net *net, u16 tunnel_id, u16 session_id)
{
	u32 session_key = l2tp_v2_session_key(tunnel_id, session_id);
	const struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_session *session;

	rcu_read_lock_bh();
	session = idr_find(&pn->l2tp_v2_session_idr, session_key);
	if (session && refcount_inc_not_zero(&session->ref_count)) {
		rcu_read_unlock_bh();
		return session;
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_v2_session_get);

struct l2tp_session *l2tp_session_get(const struct net *net, struct sock *sk, int pver,
				      u32 tunnel_id, u32 session_id)
{
	if (pver == L2TP_HDR_VER_2)
		return l2tp_v2_session_get(net, tunnel_id, session_id);
	else
		return l2tp_v3_session_get(net, sk, session_id);
}
EXPORT_SYMBOL_GPL(l2tp_session_get);

static struct l2tp_session *l2tp_v2_session_get_next(const struct net *net,
						     u16 tid,
						     unsigned long *key)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_session *session = NULL;

	/* Start searching within the range of the tid */
	if (*key == 0)
		*key = l2tp_v2_session_key(tid, 0);

	rcu_read_lock_bh();
again:
	session = idr_get_next_ul(&pn->l2tp_v2_session_idr, key);
	if (session) {
		struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);

		/* ignore sessions with id 0 as they are internal for pppol2tp */
		if (session->session_id == 0) {
			(*key)++;
			goto again;
		}

		if (tunnel->tunnel_id == tid &&
		    refcount_inc_not_zero(&session->ref_count)) {
			rcu_read_unlock_bh();
			return session;
		}

		(*key)++;
		if (tunnel->tunnel_id == tid)
			goto again;
	}
	rcu_read_unlock_bh();

	return NULL;
}

static struct l2tp_session *l2tp_v3_session_get_next(const struct net *net,
						     u32 tid, struct sock *sk,
						     unsigned long *key)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_session *session = NULL;

	rcu_read_lock_bh();
again:
	session = idr_get_next_ul(&pn->l2tp_v3_session_idr, key);
	if (session && !hash_hashed(&session->hlist)) {
		struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);

		if (tunnel && tunnel->tunnel_id == tid &&
		    refcount_inc_not_zero(&session->ref_count)) {
			rcu_read_unlock_bh();
			return session;
		}

		(*key)++;
		goto again;
	}

	/* If we get here and session is non-NULL, the IDR entry may be one
	 * where the session_id collides with one in another tunnel. Check
	 * session_htable for a match. There can only be one session of a given
	 * ID per tunnel so we can return as soon as a match is found.
	 */
	if (session && hash_hashed(&session->hlist)) {
		unsigned long hkey = l2tp_v3_session_hashkey(sk, session->session_id);
		u32 sid = session->session_id;

		hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session,
					   hlist, hkey) {
			struct l2tp_tunnel *tunnel = READ_ONCE(session->tunnel);

			if (session->session_id == sid &&
			    tunnel && tunnel->tunnel_id == tid &&
			    refcount_inc_not_zero(&session->ref_count)) {
				rcu_read_unlock_bh();
				return session;
			}
		}

		/* If no match found, the colliding session ID isn't in our
		 * tunnel so try the next session ID.
		 */
		(*key)++;
		goto again;
	}

	rcu_read_unlock_bh();

	return NULL;
}

struct l2tp_session *l2tp_session_get_next(const struct net *net, struct sock *sk, int pver,
					   u32 tunnel_id, unsigned long *key)
{
	if (pver == L2TP_HDR_VER_2)
		return l2tp_v2_session_get_next(net, tunnel_id, key);
	else
		return l2tp_v3_session_get_next(net, tunnel_id, sk, key);
}
EXPORT_SYMBOL_GPL(l2tp_session_get_next);

/* Lookup a session by interface name.
 * This is very inefficient but is only used by management interfaces.
 */
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
						const char *ifname)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	unsigned long tunnel_id, tmp;
	struct l2tp_session *session;
	struct l2tp_tunnel *tunnel;

	rcu_read_lock_bh();
	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
		if (tunnel) {
			list_for_each_entry_rcu(session, &tunnel->session_list, list) {
				if (!strcmp(session->ifname, ifname)) {
					refcount_inc(&session->ref_count);
					rcu_read_unlock_bh();

					return session;
				}
			}
		}
	}
	rcu_read_unlock_bh();

	return NULL;
}
EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);

static void l2tp_session_coll_list_add(struct l2tp_session_coll_list *clist,
				       struct l2tp_session *session)
{
	refcount_inc(&session->ref_count);
	WARN_ON_ONCE(session->coll_list);
	session->coll_list = clist;
	spin_lock(&clist->lock);
	list_add(&session->clist, &clist->list);
	spin_unlock(&clist->lock);
}

static int l2tp_session_collision_add(struct l2tp_net *pn,
				      struct l2tp_session *session1,
				      struct l2tp_session *session2)
{
	struct l2tp_session_coll_list *clist;

	lockdep_assert_held(&pn->l2tp_session_idr_lock);

	if (!session2)
		return -EEXIST;

	/* If existing session is in IP-encap tunnel, refuse new session */
	if (session2->tunnel->encap == L2TP_ENCAPTYPE_IP)
		return -EEXIST;

	clist = session2->coll_list;
	if (!clist) {
		/* First collision. Allocate list to manage the collided sessions
		 * and add the existing session to the list.
		 */
		clist = kmalloc(sizeof(*clist), GFP_ATOMIC);
		if (!clist)
			return -ENOMEM;

		spin_lock_init(&clist->lock);
		INIT_LIST_HEAD(&clist->list);
		refcount_set(&clist->ref_count, 1);
		l2tp_session_coll_list_add(clist, session2);
	}

	/* If existing session isn't already in the session hlist, add it. */
	if (!hash_hashed(&session2->hlist))
		hash_add_rcu(pn->l2tp_v3_session_htable, &session2->hlist,
			     session2->hlist_key);

	/* Add new session to the hlist and collision list */
	hash_add_rcu(pn->l2tp_v3_session_htable, &session1->hlist,
		     session1->hlist_key);
	refcount_inc(&clist->ref_count);
	l2tp_session_coll_list_add(clist, session1);

	return 0;
}
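/* Illustrative scenario: two UDP-encap L2TPv3 tunnels on different sockets
 * both register session ID 1000. The first session takes the IDR slot; the
 * second gets -ENOSPC from idr_alloc_u32 in l2tp_session_register, which
 * then calls this function to link both sessions into
 * l2tp_v3_session_htable (keyed by tunnel socket + session ID) and into a
 * shared refcounted collision list. Lookups disambiguate by socket.
 */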

static void l2tp_session_collision_del(struct l2tp_net *pn,
				       struct l2tp_session *session)
{
	struct l2tp_session_coll_list *clist = session->coll_list;
	unsigned long session_key = session->session_id;
	struct l2tp_session *session2;

	lockdep_assert_held(&pn->l2tp_session_idr_lock);

	hash_del_rcu(&session->hlist);

	if (clist) {
		/* Remove session from its collision list. If there
		 * are other sessions with the same ID, replace this
		 * session's IDR entry with that session, otherwise
		 * remove the IDR entry. If this is the last session,
		 * the collision list data is freed.
		 */
		spin_lock(&clist->lock);
		list_del_init(&session->clist);
		session2 = list_first_entry_or_null(&clist->list, struct l2tp_session, clist);
		if (session2) {
			void *old = idr_replace(&pn->l2tp_v3_session_idr, session2, session_key);

			WARN_ON_ONCE(IS_ERR_VALUE(old));
		} else {
			void *removed = idr_remove(&pn->l2tp_v3_session_idr, session_key);

			WARN_ON_ONCE(removed != session);
		}
		session->coll_list = NULL;
		spin_unlock(&clist->lock);
		if (refcount_dec_and_test(&clist->ref_count))
			kfree(clist);
		l2tp_session_put(session);
	}
}

int l2tp_session_register(struct l2tp_session *session,
			  struct l2tp_tunnel *tunnel)
{
	struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
	struct l2tp_session *other_session = NULL;
	void *old = NULL;
	u32 session_key;
	int err;

	spin_lock_bh(&tunnel->list_lock);
	spin_lock_bh(&pn->l2tp_session_idr_lock);

	if (!tunnel->acpt_newsess) {
		err = -ENODEV;
		goto out;
	}

	if (tunnel->version == L2TP_HDR_VER_3) {
		session_key = session->session_id;
		err = idr_alloc_u32(&pn->l2tp_v3_session_idr, NULL,
				    &session_key, session_key, GFP_ATOMIC);
		/* IP encap expects session IDs to be globally unique, while
		 * UDP encap doesn't. This isn't per the RFC, which says that
		 * sessions are identified only by the session ID, but is to
		 * support existing userspace which depends on it.
		 */
		if (err == -ENOSPC && tunnel->encap == L2TP_ENCAPTYPE_UDP) {
			other_session = idr_find(&pn->l2tp_v3_session_idr,
						 session_key);
			err = l2tp_session_collision_add(pn, session,
							 other_session);
		}
	} else {
		session_key = l2tp_v2_session_key(tunnel->tunnel_id,
						  session->session_id);
		err = idr_alloc_u32(&pn->l2tp_v2_session_idr, NULL,
				    &session_key, session_key, GFP_ATOMIC);
	}

	if (err) {
		if (err == -ENOSPC)
			err = -EEXIST;
		goto out;
	}

	refcount_inc(&tunnel->ref_count);
	WRITE_ONCE(session->tunnel, tunnel);
	list_add_rcu(&session->list, &tunnel->session_list);

	/* this makes session available to lockless getters */
	if (tunnel->version == L2TP_HDR_VER_3) {
		if (!other_session)
			old = idr_replace(&pn->l2tp_v3_session_idr, session, session_key);
	} else {
		old = idr_replace(&pn->l2tp_v2_session_idr, session, session_key);
	}

	/* old should be NULL, unless something removed or modified
	 * the IDR entry after our idr_alloc_u32 above (which shouldn't
	 * happen).
	 */
	WARN_ON_ONCE(old);
out:
	spin_unlock_bh(&pn->l2tp_session_idr_lock);
	spin_unlock_bh(&tunnel->list_lock);

	if (!err)
		trace_register_session(session);

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_session_register);

/*****************************************************************************
 * Receive data handling
 *****************************************************************************/

/* Queue a skb in order. We come here only if the skb has an L2TP sequence
 * number.
 */
static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct sk_buff *skbp;
	struct sk_buff *tmp;
	u32 ns = L2TP_SKB_CB(skb)->ns;

	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
		if (L2TP_SKB_CB(skbp)->ns > ns) {
			__skb_queue_before(&session->reorder_q, skbp, skb);
			atomic_long_inc(&session->stats.rx_oos_packets);
			goto out;
		}
	}

	__skb_queue_tail(&session->reorder_q, skb);

out:
	spin_unlock_bh(&session->reorder_q.lock);
}

/* Dequeue a single skb.
 */
static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int length = L2TP_SKB_CB(skb)->length;

	/* We're about to requeue the skb, so return resources
	 * to its current owner (a socket receive buffer).
	 */
	skb_orphan(skb);

	atomic_long_inc(&tunnel->stats.rx_packets);
	atomic_long_add(length, &tunnel->stats.rx_bytes);
	atomic_long_inc(&session->stats.rx_packets);
	atomic_long_add(length, &session->stats.rx_bytes);

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Bump our Nr */
		session->nr++;
		session->nr &= session->nr_max;
		trace_session_seqnum_update(session);
	}

	/* call private receive handler */
	if (session->recv_skb)
		(*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
	else
		kfree_skb(skb);
}

/* Dequeue skbs from the session's reorder_q, subject to packet order.
 * Skbs that have been in the queue for too long are simply discarded.
 */
static void l2tp_recv_dequeue(struct l2tp_session *session)
{
	struct sk_buff *skb;
	struct sk_buff *tmp;

	/* If the pkt at the head of the queue has the nr that we
	 * expect to send up next, dequeue it and any other
	 * in-sequence packets behind it.
	 */
start:
	spin_lock_bh(&session->reorder_q.lock);
	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
		struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);

		/* If the packet has been pending on the queue for too long, discard it */
		if (time_after(jiffies, cb->expires)) {
			atomic_long_inc(&session->stats.rx_seq_discards);
			atomic_long_inc(&session->stats.rx_errors);
			trace_session_pkt_expired(session, cb->ns);
			session->reorder_skip = 1;
			__skb_unlink(skb, &session->reorder_q);
			kfree_skb(skb);
			continue;
		}

		if (cb->has_seq) {
			if (session->reorder_skip) {
				session->reorder_skip = 0;
				session->nr = cb->ns;
				trace_session_seqnum_reset(session);
			}
			if (cb->ns != session->nr)
				goto out;
		}
		__skb_unlink(skb, &session->reorder_q);

		/* Process the skb. We release the queue lock while we
		 * do so to let other contexts process the queue.
		 */
		spin_unlock_bh(&session->reorder_q.lock);
		l2tp_recv_dequeue_skb(session, skb);
		goto start;
	}

out:
	spin_unlock_bh(&session->reorder_q.lock);
}

static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
{
	u32 nws;

	if (nr >= session->nr)
		nws = nr - session->nr;
	else
		nws = (session->nr_max + 1) - (session->nr - nr);

	return nws < session->nr_window_size;
}
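/* Worked example (illustrative): for L2TPv2, nr_max is 0xffff and the
 * window is nr_max / 2. If session->nr is 0xfffe and nr is 0x0001, nws
 * wraps to (0xffff + 1) - (0xfffe - 0x0001) = 3, which is well inside the
 * window, so the packet is accepted across the sequence-number wrap.
 */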

/* If packet has sequence numbers, queue it if acceptable. Returns 0 if
 * acceptable, else non-zero.
 */
static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
{
	struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);

	if (!l2tp_seq_check_rx_window(session, cb->ns)) {
		/* Packet sequence number is outside allowed window.
		 * Discard it.
		 */
		trace_session_pkt_outside_rx_window(session, cb->ns);
		goto discard;
	}

	if (session->reorder_timeout != 0) {
		/* Packet reordering enabled. Add skb to session's
		 * reorder queue, in order of ns.
		 */
		l2tp_recv_queue_skb(session, skb);
		goto out;
	}

	/* Packet reordering disabled. Discard out-of-sequence packets, while
	 * tracking the number of in-sequence packets after the first OOS packet
	 * is seen. After nr_oos_count_max in-sequence packets, reset the
	 * sequence number to re-enable packet reception.
	 */
	if (cb->ns == session->nr) {
		skb_queue_tail(&session->reorder_q, skb);
	} else {
		u32 nr_oos = cb->ns;
		u32 nr_next = (session->nr_oos + 1) & session->nr_max;

		if (nr_oos == nr_next)
			session->nr_oos_count++;
		else
			session->nr_oos_count = 0;

		session->nr_oos = nr_oos;
		if (session->nr_oos_count > session->nr_oos_count_max) {
			session->reorder_skip = 1;
		}
		if (!session->reorder_skip) {
			atomic_long_inc(&session->stats.rx_seq_discards);
			trace_session_pkt_oos(session, cb->ns);
			goto discard;
		}
		skb_queue_tail(&session->reorder_q, skb);
	}

out:
	return 0;

discard:
	return 1;
}

/* Do receive processing of L2TP data frames. We handle both L2TPv2
 * and L2TPv3 data frames here.
 *
 * L2TPv2 Data Message Header
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |           Tunnel ID           |           Session ID          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |             Ns (opt)          |             Nr (opt)          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |      Offset Size (opt)        |    Offset pad... (opt)
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Data frames are marked by T=0. All other fields are the same as
 * those in L2TP control frames.
 *
 * L2TPv3 Data Message Header
 *
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      L2TP Session Header                      |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      L2-Specific Sublayer                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                        Tunnel Payload                      ...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 Session Header Over IP
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           Session ID                          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |               Cookie (optional, maximum 64 bits)...
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *                                                                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * L2TPv3 L2-Specific Sublayer Format
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |x|S|x|x|x|x|x|x|                Sequence Number                |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Cookie value and sublayer format are negotiated with the peer when
 * the session is set up. Unlike L2TPv2, we do not need to parse the
 * packet header to determine if optional fields are present.
 *
 * Caller must already have parsed the frame and determined that it is
 * a data (not control) frame before coming here. Fields up to the
 * session-id have already been parsed and ptr points to the data
 * after the session-id.
 */
void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
		      unsigned char *ptr, unsigned char *optr, u16 hdrflags,
		      int length)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	int offset;

	/* Parse and check optional cookie */
	if (session->peer_cookie_len > 0) {
		if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
			pr_debug_ratelimited("%s: cookie mismatch (%u/%u). Discarding.\n",
					     tunnel->name, tunnel->tunnel_id,
					     session->session_id);
			atomic_long_inc(&session->stats.rx_cookie_discards);
			goto discard;
		}
		ptr += session->peer_cookie_len;
	}

	/* Handle the optional sequence numbers. Sequence numbers are
	 * in different places for L2TPv2 and L2TPv3.
	 *
	 * If we are the LAC, enable/disable sequence numbers under
	 * the control of the LNS. If no sequence numbers present but
	 * we were expecting them, discard frame.
	 */
	L2TP_SKB_CB(skb)->has_seq = 0;
	if (tunnel->version == L2TP_HDR_VER_2) {
		if (hdrflags & L2TP_HDRFLAG_S) {
			/* Store L2TP info in the skb */
			L2TP_SKB_CB(skb)->ns = ntohs(*(__be16 *)ptr);
			L2TP_SKB_CB(skb)->has_seq = 1;
			ptr += 2;
			/* Skip past nr in the header */
			ptr += 2;
		}
	} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		u32 l2h = ntohl(*(__be32 *)ptr);

		if (l2h & 0x40000000) {
			/* Store L2TP info in the skb */
			L2TP_SKB_CB(skb)->ns = l2h & 0x00ffffff;
			L2TP_SKB_CB(skb)->has_seq = 1;
		}
		ptr += 4;
	}

	if (L2TP_SKB_CB(skb)->has_seq) {
		/* Received a packet with sequence numbers. If we're the LAC,
		 * check if we are sending sequence numbers and if not,
		 * configure it so.
		 */
		if (!session->lns_mode && !session->send_seq) {
			trace_session_seqnum_lns_enable(session);
			session->send_seq = 1;
			l2tp_session_set_header_len(session, tunnel->version,
						    tunnel->encap);
		}
	} else {
		/* No sequence numbers.
		 * If user has configured mandatory sequence numbers, discard.
		 */
		if (session->recv_seq) {
			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
					     session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}

		/* If we're the LAC and we're sending sequence numbers, the
		 * LNS has requested that we no longer send sequence numbers.
		 * If we're the LNS and we're sending sequence numbers, the
		 * LAC is broken. Discard the frame.
		 */
		if (!session->lns_mode && session->send_seq) {
			trace_session_seqnum_lns_disable(session);
			session->send_seq = 0;
			l2tp_session_set_header_len(session, tunnel->version,
						    tunnel->encap);
		} else if (session->send_seq) {
			pr_debug_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
					     session->name);
			atomic_long_inc(&session->stats.rx_seq_discards);
			goto discard;
		}
	}

	/* Session data offset is defined only for L2TPv2 and is
	 * indicated by an optional 16-bit value in the header.
	 */
	if (tunnel->version == L2TP_HDR_VER_2) {
		/* If offset bit set, skip it. */
		if (hdrflags & L2TP_HDRFLAG_O) {
			offset = ntohs(*(__be16 *)ptr);
			ptr += 2 + offset;
		}
	}

	offset = ptr - optr;
	if (!pskb_may_pull(skb, offset))
		goto discard;

	__skb_pull(skb, offset);

	/* Prepare skb for adding to the session's reorder_q. Hold
	 * packets for max reorder_timeout or 1 second if not
	 * reordering.
	 */
	L2TP_SKB_CB(skb)->length = length;
	L2TP_SKB_CB(skb)->expires = jiffies +
		(session->reorder_timeout ? session->reorder_timeout : HZ);

	/* Add packet to the session's receive queue. Reordering is done here, if
	 * enabled. Saved L2TP protocol info is stored in skb->cb[].
	 */
	if (L2TP_SKB_CB(skb)->has_seq) {
		if (l2tp_recv_data_seq(session, skb))
			goto discard;
	} else {
		/* No sequence numbers. Add the skb to the tail of the
		 * reorder queue. This ensures that it will be
		 * delivered after all previous sequenced skbs.
		 */
		skb_queue_tail(&session->reorder_q, skb);
	}

	/* Try to dequeue as many skbs from reorder_q as we can. */
	l2tp_recv_dequeue(session);

	return;

discard:
	atomic_long_inc(&session->stats.rx_errors);
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(l2tp_recv_common);

/* Drop skbs from the session's reorder_q
 */
static void l2tp_session_queue_purge(struct l2tp_session *session)
{
	struct sk_buff *skb = NULL;

	while ((skb = skb_dequeue(&session->reorder_q))) {
		atomic_long_inc(&session->stats.rx_errors);
		kfree_skb(skb);
	}
}

/* UDP encapsulation receive handler. See net/ipv4/udp.c for details. */
int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct l2tp_session *session = NULL;
	struct l2tp_tunnel *tunnel = NULL;
	struct net *net = sock_net(sk);
	unsigned char *ptr, *optr;
	u16 hdrflags;
	u16 version;
	int length;

	/* UDP has verified checksum */

	/* UDP always verifies the packet length. */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Short packet? */
	if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX))
		goto pass;

	/* Point to L2TP header */
	optr = skb->data;
	ptr = skb->data;

	/* Get L2TP header flags */
	hdrflags = ntohs(*(__be16 *)ptr);

	/* Get protocol version */
	version = hdrflags & L2TP_HDR_VER_MASK;

	/* Get length of L2TP packet */
	length = skb->len;

	/* If type is control packet, it is handled by userspace. */
	if (hdrflags & L2TP_HDRFLAG_T)
		goto pass;

	/* Skip flags */
	ptr += 2;

	if (version == L2TP_HDR_VER_2) {
		u16 tunnel_id, session_id;

		/* If length is present, skip it */
		if (hdrflags & L2TP_HDRFLAG_L)
			ptr += 2;

		/* Extract tunnel and session ID */
		tunnel_id = ntohs(*(__be16 *)ptr);
		ptr += 2;
		session_id = ntohs(*(__be16 *)ptr);
		ptr += 2;

		session = l2tp_v2_session_get(net, tunnel_id, session_id);
	} else {
		u32 session_id;

		ptr += 2;	/* skip reserved bits */
		session_id = ntohl(*(__be32 *)ptr);
		ptr += 4;

		session = l2tp_v3_session_get(net, sk, session_id);
	}

	if (!session || !session->recv_skb) {
		if (session)
			l2tp_session_put(session);

		/* Not found? Pass to userspace to deal with */
		goto pass;
	}

	tunnel = session->tunnel;

	/* Check protocol version */
	if (version != tunnel->version)
		goto invalid;

	if (version == L2TP_HDR_VER_3 &&
	    l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) {
		l2tp_session_put(session);
		goto invalid;
	}

	l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
	l2tp_session_put(session);

	return 0;

invalid:
	atomic_long_inc(&tunnel->stats.rx_invalid);

pass:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
}
EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);

/* UDP encapsulation receive error handler. See net/ipv4/udp.c for details. */
static void l2tp_udp_encap_err_recv(struct sock *sk, struct sk_buff *skb, int err,
				    __be16 port, u32 info, u8 *payload)
{
	sk->sk_err = err;
	sk_error_report(sk);

	if (ip_hdr(skb)->version == IPVERSION) {
		if (inet_test_bit(RECVERR, sk))
			return ip_icmp_error(sk, skb, err, port, info, payload);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		if (inet6_test_bit(RECVERR6, sk))
			return ipv6_icmp_error(sk, skb, err, port, info, payload);
#endif
	}
}

/************************************************************************
 * Transmit handling
 ***********************************************************************/

/* Build an L2TP header for the session into the buffer provided.
 */
static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	__be16 *bufp = buf;
	__be16 *optr = buf;
	u16 flags = L2TP_HDR_VER_2;
	u32 tunnel_id = tunnel->peer_tunnel_id;
	u32 session_id = session->peer_session_id;

	if (session->send_seq)
		flags |= L2TP_HDRFLAG_S;

	/* Setup L2TP header. */
	*bufp++ = htons(flags);
	*bufp++ = htons(tunnel_id);
	*bufp++ = htons(session_id);
	if (session->send_seq) {
		*bufp++ = htons(session->ns);
		*bufp++ = 0;
		session->ns++;
		session->ns &= 0xffff;
		trace_session_seqnum_update(session);
	}

	return bufp - optr;
}
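/* Resulting v2 header (illustrative), with sequence numbers enabled and
 * peer tunnel/session IDs 9/10: flags 0x0802 (S bit | version 2), then
 * 0x0009, 0x000a, Ns, Nr=0: five 16-bit words (10 bytes), matching the
 * v2 branch of l2tp_session_set_header_len() below.
 */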

static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	char *bufp = buf;
	char *optr = bufp;

	/* Setup L2TP header. The header differs slightly for UDP and
	 * IP encapsulations. For UDP, there is 4 bytes of flags.
	 */
	if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
		u16 flags = L2TP_HDR_VER_3;
		*((__be16 *)bufp) = htons(flags);
		bufp += 2;
		*((__be16 *)bufp) = 0;
		bufp += 2;
	}

	*((__be32 *)bufp) = htonl(session->peer_session_id);
	bufp += 4;
	if (session->cookie_len) {
		memcpy(bufp, &session->cookie[0], session->cookie_len);
		bufp += session->cookie_len;
	}
	if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
		u32 l2h = 0;

		if (session->send_seq) {
			l2h = 0x40000000 | session->ns;
			session->ns++;
			session->ns &= 0xffffff;
			trace_session_seqnum_update(session);
		}

		*((__be32 *)bufp) = htonl(l2h);
		bufp += 4;
	}

	return bufp - optr;
}

/* Queue the packet to IP for output: tunnel socket lock must be held */
static int l2tp_xmit_queue(struct l2tp_tunnel *tunnel, struct sk_buff *skb, struct flowi *fl)
{
	int err;

	skb->ignore_df = 1;
	skb_dst_drop(skb);
#if IS_ENABLED(CONFIG_IPV6)
	if (l2tp_sk_is_v6(tunnel->sock))
		err = inet6_csk_xmit(tunnel->sock, skb, NULL);
	else
#endif
		err = ip_queue_xmit(tunnel->sock, skb, fl);

	return err >= 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, unsigned int *len)
{
	struct l2tp_tunnel *tunnel = session->tunnel;
	unsigned int data_len = skb->len;
	struct sock *sk = tunnel->sock;
	int headroom, uhlen, udp_len;
	int ret = NET_XMIT_SUCCESS;
	struct inet_sock *inet;
	struct udphdr *uh;

	/* Check that there's enough headroom in the skb to insert IP,
	 * UDP and L2TP headers. If not enough, expand it to
	 * make room. Adjust truesize.
	 */
	uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(*uh) : 0;
	headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len;
	if (skb_cow_head(skb, headroom)) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	/* Setup L2TP header */
	if (tunnel->version == L2TP_HDR_VER_2)
		l2tp_build_l2tpv2_header(session, __skb_push(skb, session->hdr_len));
	else
		l2tp_build_l2tpv3_header(session, __skb_push(skb, session->hdr_len));

	/* Reset skb netfilter state */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
	nf_reset_ct(skb);

	/* L2TP uses its own lockdep subclass to avoid lockdep splats caused by
	 * nested socket calls on the same lockdep socket class. This can
	 * happen when data from a user socket is routed over l2tp, which uses
	 * another userspace socket.
	 */
	spin_lock_nested(&sk->sk_lock.slock, L2TP_DEPTH_NESTING);

	if (sock_owned_by_user(sk)) {
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* The user-space may change the connection status for the user-space
	 * provided socket at run time: we must check it under the socket lock
	 */
	if (tunnel->fd >= 0 && sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		ret = NET_XMIT_DROP;
		goto out_unlock;
	}

	/* Report transmitted length before we add encap header, which keeps
	 * statistics consistent for both UDP and IP encap tx/rx paths.
	 */
	*len = skb->len;

	inet = inet_sk(sk);
	switch (tunnel->encap) {
	case L2TP_ENCAPTYPE_UDP:
		/* Setup UDP header */
		__skb_push(skb, sizeof(*uh));
		skb_reset_transport_header(skb);
		uh = udp_hdr(skb);
		uh->source = inet->inet_sport;
		uh->dest = inet->inet_dport;
		udp_len = uhlen + session->hdr_len + data_len;
		uh->len = htons(udp_len);

		/* Calculate UDP checksum if configured to do so */
#if IS_ENABLED(CONFIG_IPV6)
		if (l2tp_sk_is_v6(sk))
			udp6_set_csum(udp_get_no_check6_tx(sk),
				      skb, &inet6_sk(sk)->saddr,
				      &sk->sk_v6_daddr, udp_len);
		else
#endif
			udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
				     inet->inet_daddr, udp_len);
		break;

	case L2TP_ENCAPTYPE_IP:
		break;
	}

	ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);

out_unlock:
	spin_unlock(&sk->sk_lock.slock);

	return ret;
}

/* If caller requires the skb to have a ppp header, the header must be
 * inserted in the skb data before calling this function.
 */
int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb)
{
	unsigned int len = 0;
	int ret;

	ret = l2tp_xmit_core(session, skb, &len);
	if (ret == NET_XMIT_SUCCESS) {
		atomic_long_inc(&session->tunnel->stats.tx_packets);
		atomic_long_add(len, &session->tunnel->stats.tx_bytes);
		atomic_long_inc(&session->stats.tx_packets);
		atomic_long_add(len, &session->stats.tx_bytes);
	} else {
		atomic_long_inc(&session->tunnel->stats.tx_errors);
		atomic_long_inc(&session->stats.tx_errors);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(l2tp_xmit_skb);

/*****************************************************************************
 * Tunnel and session create/destroy.
 *****************************************************************************/

/* Remove an l2tp session from l2tp_core's lists. */
static void l2tp_session_unhash(struct l2tp_session *session)
{
	struct l2tp_tunnel *tunnel = session->tunnel;

	if (tunnel) {
		struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
		struct l2tp_session *removed = session;

		spin_lock_bh(&tunnel->list_lock);
		spin_lock_bh(&pn->l2tp_session_idr_lock);

		/* Remove from the per-tunnel list */
		list_del_init(&session->list);

		/* Remove from per-net IDR */
		if (tunnel->version == L2TP_HDR_VER_3) {
			if (hash_hashed(&session->hlist))
				l2tp_session_collision_del(pn, session);
			else
				removed = idr_remove(&pn->l2tp_v3_session_idr,
						     session->session_id);
		} else {
			u32 session_key = l2tp_v2_session_key(tunnel->tunnel_id,
							      session->session_id);
			removed = idr_remove(&pn->l2tp_v2_session_idr,
					     session_key);
		}
		WARN_ON_ONCE(removed && removed != session);

		spin_unlock_bh(&pn->l2tp_session_idr_lock);
		spin_unlock_bh(&tunnel->list_lock);
	}
}

/* When the tunnel is closed, all the attached sessions need to go too.
 */
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
	struct l2tp_session *session;

	spin_lock_bh(&tunnel->list_lock);
	tunnel->acpt_newsess = false;
	list_for_each_entry(session, &tunnel->session_list, list)
		l2tp_session_delete(session);
	spin_unlock_bh(&tunnel->list_lock);
}

/* Tunnel socket destroy hook for UDP encapsulation */
static void l2tp_udp_encap_destroy(struct sock *sk)
{
	struct l2tp_tunnel *tunnel;

	tunnel = l2tp_sk_to_tunnel(sk);
	if (tunnel) {
		l2tp_tunnel_delete(tunnel);
		l2tp_tunnel_put(tunnel);
	}
}

static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel)
{
	struct l2tp_net *pn = l2tp_pernet(net);

	spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
	idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id);
	spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
}

/* Workqueue tunnel deletion function */
static void l2tp_tunnel_del_work(struct work_struct *work)
{
	struct l2tp_tunnel *tunnel = container_of(work, struct l2tp_tunnel,
						  del_work);
	struct sock *sk = tunnel->sock;
	struct socket *sock = sk->sk_socket;

	l2tp_tunnel_closeall(tunnel);

	/* If the tunnel socket was created within the kernel, use
	 * the sk API to release it here.
	 */
	if (tunnel->fd < 0) {
		if (sock) {
			kernel_sock_shutdown(sock, SHUT_RDWR);
			sock_release(sock);
		}
	}

	l2tp_tunnel_remove(tunnel->l2tp_net, tunnel);
	/* drop initial ref */
	l2tp_tunnel_put(tunnel);

	/* drop workqueue ref */
	l2tp_tunnel_put(tunnel);
}

/* Create a socket for the tunnel, if one isn't set up by
 * userspace. This is used for static tunnels where there is no
 * managing L2TP daemon.
 *
 * Since we don't want these sockets to keep a namespace alive by
 * themselves, we drop the socket's namespace refcount after creation.
 * These sockets are freed when the namespace exits using the pernet
 * exit hook.
 */
static int l2tp_tunnel_sock_create(struct net *net,
				   u32 tunnel_id,
				   u32 peer_tunnel_id,
				   struct l2tp_tunnel_cfg *cfg,
				   struct socket **sockp)
{
	int err = -EINVAL;
	struct socket *sock = NULL;
	struct udp_port_cfg udp_conf;

	switch (cfg->encap) {
	case L2TP_ENCAPTYPE_UDP:
		memset(&udp_conf, 0, sizeof(udp_conf));

#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			udp_conf.family = AF_INET6;
			memcpy(&udp_conf.local_ip6, cfg->local_ip6,
			       sizeof(udp_conf.local_ip6));
			memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
			       sizeof(udp_conf.peer_ip6));
			udp_conf.use_udp6_tx_checksums =
			    !cfg->udp6_zero_tx_checksums;
			udp_conf.use_udp6_rx_checksums =
			    !cfg->udp6_zero_rx_checksums;
		} else
#endif
		{
			udp_conf.family = AF_INET;
			udp_conf.local_ip = cfg->local_ip;
			udp_conf.peer_ip = cfg->peer_ip;
			udp_conf.use_udp_checksums = cfg->use_udp_checksums;
		}

		udp_conf.local_udp_port = htons(cfg->local_udp_port);
		udp_conf.peer_udp_port = htons(cfg->peer_udp_port);

		err = udp_sock_create(net, &udp_conf, &sock);
		if (err < 0)
			goto out;

		break;

	case L2TP_ENCAPTYPE_IP:
#if IS_ENABLED(CONFIG_IPV6)
		if (cfg->local_ip6 && cfg->peer_ip6) {
			struct sockaddr_l2tpip6 ip6_addr = {0};

			err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *)&ip6_addr,
					  sizeof(ip6_addr));
			if (err < 0)
				goto out;

			ip6_addr.l2tp_family = AF_INET6;
			memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
			       sizeof(ip6_addr.l2tp_addr));
			ip6_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock,
					     (struct sockaddr *)&ip6_addr,
					     sizeof(ip6_addr), 0);
			if (err < 0)
				goto out;
		} else
#endif
		{
			struct sockaddr_l2tpip ip_addr = {0};

			err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
					       IPPROTO_L2TP, &sock);
			if (err < 0)
				goto out;

			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->local_ip;
			ip_addr.l2tp_conn_id = tunnel_id;
			err = kernel_bind(sock, (struct sockaddr *)&ip_addr,
					  sizeof(ip_addr));
			if (err < 0)
				goto out;

			ip_addr.l2tp_family = AF_INET;
			ip_addr.l2tp_addr = cfg->peer_ip;
			ip_addr.l2tp_conn_id = peer_tunnel_id;
			err = kernel_connect(sock, (struct sockaddr *)&ip_addr,
					     sizeof(ip_addr), 0);
			if (err < 0)
				goto out;
		}
		break;

	default:
		goto out;
	}

out:
	*sockp = sock;
	if (err < 0 && sock) {
		kernel_sock_shutdown(sock, SHUT_RDWR);
		sock_release(sock);
		*sockp = NULL;
	}

	return err;
}

int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
		       struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
{
	struct l2tp_tunnel *tunnel = NULL;
	int err;
	enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;

	if (cfg)
		encap = cfg->encap;

	tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
	if (!tunnel) {
		err = -ENOMEM;
		goto err;
	}

	tunnel->version = version;
	tunnel->tunnel_id = tunnel_id;
	tunnel->peer_tunnel_id = peer_tunnel_id;

	sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
	spin_lock_init(&tunnel->list_lock);
	tunnel->acpt_newsess = true;
	INIT_LIST_HEAD(&tunnel->session_list);

	tunnel->encap = encap;

	refcount_set(&tunnel->ref_count, 1);
	tunnel->fd = fd;

	/* Init delete workqueue struct */
	INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);

	err = 0;
err:
	if (tunnelp)
		*tunnelp = tunnel;

	return err;
}
EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
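/* Typical calling sequence (an illustrative sketch, not taken from a
 * specific caller): allocate the tunnel, then register it to bind its
 * socket and publish it in the per-net IDR:
 *
 *	struct l2tp_tunnel *tunnel;
 *	int err;
 *
 *	err = l2tp_tunnel_create(fd, 3, tunnel_id, peer_tunnel_id, &cfg, &tunnel);
 *	if (!err)
 *		err = l2tp_tunnel_register(tunnel, net, &cfg);
 *
 * l2tp_tunnel_register() performs the socket setup and validation; on
 * failure it removes the tunnel's IDR reservation again.
 */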
1604
1605
static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
1606
enum l2tp_encap_type encap)
1607
{
1608
struct l2tp_tunnel *tunnel;
1609
1610
if (!net_eq(sock_net(sk), net))
1611
return -EINVAL;
1612
1613
if (sk->sk_type != SOCK_DGRAM)
1614
return -EPROTONOSUPPORT;
1615
1616
if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
1617
return -EPROTONOSUPPORT;
1618
1619
if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
1620
(encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
1621
return -EPROTONOSUPPORT;
1622
1623
if (encap == L2TP_ENCAPTYPE_UDP && sk->sk_user_data)
1624
return -EBUSY;
1625
1626
tunnel = l2tp_sk_to_tunnel(sk);
1627
if (tunnel) {
1628
l2tp_tunnel_put(tunnel);
1629
return -EBUSY;
1630
}
1631
1632
return 0;
1633
}
1634
1635
int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
1636
struct l2tp_tunnel_cfg *cfg)
1637
{
1638
struct l2tp_net *pn = l2tp_pernet(net);
1639
u32 tunnel_id = tunnel->tunnel_id;
1640
struct socket *sock;
1641
struct sock *sk;
1642
int ret;
1643
1644
spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1645
ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id,
1646
GFP_ATOMIC);
1647
spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1648
if (ret)
1649
return ret == -ENOSPC ? -EEXIST : ret;
1650
1651
if (tunnel->fd < 0) {
1652
ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
1653
tunnel->peer_tunnel_id, cfg,
1654
&sock);
1655
if (ret < 0)
1656
goto err;
1657
} else {
1658
sock = sockfd_lookup(tunnel->fd, &ret);
1659
if (!sock)
1660
goto err;
1661
}
1662
1663
sk = sock->sk;
1664
lock_sock(sk);
1665
write_lock_bh(&sk->sk_callback_lock);
1666
ret = l2tp_validate_socket(sk, net, tunnel->encap);
1667
if (ret < 0)
1668
goto err_inval_sock;
1669
write_unlock_bh(&sk->sk_callback_lock);
1670
1671
if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1672
struct udp_tunnel_sock_cfg udp_cfg = {
1673
.encap_type = UDP_ENCAP_L2TPINUDP,
1674
.encap_rcv = l2tp_udp_encap_recv,
1675
.encap_err_rcv = l2tp_udp_encap_err_recv,
1676
.encap_destroy = l2tp_udp_encap_destroy,
1677
};
1678
1679
setup_udp_tunnel_sock(net, sock, &udp_cfg);
1680
}
1681
1682
sk->sk_allocation = GFP_ATOMIC;
1683
release_sock(sk);
1684
1685
sock_hold(sk);
1686
tunnel->sock = sk;
1687
tunnel->l2tp_net = net;
1688
1689
spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
1690
idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id);
1691
spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
1692
1693
trace_register_tunnel(tunnel);
1694
1695
if (tunnel->fd >= 0)
1696
sockfd_put(sock);
1697
1698
return 0;
1699
1700
err_inval_sock:
1701
write_unlock_bh(&sk->sk_callback_lock);
1702
release_sock(sk);
1703
1704
if (tunnel->fd < 0)
1705
sock_release(sock);
1706
else
1707
sockfd_put(sock);
1708
err:
1709
l2tp_tunnel_remove(net, tunnel);
1710
return ret;
1711
}
1712
EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
1713
1714
/* This function is used by the netlink TUNNEL_DELETE command.
1715
*/
1716
void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1717
{
1718
if (!test_and_set_bit(0, &tunnel->dead)) {
1719
trace_delete_tunnel(tunnel);
1720
refcount_inc(&tunnel->ref_count);
1721
queue_work(l2tp_wq, &tunnel->del_work);
1722
}
1723
}
1724
EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1725
1726
void l2tp_session_delete(struct l2tp_session *session)
1727
{
1728
if (!test_and_set_bit(0, &session->dead)) {
1729
trace_delete_session(session);
1730
refcount_inc(&session->ref_count);
1731
queue_work(l2tp_wq, &session->del_work);
1732
}
1733
}
1734
EXPORT_SYMBOL_GPL(l2tp_session_delete);
1735
1736
/* Workqueue session deletion function */
1737
static void l2tp_session_del_work(struct work_struct *work)
1738
{
1739
struct l2tp_session *session = container_of(work, struct l2tp_session,
1740
del_work);
1741
1742
l2tp_session_unhash(session);
1743
l2tp_session_queue_purge(session);
1744
if (session->session_close)
1745
(*session->session_close)(session);
1746
1747
/* drop initial ref */
1748
l2tp_session_put(session);
1749
1750
/* drop workqueue ref */
1751
l2tp_session_put(session);
1752
}
1753
1754
/* We come here whenever a session's send_seq, cookie_len or
1755
* l2specific_type parameters are set.
1756
*/
1757
void l2tp_session_set_header_len(struct l2tp_session *session, int version,
1758
enum l2tp_encap_type encap)
1759
{
1760
if (version == L2TP_HDR_VER_2) {
1761
session->hdr_len = 6;
1762
if (session->send_seq)
1763
session->hdr_len += 4;
1764
} else {
1765
session->hdr_len = 4 + session->cookie_len;
1766
session->hdr_len += l2tp_get_l2specific_len(session);
1767
if (encap == L2TP_ENCAPTYPE_UDP)
1768
session->hdr_len += 4;
1769
}
1770
}
1771
EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
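/* Example header lengths (illustrative): an L2TPv3 UDP-encap session with
 * a 4-byte cookie and the default L2-specific sublayer gets
 * 4 (flags/ver) + 4 (session ID) + 4 (cookie) + 4 (sublayer) = 16 bytes;
 * the same session over IP encap omits the 4 flag bytes, giving 12.
 */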

struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id,
					 u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
	struct l2tp_session *session;

	session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL);
	if (session) {
		session->magic = L2TP_SESSION_MAGIC;

		session->session_id = session_id;
		session->peer_session_id = peer_session_id;
		session->nr = 0;
		if (tunnel->version == L2TP_HDR_VER_2)
			session->nr_max = 0xffff;
		else
			session->nr_max = 0xffffff;
		session->nr_window_size = session->nr_max / 2;
		session->nr_oos_count_max = 4;

		/* Use NR of first received packet */
		session->reorder_skip = 1;

		sprintf(&session->name[0], "sess %u/%u",
			tunnel->tunnel_id, session->session_id);

		skb_queue_head_init(&session->reorder_q);

		session->hlist_key = l2tp_v3_session_hashkey(tunnel->sock, session->session_id);
		INIT_HLIST_NODE(&session->hlist);
		INIT_LIST_HEAD(&session->clist);
		INIT_LIST_HEAD(&session->list);
		INIT_WORK(&session->del_work, l2tp_session_del_work);

		if (cfg) {
			session->pwtype = cfg->pw_type;
			session->send_seq = cfg->send_seq;
			session->recv_seq = cfg->recv_seq;
			session->lns_mode = cfg->lns_mode;
			session->reorder_timeout = cfg->reorder_timeout;
			session->l2specific_type = cfg->l2specific_type;
			session->cookie_len = cfg->cookie_len;
			memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
			session->peer_cookie_len = cfg->peer_cookie_len;
			memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
		}

		l2tp_session_set_header_len(session, tunnel->version, tunnel->encap);

		refcount_set(&session->ref_count, 1);

		return session;
	}

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(l2tp_session_create);

/*****************************************************************************
 * Init and cleanup
 *****************************************************************************/

static __net_init int l2tp_init_net(struct net *net)
{
	struct l2tp_net *pn = net_generic(net, l2tp_net_id);

	idr_init(&pn->l2tp_tunnel_idr);
	spin_lock_init(&pn->l2tp_tunnel_idr_lock);

	idr_init(&pn->l2tp_v2_session_idr);
	idr_init(&pn->l2tp_v3_session_idr);
	spin_lock_init(&pn->l2tp_session_idr_lock);

	return 0;
}

static __net_exit void l2tp_pre_exit_net(struct net *net)
{
	struct l2tp_net *pn = l2tp_pernet(net);
	struct l2tp_tunnel *tunnel = NULL;
	unsigned long tunnel_id, tmp;

	rcu_read_lock_bh();
	idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
		if (tunnel)
			l2tp_tunnel_delete(tunnel);
	}
	rcu_read_unlock_bh();

	if (l2tp_wq) {
		/* Run all TUNNEL_DELETE work items just queued. */
		__flush_workqueue(l2tp_wq);

		/* Each TUNNEL_DELETE work item will queue a SESSION_DELETE
		 * work item for each session in the tunnel. Flush the
		 * workqueue again to process these.
		 */
		__flush_workqueue(l2tp_wq);
	}
}

static int l2tp_idr_item_unexpected(int id, void *p, void *data)
{
	const char *idr_name = data;

	pr_err("l2tp: %s IDR not empty at net %d exit\n", idr_name, id);
	WARN_ON_ONCE(1);
	return 1;
}

static __net_exit void l2tp_exit_net(struct net *net)
{
	struct l2tp_net *pn = l2tp_pernet(net);

	/* Our per-net IDRs should be empty. Check that is so, to
	 * help catch cleanup races or refcnt leaks.
	 */
	idr_for_each(&pn->l2tp_v2_session_idr, l2tp_idr_item_unexpected,
		     "v2_session");
	idr_for_each(&pn->l2tp_v3_session_idr, l2tp_idr_item_unexpected,
		     "v3_session");
	idr_for_each(&pn->l2tp_tunnel_idr, l2tp_idr_item_unexpected,
		     "tunnel");

	idr_destroy(&pn->l2tp_v2_session_idr);
	idr_destroy(&pn->l2tp_v3_session_idr);
	idr_destroy(&pn->l2tp_tunnel_idr);
}

static struct pernet_operations l2tp_net_ops = {
	.init = l2tp_init_net,
	.exit = l2tp_exit_net,
	.pre_exit = l2tp_pre_exit_net,
	.id   = &l2tp_net_id,
	.size = sizeof(struct l2tp_net),
};

static int __init l2tp_init(void)
{
	int rc = 0;

	rc = register_pernet_device(&l2tp_net_ops);
	if (rc)
		goto out;

	l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
	if (!l2tp_wq) {
		pr_err("alloc_workqueue failed\n");
		unregister_pernet_device(&l2tp_net_ops);
		rc = -ENOMEM;
		goto out;
	}

	pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);

out:
	return rc;
}

static void __exit l2tp_exit(void)
{
	unregister_pernet_device(&l2tp_net_ops);
	if (l2tp_wq) {
		destroy_workqueue(l2tp_wq);
		l2tp_wq = NULL;
	}
}

module_init(l2tp_init);
module_exit(l2tp_exit);

MODULE_AUTHOR("James Chapman <[email protected]>");
MODULE_DESCRIPTION("L2TP core");
MODULE_LICENSE("GPL");
MODULE_VERSION(L2TP_DRV_VERSION);