// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov ([email protected])
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/flow.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>

/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   by infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep an skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it would require maintaining a new variable in
   ALL skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter; since cpu migration is forbidden once we enter the first
   ndo_xmit(), we force an exit when this counter reaches RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but they would really
   kill the network. The IP hop limit plays the role of "t->recursion" in
   this case, if we copy it from the packet being encapsulated to the
   upper header. It is a very good solution, but it introduces two problems:

   - Routing protocols using packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output
     would be even more informative. This idea turned out to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all other routers (at least in my neighbourhood)
     return only 8 bytes of payload. That is the end of it.

   Hence, if we want OSPF to work or traceroute to say something reasonable,
   we should search for another solution.

   One of them is to parse the packet, trying to detect inner encapsulation
   made by our node. It is difficult or even impossible, especially
   taking fragmentation into account. To be short, ttl is not a solution
   at all.

   Current solution: the solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit;
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed the pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when the encapsulating packets have DF set.
   But it is not our problem! Nobody could accuse us; we did
   all that we could. Even if it was your gated that injected the
   fatal route into the network, even if it was you who configured the
   fatal static route: you are innocent. :-)

   Alexey Kuznetsov.
 */
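
/* A concrete illustration of the DF-forcing behaviour described above
 * (hedged example; the iproute2 command syntax is an assumption about
 * userspace, not part of this file): a tunnel created with a fixed TTL
 * gets DF forced on the encapsulating header, so a routing loop degrades
 * the path MTU instead of multiplying traffic:
 *
 *	ip tunnel add gre1 mode gre remote 192.0.2.1 local 192.0.2.2 ttl 64
 */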

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
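
/* Hedged usage note (assumption: this file is built as the ip_gre module):
 * a bool parameter registered with mode 0644 is also writable at runtime
 * through sysfs, e.g.
 *
 *	echo 0 > /sys/module/ip_gre/parameters/log_ecn_error
 */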

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static const struct header_ops ipgre_header_ops;

static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
				u32 id, u32 index,
				bool truncate, bool is_ipv4);

static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;

static int ipgre_err(struct sk_buff *skb, u32 info,
		     const struct tnl_ptk_info *tpi)
{

	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. This means that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put the GRE key in the third word
	 * of the GRE header. That makes it impossible to maintain even
	 * soft state for keyed GRE tunnels with checksums enabled. Tell
	 * them "thank you".
	 *
	 * Well, I wonder: rfc1812 was written by a Cisco employee, so
	 * why the hell do these idiots break the standards they
	 * established themselves?
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct ip_tunnel *t;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
		 tpi->proto == htons(ETH_P_ERSPAN2))
		itn = net_generic(net, erspan_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

	if (!t)
		return -ENOENT;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return 0;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return 0;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return 0;
		break;

	case ICMP_REDIRECT:
		break;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (tpi->proto == htons(ETH_P_IPV6)) {
		unsigned int data_len = 0;

		if (type == ICMP_TIME_EXCEEDED)
			data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */

		if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
						type, data_len))
			return 0;
	}
#endif

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return 0;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return 0;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

	return 0;
}

static void gre_err(struct sk_buff *skb, u32 info)
{
	/* See the comment at the top of ipgre_err(): all routers except
	 * Linux return only 8 bytes of ICMP payload, and Cisco placed the
	 * GRE key in the third word of the GRE header, so precise ICMP
	 * relaying for keyed, checksummed tunnels is infeasible. The same
	 * caveats apply here.
	 */

	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;

	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
			     iph->ihl * 4) < 0)
		return;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, IPPROTO_GRE);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
			      IPPROTO_GRE);
		return;
	}

	ipgre_err(skb, info, &tpi);
}

static bool is_erspan_type1(int gre_hdr_len)
{
	/* Both ERSPAN type I (version 0) and type II (version 1) use
	 * protocol 0x88BE, but type I has only a 4-byte GRE header,
	 * while type II has an 8-byte one.
	 */
	return gre_hdr_len == 4;
}
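
/* Layout sketch (an illustration added here, not from the original file;
 * per RFC 2890, the sequence field is present only when the S bit is set):
 *
 *	type I : | flags/ver (2) | proto 0x88BE (2) |            = 4 bytes
 *	type II: | flags/ver (2) | proto 0x88BE (2) | seq (4) |  = 8 bytes
 */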

static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct erspan_base_hdr *ershdr;
	IP_TUNNEL_DECLARE_FLAGS(flags);
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	struct erspan_md2 *md2;
	int ver;
	int len;

	ip_tunnel_flags_copy(flags, tpi->flags);

	itn = net_generic(net, erspan_net_id);
	iph = ip_hdr(skb);
	if (is_erspan_type1(gre_hdr_len)) {
		ver = 0;
		__set_bit(IP_TUNNEL_NO_KEY_BIT, flags);
		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
					  iph->saddr, iph->daddr, 0);
	} else {
		if (unlikely(!pskb_may_pull(skb,
					    gre_hdr_len + sizeof(*ershdr))))
			return PACKET_REJECT;

		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
		ver = ershdr->ver;
		iph = ip_hdr(skb);
		__set_bit(IP_TUNNEL_KEY_BIT, flags);
		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, flags,
					  iph->saddr, iph->daddr, tpi->key);
	}

	if (tunnel) {
		if (is_erspan_type1(gre_hdr_len))
			len = gre_hdr_len;
		else
			len = gre_hdr_len + erspan_hdr_len(ver);

		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		if (__iptunnel_pull_header(skb,
					   len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			goto drop;

		if (tunnel->collect_md) {
			struct erspan_metadata *pkt_md, *md;
			struct ip_tunnel_info *info;
			unsigned char *gh;
			__be64 tun_id;

			__set_bit(IP_TUNNEL_KEY_BIT, tpi->flags);
			ip_tunnel_flags_copy(flags, tpi->flags);
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ip_tun_rx_dst(skb, flags,
						tun_id, sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			/* MUST set options_len before referencing options */
			info = &tun_dst->u.tun_info;
			info->options_len = sizeof(*md);

			/* skb can be uncloned in __iptunnel_pull_header, so
			 * old pkt_md is no longer valid and we need to reset
			 * it
			 */
			gh = skb_network_header(skb) +
			     skb_network_header_len(skb);
			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
							    sizeof(*ershdr));
			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
					    ERSPAN_V2_MDSIZE);

			__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
				  info->key.tun_flags);
		}

		skb_reset_mac_header(skb);
		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		const struct iphdr *tnl_params;

		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		/* Special case for ipgre_header_parse(), which expects the
		 * mac_header to point to the outer IP header.
		 */
		if (tunnel->dev->header_ops == &ipgre_header_ops)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);

		tnl_params = &tunnel->parms.iph;
		if (tunnel->collect_md || tnl_params->daddr == 0) {
			IP_TUNNEL_DECLARE_FLAGS(flags) = { };
			__be64 tun_id;

			__set_bit(IP_TUNNEL_CSUM_BIT, flags);
			__set_bit(IP_TUNNEL_KEY_BIT, flags);
			ip_tunnel_flags_and(flags, tpi->flags, flags);

			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}

static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

out:
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	IP_TUNNEL_DECLARE_FLAGS(flags);

	ip_tunnel_flags_copy(flags, tunnel->parms.o_flags);

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 flags, proto, tunnel->parms.o_key,
			 test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
			 htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	int tunnel_hlen;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	if (skb_cow_head(skb, dev->needed_headroom))
		goto err_free_skb;

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
					      tunnel->parms.o_flags)))
		goto err_free_skb;

	__set_bit(IP_TUNNEL_CSUM_BIT, flags);
	__set_bit(IP_TUNNEL_KEY_BIT, flags);
	__set_bit(IP_TUNNEL_SEQ_BIT, flags);
	ip_tunnel_flags_and(flags, tun_info->key.tun_flags, flags);

	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id),
			 test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
			 htonl(atomic_fetch_inc(&tunnel->o_seqno)) : 0);

	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

	return;

err_free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
}

static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	IP_TUNNEL_DECLARE_FLAGS(flags) = { };
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct erspan_metadata *md;
	bool truncate = false;
	__be16 proto;
	int tunnel_hlen;
	int version;
	int nhoff;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	if (!test_bit(IP_TUNNEL_ERSPAN_OPT_BIT, tun_info->key.tun_flags))
		goto err_free_skb;
	if (tun_info->options_len < sizeof(*md))
		goto err_free_skb;
	md = ip_tunnel_info_opts(tun_info);

	/* ERSPAN has fixed 8 byte GRE header */
	version = md->version;
	tunnel_hlen = 8 + erspan_hdr_len(version);

	if (skb_cow_head(skb, dev->needed_headroom))
		goto err_free_skb;

	if (gre_handle_offloads(skb, false))
		goto err_free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
			goto err_free_skb;
		truncate = true;
	}

	nhoff = skb_network_offset(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		int thoff;

		if (skb_transport_header_was_set(skb))
			thoff = skb_transport_offset(skb);
		else
			thoff = nhoff + sizeof(struct ipv6hdr);
		if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
			truncate = true;
	}

	if (version == 1) {
		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
				    ntohl(md->u.index), truncate, true);
		proto = htons(ETH_P_ERSPAN);
	} else if (version == 2) {
		erspan_build_header_v2(skb,
				       ntohl(tunnel_id_to_key32(key->tun_id)),
				       md->u.md2.dir,
				       get_hwid(&md->u.md2),
				       truncate, true);
		proto = htons(ETH_P_ERSPAN2);
	} else {
		goto err_free_skb;
	}

	__set_bit(IP_TUNNEL_SEQ_BIT, flags);
	gre_build_header(skb, 8, flags, proto, 0,
			 htonl(atomic_fetch_inc(&tunnel->o_seqno)));

	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

	return;

err_free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
}

static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	const struct ip_tunnel_key *key;
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	key = &info->key;
	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
			    tunnel_id_to_key32(key->tun_id),
			    key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
			    skb->mark, skb_get_hash(skb), key->flow_flags);
	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}

static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		int pull_len = tunnel->hlen + sizeof(struct iphdr);

		if (skb_cow_head(skb, 0))
			goto free_skb;

		if (!pskb_may_pull(skb, pull_len))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		/* ip_tunnel_xmit() needs skb->data pointing to gre header. */
		skb_pull(skb, pull_len);
		skb_reset_mac_header(skb);

		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_start(skb) < skb->data)
			goto free_skb;
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
					      tunnel->parms.o_flags)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
	return NETDEV_TX_OK;
}

static netdev_tx_t erspan_xmit(struct sk_buff *skb,
			       struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	bool truncate = false;
	__be16 proto;

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		erspan_fb_xmit(skb, dev);
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, false))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
			goto free_skb;
		truncate = true;
	}

	/* Push ERSPAN header */
	if (tunnel->erspan_ver == 0) {
		proto = htons(ETH_P_ERSPAN);
		__clear_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags);
	} else if (tunnel->erspan_ver == 1) {
		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
				    tunnel->index,
				    truncate, true);
		proto = htons(ETH_P_ERSPAN);
	} else if (tunnel->erspan_ver == 2) {
		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
				       tunnel->dir, tunnel->hwid,
				       truncate, true);
		proto = htons(ETH_P_ERSPAN2);
	} else {
		goto free_skb;
	}

	__clear_bit(IP_TUNNEL_KEY_BIT, tunnel->parms.o_flags);
	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
	return NETDEV_TX_OK;
}

static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
					      tunnel->parms.o_flags)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	DEV_STATS_INC(dev, tx_dropped);
	return NETDEV_TX_OK;
}

static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int len;

	len = tunnel->tun_hlen;
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	len = tunnel->tun_hlen - len;
	tunnel->hlen = tunnel->hlen + len;

	if (dev->header_ops)
		dev->hard_header_len += len;
	else
		dev->needed_headroom += len;

	if (set_mtu)
		WRITE_ONCE(dev->mtu, max_t(int, dev->mtu - len, 68));

	if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags) ||
	    (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
	     tunnel->encap.type != TUNNEL_ENCAP_NONE)) {
		dev->features &= ~NETIF_F_GSO_SOFTWARE;
		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
	} else {
		dev->features |= NETIF_F_GSO_SOFTWARE;
		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	}
}

static int ipgre_tunnel_ctl(struct net_device *dev,
			    struct ip_tunnel_parm_kern *p,
			    int cmd)
{
	__be16 i_flags, o_flags;
	int err;

	if (!ip_tunnel_flags_is_be16_compat(p->i_flags) ||
	    !ip_tunnel_flags_is_be16_compat(p->o_flags))
		return -EOVERFLOW;

	i_flags = ip_tunnel_flags_to_be16(p->i_flags);
	o_flags = ip_tunnel_flags_to_be16(p->o_flags);

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
		    p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
		    ((i_flags | o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	gre_flags_to_tnl_flags(p->i_flags, i_flags);
	gre_flags_to_tnl_flags(p->o_flags, o_flags);

	err = ip_tunnel_ctl(dev, p, cmd);
	if (err)
		return err;

	if (cmd == SIOCCHGTUNNEL) {
		struct ip_tunnel *t = netdev_priv(dev);

		ip_tunnel_flags_copy(t->parms.i_flags, p->i_flags);
		ip_tunnel_flags_copy(t->parms.o_flags, p->o_flags);

		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
			ipgre_link_update(dev, true);
	}

	i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
	ip_tunnel_flags_from_be16(p->i_flags, i_flags);
	o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
	ip_tunnel_flags_from_be16(p->o_flags, o_flags);

	return 0;
}

/* Nice toy. Unfortunately, useless in real life :-)
 * It allows constructing a virtual multiprotocol broadcast "LAN"
 * over the Internet, provided multicast routing is tuned.
 *
 * I have no idea whether this bicycle was invented before me,
 * so I had to set ARPHRD_IPGRE to a random value.
 * I have the impression that Cisco could make something similar,
 * but this feature is apparently missing in IOS<=11.2(8).
 *
 * I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
 * with broadcast 224.66.66.66. If you have access to the mbone,
 * play with me :-)
 *
 *	ping -t 255 224.66.66.66
 *
 * If nobody answers, the mbone does not work.
 *
 *	ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
 *	ip addr add 10.66.66.<somewhat>/24 dev Universe
 *	ifconfig Universe up
 *	ifconfig Universe add fe80::<Your_real_addr>/10
 *	ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
 *	ftp 10.66.66.66
 *	...
 *	ftp fec0:6666:6666::193.233.7.65
 *	...
 */
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct gre_base_hdr *greh;
	struct iphdr *iph;
	int needed;

	needed = t->hlen + sizeof(*iph);
	if (skb_headroom(skb) < needed &&
	    pskb_expand_head(skb, HH_DATA_ALIGN(needed - skb_headroom(skb)),
			     0, GFP_ATOMIC))
		return -needed;

	iph = skb_push(skb, needed);
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *)skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4 = {
			.flowi4_oif = t->parms.link,
			.flowi4_dscp = ip4h_dscp(&t->parms.iph),
			.flowi4_scope = RT_SCOPE_UNIVERSE,
			.flowi4_proto = IPPROTO_GRE,
			.saddr = t->parms.iph.saddr,
			.daddr = t->parms.iph.daddr,
			.fl4_gre_key = t->parms.o_key,
		};
		struct rtable *rt;

		rt = ip_route_output_key(t->net, &fl4);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_siocdevprivate	= ip_tunnel_siocdevprivate,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_tunnel_ctl		= ipgre_tunnel_ctl,
};

#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops	= &ipgre_netdev_ops;
	dev->type	= ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}

static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
	dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;

	/* TCP offload with GRE SEQ is not supported, nor can we support 2
	 * levels of outer headers requiring an update.
	 */
	if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags))
		return;
	if (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
	    tunnel->encap.type != TUNNEL_ENCAP_NONE)
		return;

	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;

	dev->lltx = true;
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	__dev_addr_set(dev, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags		= IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len		= 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
			dev->hard_header_len = tunnel->hlen + sizeof(*iph);
			dev->needed_headroom = 0;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
		dev->hard_header_len = tunnel->hlen + sizeof(*iph);
		dev->needed_headroom = 0;
	}

	return ip_tunnel_init(dev);
}

static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_rtnl(struct net *net,
				       struct list_head *dev_to_kill)
{
	ip_tunnel_delete_net(net, ipgre_net_id, &ipgre_link_ops, dev_to_kill);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit_rtnl = ipgre_exit_rtnl,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION | GRE_ROUTING))
		return -EINVAL;

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data, extack);
}

static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret;

	if (!data)
		return 0;

	ret = ipgre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	if (data[IFLA_GRE_ERSPAN_VER] &&
	    nla_get_u8(data[IFLA_GRE_ERSPAN_VER]) == 0)
		return 0;

	/* ERSPAN type II/III should only have GRE sequence and key flag */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN session ID is only 10 bits. Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	return 0;
}

static int ipgre_netlink_parms(struct net_device *dev,
			       struct nlattr *data[],
			       struct nlattr *tb[],
			       struct ip_tunnel_parm_kern *parms,
			       __u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		gre_flags_to_tnl_flags(parms->i_flags,
				       nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		gre_flags_to_tnl_flags(parms->o_flags,
				       nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
		    && (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	return 0;
}

static int erspan_netlink_parms(struct net_device *dev,
				struct nlattr *data[],
				struct nlattr *tb[],
				struct ip_tunnel_parm_kern *parms,
				__u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);
	int err;

	err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
	if (err)
		return err;
	if (!data)
		return 0;

	if (data[IFLA_GRE_ERSPAN_VER]) {
		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

		if (t->erspan_ver > 2)
			return -EINVAL;
	}

	if (t->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
			if (t->index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (t->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}
		if (data[IFLA_GRE_ERSPAN_HWID]) {
			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}

/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static int erspan_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->erspan_ver == 0)
		tunnel->tun_hlen = 4; /* 4-byte GRE hdr. */
	else
		tunnel->tun_hlen = 8; /* 8-byte GRE hdr. */

	tunnel->parms.iph.protocol = IPPROTO_GRE;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->erspan_ver);

	dev->features		|= GRE_FEATURES;
	dev->hw_features	|= GRE_FEATURES;
	dev->priv_flags		|= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops erspan_netdev_ops = {
	.ndo_init		= erspan_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= erspan_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops	= &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags	|= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}

static int
ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
{
	struct ip_tunnel_encap ipencap;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	return 0;
}

static int ipgre_newlink(struct net_device *dev,
			 struct rtnl_newlink_params *params,
			 struct netlink_ext_ack *extack)
{
	struct nlattr **data = params->data;
	struct nlattr **tb = params->tb;
	struct ip_tunnel_parm_kern p;
	__u32 fwmark = 0;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(params->link_net ? : dev_net(dev), dev, tb, &p,
				 fwmark);
}

static int erspan_newlink(struct net_device *dev,
			  struct rtnl_newlink_params *params,
			  struct netlink_ext_ack *extack)
{
	struct nlattr **data = params->data;
	struct nlattr **tb = params->tb;
	struct ip_tunnel_parm_kern p;
	__u32 fwmark = 0;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err)
		return err;
	return ip_tunnel_newlink(params->link_net ? : dev_net(dev), dev, tb, &p,
				 fwmark);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm_kern p;
	__u32 fwmark = t->fwmark;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags);
	ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags);

	ipgre_link_update(dev, !tb[IFLA_MTU]);

	return 0;
}

static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm_kern p;
	__u32 fwmark = t->fwmark;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	ip_tunnel_flags_copy(t->parms.i_flags, p.i_flags);
	ip_tunnel_flags_copy(t->parms.o_flags, p.o_flags);

	return 0;
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_IGNORE_DF */
		nla_total_size(1) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_VER */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_DIR */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_HWID */
		nla_total_size(2) +
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm_kern *p = &t->parms;
	IP_TUNNEL_DECLARE_FLAGS(o_flags);

	ip_tunnel_flags_copy(o_flags, p->o_flags);

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int erspan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (t->erspan_ver <= 2) {
		if (t->erspan_ver != 0 && !t->collect_md)
			__set_bit(IP_TUNNEL_KEY_BIT, t->parms.o_flags);

		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
			goto nla_put_failure;

		if (t->erspan_ver == 1) {
			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
				goto nla_put_failure;
		} else if (t->erspan_ver == 2) {
			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
				goto nla_put_failure;
		}
	}

	return ipgre_fill_info(skb, dev);

nla_put_failure:
	return -EMSGSIZE;
}

static void erspan_setup(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops = &erspan_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, erspan_net_id);
	t->erspan_ver = 1;
}

static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
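
/* Illustrative iproute2 usage for the "gre" and "gretap" kinds registered
 * above (hedged example; the command syntax is an assumption about the
 * userspace tool, not part of this file):
 *
 *	ip link add gre1 type gre local 198.51.100.2 remote 198.51.100.1 ttl 255
 *	ip link add gretap1 type gretap local 198.51.100.2 remote 198.51.100.1
 */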

static struct rtnl_link_ops erspan_link_ops __read_mostly = {
	.kind		= "erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= erspan_setup,
	.validate	= erspan_validate,
	.newlink	= erspan_newlink,
	.changelink	= erspan_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= erspan_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};
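
/* Illustrative iproute2 usage for an ERSPAN type II (version 1) device
 * (hedged example; the attributes mirror the IFLA_GRE_ERSPAN_* fields
 * above, but the exact command syntax is an assumption about userspace):
 *
 *	ip link add erspan1 type erspan local 198.51.100.2 remote 198.51.100.1 \
 *		seq key 100 erspan_ver 1 erspan 123
 */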

struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct rtnl_newlink_params params = { .src_net = net };
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));
	params.tb = tb;

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb, NULL);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(dev, &params, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL, 0, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);
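
/* Minimal usage sketch for the export above (hypothetical caller; error
 * handling beyond the IS_ERR() check is elided):
 *
 *	struct net_device *dev;
 *
 *	dev = gretap_fb_dev_create(net, "gretap_ovs", NET_NAME_USER);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 */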

static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_rtnl(struct net *net,
					   struct list_head *dev_to_kill)
{
	ip_tunnel_delete_net(net, gre_tap_net_id, &ipgre_tap_ops, dev_to_kill);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit_rtnl = ipgre_tap_exit_rtnl,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __net_init erspan_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, erspan_net_id,
				  &erspan_link_ops, "erspan0");
}

static void __net_exit erspan_exit_rtnl(struct net *net,
					struct list_head *dev_to_kill)
{
	ip_tunnel_delete_net(net, erspan_net_id, &erspan_link_ops, dev_to_kill);
}

static struct pernet_operations erspan_net_ops = {
	.init = erspan_init_net,
	.exit_rtnl = erspan_exit_rtnl,
	.id   = &erspan_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = register_pernet_device(&erspan_net_ops);
	if (err < 0)
		goto pnet_erspan_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&erspan_link_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;

erspan_link_failed:
	rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	rtnl_link_unregister(&erspan_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
	unregister_pernet_device(&erspan_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_DESCRIPTION("IPv4 GRE tunnels over IP library");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");