Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/net/ipv6/ip6_gre.c
26278 views
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
* GRE over IPv6 protocol decoder.
4
*
5
* Authors: Dmitry Kozlov ([email protected])
6
*/
7
8
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10
#include <linux/capability.h>
11
#include <linux/module.h>
12
#include <linux/types.h>
13
#include <linux/kernel.h>
14
#include <linux/slab.h>
15
#include <linux/uaccess.h>
16
#include <linux/skbuff.h>
17
#include <linux/netdevice.h>
18
#include <linux/in.h>
19
#include <linux/tcp.h>
20
#include <linux/udp.h>
21
#include <linux/if_arp.h>
22
#include <linux/init.h>
23
#include <linux/in6.h>
24
#include <linux/inetdevice.h>
25
#include <linux/igmp.h>
26
#include <linux/netfilter_ipv4.h>
27
#include <linux/etherdevice.h>
28
#include <linux/if_ether.h>
29
#include <linux/hash.h>
30
#include <linux/if_tunnel.h>
31
#include <linux/ip6_tunnel.h>
32
33
#include <net/sock.h>
34
#include <net/ip.h>
35
#include <net/ip_tunnels.h>
36
#include <net/icmp.h>
37
#include <net/protocol.h>
38
#include <net/addrconf.h>
39
#include <net/arp.h>
40
#include <net/checksum.h>
41
#include <net/dsfield.h>
42
#include <net/inet_ecn.h>
43
#include <net/xfrm.h>
44
#include <net/net_namespace.h>
45
#include <net/netns/generic.h>
46
#include <net/netdev_lock.h>
47
#include <net/rtnetlink.h>
48
49
#include <net/ipv6.h>
50
#include <net/ip6_fib.h>
51
#include <net/ip6_route.h>
52
#include <net/ip6_tunnel.h>
53
#include <net/gre.h>
54
#include <net/erspan.h>
55
#include <net/dst_metadata.h>
56
57
58
static bool log_ecn_error = true;
59
module_param(log_ecn_error, bool, 0644);
60
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
61
62
#define IP6_GRE_HASH_SIZE_SHIFT 5
63
#define IP6_GRE_HASH_SIZE (1 << IP6_GRE_HASH_SIZE_SHIFT)
64
65
static unsigned int ip6gre_net_id __read_mostly;
66
struct ip6gre_net {
67
struct ip6_tnl __rcu *tunnels[4][IP6_GRE_HASH_SIZE];
68
69
struct ip6_tnl __rcu *collect_md_tun;
70
struct ip6_tnl __rcu *collect_md_tun_erspan;
71
struct net_device *fb_tunnel_dev;
72
};
73
74
static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
75
static struct rtnl_link_ops ip6gre_tap_ops __read_mostly;
76
static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly;
77
static int ip6gre_tunnel_init(struct net_device *dev);
78
static void ip6gre_tunnel_setup(struct net_device *dev);
79
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
80
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu);
81
static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu);
82
83
/* Tunnel hash table */
84
85
/*
86
4 hash tables:
87
88
3: (remote,local)
89
2: (remote,*)
90
1: (*,local)
91
0: (*,*)
92
93
We require exact key match i.e. if a key is present in packet
94
it will match only tunnel with the same key; if it is not present,
95
it will match only keyless tunnel.
96
97
All keysless packets, if not matched configured keyless tunnels
98
will match fallback tunnel.
99
*/
100
101
#define HASH_KEY(key) (((__force u32)key^((__force u32)key>>4))&(IP6_GRE_HASH_SIZE - 1))
102
static u32 HASH_ADDR(const struct in6_addr *addr)
103
{
104
u32 hash = ipv6_addr_hash(addr);
105
106
return hash_32(hash, IP6_GRE_HASH_SIZE_SHIFT);
107
}
108
109
#define tunnels_r_l tunnels[3]
110
#define tunnels_r tunnels[2]
111
#define tunnels_l tunnels[1]
112
#define tunnels_wc tunnels[0]
113
114
static bool ip6gre_tunnel_match(struct ip6_tnl *t, int dev_type, int link,
115
int *cand_score, struct ip6_tnl **ret)
116
{
117
int score = 0;
118
119
if (t->dev->type != ARPHRD_IP6GRE &&
120
t->dev->type != dev_type)
121
return false;
122
123
if (t->parms.link != link)
124
score |= 1;
125
if (t->dev->type != dev_type)
126
score |= 2;
127
if (score == 0) {
128
*ret = t;
129
return true;
130
}
131
132
if (score < *cand_score) {
133
*ret = t;
134
*cand_score = score;
135
}
136
return false;
137
}
138
139
/* Given src, dst and key, find appropriate for input tunnel. */
140
static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
141
const struct in6_addr *remote, const struct in6_addr *local,
142
__be32 key, __be16 gre_proto)
143
{
144
struct net *net = dev_net(dev);
145
int link = dev->ifindex;
146
unsigned int h0 = HASH_ADDR(remote);
147
unsigned int h1 = HASH_KEY(key);
148
struct ip6_tnl *t, *cand = NULL;
149
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
150
int dev_type = (gre_proto == htons(ETH_P_TEB) ||
151
gre_proto == htons(ETH_P_ERSPAN) ||
152
gre_proto == htons(ETH_P_ERSPAN2)) ?
153
ARPHRD_ETHER : ARPHRD_IP6GRE;
154
struct net_device *ndev;
155
int cand_score = 4;
156
157
for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
158
if (!ipv6_addr_equal(local, &t->parms.laddr) ||
159
!ipv6_addr_equal(remote, &t->parms.raddr) ||
160
key != t->parms.i_key ||
161
!(t->dev->flags & IFF_UP))
162
continue;
163
164
if (ip6gre_tunnel_match(t, dev_type, link, &cand_score, &cand))
165
return cand;
166
}
167
168
for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
169
if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
170
key != t->parms.i_key ||
171
!(t->dev->flags & IFF_UP))
172
continue;
173
174
if (ip6gre_tunnel_match(t, dev_type, link, &cand_score, &cand))
175
return cand;
176
}
177
178
for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) {
179
if ((!ipv6_addr_equal(local, &t->parms.laddr) &&
180
(!ipv6_addr_equal(local, &t->parms.raddr) ||
181
!ipv6_addr_is_multicast(local))) ||
182
key != t->parms.i_key ||
183
!(t->dev->flags & IFF_UP))
184
continue;
185
186
if (ip6gre_tunnel_match(t, dev_type, link, &cand_score, &cand))
187
return cand;
188
}
189
190
for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) {
191
if (t->parms.i_key != key ||
192
!(t->dev->flags & IFF_UP))
193
continue;
194
195
if (ip6gre_tunnel_match(t, dev_type, link, &cand_score, &cand))
196
return cand;
197
}
198
199
if (cand)
200
return cand;
201
202
if (gre_proto == htons(ETH_P_ERSPAN) ||
203
gre_proto == htons(ETH_P_ERSPAN2))
204
t = rcu_dereference(ign->collect_md_tun_erspan);
205
else
206
t = rcu_dereference(ign->collect_md_tun);
207
208
if (t && t->dev->flags & IFF_UP)
209
return t;
210
211
ndev = READ_ONCE(ign->fb_tunnel_dev);
212
if (ndev && ndev->flags & IFF_UP)
213
return netdev_priv(ndev);
214
215
return NULL;
216
}
217
218
static struct ip6_tnl __rcu **__ip6gre_bucket(struct ip6gre_net *ign,
219
const struct __ip6_tnl_parm *p)
220
{
221
const struct in6_addr *remote = &p->raddr;
222
const struct in6_addr *local = &p->laddr;
223
unsigned int h = HASH_KEY(p->i_key);
224
int prio = 0;
225
226
if (!ipv6_addr_any(local))
227
prio |= 1;
228
if (!ipv6_addr_any(remote) && !ipv6_addr_is_multicast(remote)) {
229
prio |= 2;
230
h ^= HASH_ADDR(remote);
231
}
232
233
return &ign->tunnels[prio][h];
234
}
235
236
static void ip6gre_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
237
{
238
if (t->parms.collect_md)
239
rcu_assign_pointer(ign->collect_md_tun, t);
240
}
241
242
static void ip6erspan_tunnel_link_md(struct ip6gre_net *ign, struct ip6_tnl *t)
243
{
244
if (t->parms.collect_md)
245
rcu_assign_pointer(ign->collect_md_tun_erspan, t);
246
}
247
248
static void ip6gre_tunnel_unlink_md(struct ip6gre_net *ign, struct ip6_tnl *t)
249
{
250
if (t->parms.collect_md)
251
rcu_assign_pointer(ign->collect_md_tun, NULL);
252
}
253
254
static void ip6erspan_tunnel_unlink_md(struct ip6gre_net *ign,
255
struct ip6_tnl *t)
256
{
257
if (t->parms.collect_md)
258
rcu_assign_pointer(ign->collect_md_tun_erspan, NULL);
259
}
260
261
static inline struct ip6_tnl __rcu **ip6gre_bucket(struct ip6gre_net *ign,
262
const struct ip6_tnl *t)
263
{
264
return __ip6gre_bucket(ign, &t->parms);
265
}
266
267
static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t)
268
{
269
struct ip6_tnl __rcu **tp = ip6gre_bucket(ign, t);
270
271
rcu_assign_pointer(t->next, rtnl_dereference(*tp));
272
rcu_assign_pointer(*tp, t);
273
}
274
275
static void ip6gre_tunnel_unlink(struct ip6gre_net *ign, struct ip6_tnl *t)
276
{
277
struct ip6_tnl __rcu **tp;
278
struct ip6_tnl *iter;
279
280
for (tp = ip6gre_bucket(ign, t);
281
(iter = rtnl_dereference(*tp)) != NULL;
282
tp = &iter->next) {
283
if (t == iter) {
284
rcu_assign_pointer(*tp, t->next);
285
break;
286
}
287
}
288
}
289
290
static struct ip6_tnl *ip6gre_tunnel_find(struct net *net,
291
const struct __ip6_tnl_parm *parms,
292
int type)
293
{
294
const struct in6_addr *remote = &parms->raddr;
295
const struct in6_addr *local = &parms->laddr;
296
__be32 key = parms->i_key;
297
int link = parms->link;
298
struct ip6_tnl *t;
299
struct ip6_tnl __rcu **tp;
300
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
301
302
for (tp = __ip6gre_bucket(ign, parms);
303
(t = rtnl_dereference(*tp)) != NULL;
304
tp = &t->next)
305
if (ipv6_addr_equal(local, &t->parms.laddr) &&
306
ipv6_addr_equal(remote, &t->parms.raddr) &&
307
key == t->parms.i_key &&
308
link == t->parms.link &&
309
type == t->dev->type)
310
break;
311
312
return t;
313
}
314
315
static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
316
const struct __ip6_tnl_parm *parms, int create)
317
{
318
struct ip6_tnl *t, *nt;
319
struct net_device *dev;
320
char name[IFNAMSIZ];
321
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
322
323
t = ip6gre_tunnel_find(net, parms, ARPHRD_IP6GRE);
324
if (t && create)
325
return NULL;
326
if (t || !create)
327
return t;
328
329
if (parms->name[0]) {
330
if (!dev_valid_name(parms->name))
331
return NULL;
332
strscpy(name, parms->name, IFNAMSIZ);
333
} else {
334
strcpy(name, "ip6gre%d");
335
}
336
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
337
ip6gre_tunnel_setup);
338
if (!dev)
339
return NULL;
340
341
dev_net_set(dev, net);
342
343
nt = netdev_priv(dev);
344
nt->parms = *parms;
345
dev->rtnl_link_ops = &ip6gre_link_ops;
346
347
nt->dev = dev;
348
nt->net = dev_net(dev);
349
350
if (register_netdevice(dev) < 0)
351
goto failed_free;
352
353
ip6gre_tnl_link_config(nt, 1);
354
ip6gre_tunnel_link(ign, nt);
355
return nt;
356
357
failed_free:
358
free_netdev(dev);
359
return NULL;
360
}
361
362
static void ip6erspan_tunnel_uninit(struct net_device *dev)
363
{
364
struct ip6_tnl *t = netdev_priv(dev);
365
struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
366
367
ip6erspan_tunnel_unlink_md(ign, t);
368
ip6gre_tunnel_unlink(ign, t);
369
dst_cache_reset(&t->dst_cache);
370
netdev_put(dev, &t->dev_tracker);
371
}
372
373
static void ip6gre_tunnel_uninit(struct net_device *dev)
374
{
375
struct ip6_tnl *t = netdev_priv(dev);
376
struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
377
378
ip6gre_tunnel_unlink_md(ign, t);
379
ip6gre_tunnel_unlink(ign, t);
380
if (ign->fb_tunnel_dev == dev)
381
WRITE_ONCE(ign->fb_tunnel_dev, NULL);
382
dst_cache_reset(&t->dst_cache);
383
netdev_put(dev, &t->dev_tracker);
384
}
385
386
387
static int ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
388
u8 type, u8 code, int offset, __be32 info)
389
{
390
struct net *net = dev_net(skb->dev);
391
const struct ipv6hdr *ipv6h;
392
struct tnl_ptk_info tpi;
393
struct ip6_tnl *t;
394
395
if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IPV6),
396
offset) < 0)
397
return -EINVAL;
398
399
ipv6h = (const struct ipv6hdr *)skb->data;
400
t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
401
tpi.key, tpi.proto);
402
if (!t)
403
return -ENOENT;
404
405
switch (type) {
406
case ICMPV6_DEST_UNREACH:
407
net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
408
t->parms.name);
409
if (code != ICMPV6_PORT_UNREACH)
410
break;
411
return 0;
412
case ICMPV6_TIME_EXCEED:
413
if (code == ICMPV6_EXC_HOPLIMIT) {
414
net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
415
t->parms.name);
416
break;
417
}
418
return 0;
419
case ICMPV6_PARAMPROB: {
420
struct ipv6_tlv_tnl_enc_lim *tel;
421
__u32 teli;
422
423
teli = 0;
424
if (code == ICMPV6_HDR_FIELD)
425
teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
426
427
if (teli && teli == be32_to_cpu(info) - 2) {
428
tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
429
if (tel->encap_limit == 0) {
430
net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
431
t->parms.name);
432
}
433
} else {
434
net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
435
t->parms.name);
436
}
437
return 0;
438
}
439
case ICMPV6_PKT_TOOBIG:
440
ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
441
return 0;
442
case NDISC_REDIRECT:
443
ip6_redirect(skb, net, skb->dev->ifindex, 0,
444
sock_net_uid(net, NULL));
445
return 0;
446
}
447
448
if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
449
t->err_count++;
450
else
451
t->err_count = 1;
452
t->err_time = jiffies;
453
454
return 0;
455
}
456
457
static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
458
{
459
const struct ipv6hdr *ipv6h;
460
struct ip6_tnl *tunnel;
461
462
ipv6h = ipv6_hdr(skb);
463
tunnel = ip6gre_tunnel_lookup(skb->dev,
464
&ipv6h->saddr, &ipv6h->daddr, tpi->key,
465
tpi->proto);
466
if (tunnel) {
467
if (tunnel->parms.collect_md) {
468
IP_TUNNEL_DECLARE_FLAGS(flags);
469
struct metadata_dst *tun_dst;
470
__be64 tun_id;
471
472
ip_tunnel_flags_copy(flags, tpi->flags);
473
tun_id = key32_to_tunnel_id(tpi->key);
474
475
tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id, 0);
476
if (!tun_dst)
477
return PACKET_REJECT;
478
479
ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
480
} else {
481
ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
482
}
483
484
return PACKET_RCVD;
485
}
486
487
return PACKET_REJECT;
488
}
489
490
static int ip6erspan_rcv(struct sk_buff *skb,
491
struct tnl_ptk_info *tpi,
492
int gre_hdr_len)
493
{
494
struct erspan_base_hdr *ershdr;
495
const struct ipv6hdr *ipv6h;
496
struct erspan_md2 *md2;
497
struct ip6_tnl *tunnel;
498
u8 ver;
499
500
if (unlikely(!pskb_may_pull(skb, sizeof(*ershdr))))
501
return PACKET_REJECT;
502
503
ipv6h = ipv6_hdr(skb);
504
ershdr = (struct erspan_base_hdr *)skb->data;
505
ver = ershdr->ver;
506
507
tunnel = ip6gre_tunnel_lookup(skb->dev,
508
&ipv6h->saddr, &ipv6h->daddr, tpi->key,
509
tpi->proto);
510
if (tunnel) {
511
int len = erspan_hdr_len(ver);
512
513
if (unlikely(!pskb_may_pull(skb, len)))
514
return PACKET_REJECT;
515
516
if (__iptunnel_pull_header(skb, len,
517
htons(ETH_P_TEB),
518
false, false) < 0)
519
return PACKET_REJECT;
520
521
if (tunnel->parms.collect_md) {
522
struct erspan_metadata *pkt_md, *md;
523
IP_TUNNEL_DECLARE_FLAGS(flags);
524
struct metadata_dst *tun_dst;
525
struct ip_tunnel_info *info;
526
unsigned char *gh;
527
__be64 tun_id;
528
529
__set_bit(IP_TUNNEL_KEY_BIT, tpi->flags);
530
ip_tunnel_flags_copy(flags, tpi->flags);
531
tun_id = key32_to_tunnel_id(tpi->key);
532
533
tun_dst = ipv6_tun_rx_dst(skb, flags, tun_id,
534
sizeof(*md));
535
if (!tun_dst)
536
return PACKET_REJECT;
537
538
/* skb can be uncloned in __iptunnel_pull_header, so
539
* old pkt_md is no longer valid and we need to reset
540
* it
541
*/
542
gh = skb_network_header(skb) +
543
skb_network_header_len(skb);
544
pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
545
sizeof(*ershdr));
546
info = &tun_dst->u.tun_info;
547
md = ip_tunnel_info_opts(info);
548
md->version = ver;
549
md2 = &md->u.md2;
550
memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
551
ERSPAN_V2_MDSIZE);
552
__set_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
553
info->key.tun_flags);
554
info->options_len = sizeof(*md);
555
556
ip6_tnl_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
557
558
} else {
559
ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
560
}
561
562
return PACKET_RCVD;
563
}
564
565
return PACKET_REJECT;
566
}
567
568
static int gre_rcv(struct sk_buff *skb)
569
{
570
struct tnl_ptk_info tpi;
571
bool csum_err = false;
572
int hdr_len;
573
574
hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IPV6), 0);
575
if (hdr_len < 0)
576
goto drop;
577
578
if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false))
579
goto drop;
580
581
if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
582
tpi.proto == htons(ETH_P_ERSPAN2))) {
583
if (ip6erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
584
return 0;
585
goto out;
586
}
587
588
if (ip6gre_rcv(skb, &tpi) == PACKET_RCVD)
589
return 0;
590
591
out:
592
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
593
drop:
594
kfree_skb(skb);
595
return 0;
596
}
597
598
static int gre_handle_offloads(struct sk_buff *skb, bool csum)
599
{
600
return iptunnel_handle_offloads(skb,
601
csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
602
}
603
604
static void prepare_ip6gre_xmit_ipv4(struct sk_buff *skb,
605
struct net_device *dev,
606
struct flowi6 *fl6, __u8 *dsfield,
607
int *encap_limit)
608
{
609
const struct iphdr *iph = ip_hdr(skb);
610
struct ip6_tnl *t = netdev_priv(dev);
611
612
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
613
*encap_limit = t->parms.encap_limit;
614
615
memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
616
617
if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
618
*dsfield = ipv4_get_dsfield(iph);
619
else
620
*dsfield = ip6_tclass(t->parms.flowinfo);
621
622
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
623
fl6->flowi6_mark = skb->mark;
624
else
625
fl6->flowi6_mark = t->parms.fwmark;
626
627
fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
628
}
629
630
static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
631
struct net_device *dev,
632
struct flowi6 *fl6, __u8 *dsfield,
633
int *encap_limit)
634
{
635
struct ipv6hdr *ipv6h;
636
struct ip6_tnl *t = netdev_priv(dev);
637
__u16 offset;
638
639
offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
640
/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
641
ipv6h = ipv6_hdr(skb);
642
643
if (offset > 0) {
644
struct ipv6_tlv_tnl_enc_lim *tel;
645
646
tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
647
if (tel->encap_limit == 0) {
648
icmpv6_ndo_send(skb, ICMPV6_PARAMPROB,
649
ICMPV6_HDR_FIELD, offset + 2);
650
return -1;
651
}
652
*encap_limit = tel->encap_limit - 1;
653
} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
654
*encap_limit = t->parms.encap_limit;
655
}
656
657
memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
658
659
if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
660
*dsfield = ipv6_get_dsfield(ipv6h);
661
else
662
*dsfield = ip6_tclass(t->parms.flowinfo);
663
664
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
665
fl6->flowlabel |= ip6_flowlabel(ipv6h);
666
667
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
668
fl6->flowi6_mark = skb->mark;
669
else
670
fl6->flowi6_mark = t->parms.fwmark;
671
672
fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
673
674
return 0;
675
}
676
677
static int prepare_ip6gre_xmit_other(struct sk_buff *skb,
678
struct net_device *dev,
679
struct flowi6 *fl6, __u8 *dsfield,
680
int *encap_limit)
681
{
682
struct ip6_tnl *t = netdev_priv(dev);
683
684
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
685
*encap_limit = t->parms.encap_limit;
686
687
memcpy(fl6, &t->fl.u.ip6, sizeof(*fl6));
688
689
if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
690
*dsfield = 0;
691
else
692
*dsfield = ip6_tclass(t->parms.flowinfo);
693
694
if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
695
fl6->flowi6_mark = skb->mark;
696
else
697
fl6->flowi6_mark = t->parms.fwmark;
698
699
fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
700
701
return 0;
702
}
703
704
static struct ip_tunnel_info *skb_tunnel_info_txcheck(struct sk_buff *skb)
705
{
706
struct ip_tunnel_info *tun_info;
707
708
tun_info = skb_tunnel_info(skb);
709
if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX)))
710
return ERR_PTR(-EINVAL);
711
712
return tun_info;
713
}
714
715
static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
716
struct net_device *dev, __u8 dsfield,
717
struct flowi6 *fl6, int encap_limit,
718
__u32 *pmtu, __be16 proto)
719
{
720
struct ip6_tnl *tunnel = netdev_priv(dev);
721
IP_TUNNEL_DECLARE_FLAGS(flags);
722
__be16 protocol;
723
724
if (dev->type == ARPHRD_ETHER)
725
IPCB(skb)->flags = 0;
726
727
if (dev->header_ops && dev->type == ARPHRD_IP6GRE)
728
fl6->daddr = ((struct ipv6hdr *)skb->data)->daddr;
729
else
730
fl6->daddr = tunnel->parms.raddr;
731
732
/* Push GRE header. */
733
protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
734
735
if (tunnel->parms.collect_md) {
736
struct ip_tunnel_info *tun_info;
737
const struct ip_tunnel_key *key;
738
int tun_hlen;
739
740
tun_info = skb_tunnel_info_txcheck(skb);
741
if (IS_ERR(tun_info) ||
742
unlikely(ip_tunnel_info_af(tun_info) != AF_INET6))
743
return -EINVAL;
744
745
key = &tun_info->key;
746
memset(fl6, 0, sizeof(*fl6));
747
fl6->flowi6_proto = IPPROTO_GRE;
748
fl6->daddr = key->u.ipv6.dst;
749
fl6->flowlabel = key->label;
750
fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
751
fl6->fl6_gre_key = tunnel_id_to_key32(key->tun_id);
752
753
dsfield = key->tos;
754
ip_tunnel_flags_zero(flags);
755
__set_bit(IP_TUNNEL_CSUM_BIT, flags);
756
__set_bit(IP_TUNNEL_KEY_BIT, flags);
757
__set_bit(IP_TUNNEL_SEQ_BIT, flags);
758
ip_tunnel_flags_and(flags, flags, key->tun_flags);
759
tun_hlen = gre_calc_hlen(flags);
760
761
if (skb_cow_head(skb, dev->needed_headroom ?: tun_hlen + tunnel->encap_hlen))
762
return -ENOMEM;
763
764
gre_build_header(skb, tun_hlen,
765
flags, protocol,
766
tunnel_id_to_key32(tun_info->key.tun_id),
767
test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
768
htonl(atomic_fetch_inc(&tunnel->o_seqno)) :
769
0);
770
771
} else {
772
if (skb_cow_head(skb, dev->needed_headroom ?: tunnel->hlen))
773
return -ENOMEM;
774
775
ip_tunnel_flags_copy(flags, tunnel->parms.o_flags);
776
777
gre_build_header(skb, tunnel->tun_hlen, flags,
778
protocol, tunnel->parms.o_key,
779
test_bit(IP_TUNNEL_SEQ_BIT, flags) ?
780
htonl(atomic_fetch_inc(&tunnel->o_seqno)) :
781
0);
782
}
783
784
return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
785
NEXTHDR_GRE);
786
}
787
788
static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
789
{
790
struct ip6_tnl *t = netdev_priv(dev);
791
int encap_limit = -1;
792
struct flowi6 fl6;
793
__u8 dsfield = 0;
794
__u32 mtu;
795
int err;
796
797
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
798
799
if (!t->parms.collect_md)
800
prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
801
&dsfield, &encap_limit);
802
803
err = gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
804
t->parms.o_flags));
805
if (err)
806
return -1;
807
808
err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
809
skb->protocol);
810
if (err != 0) {
811
/* XXX: send ICMP error even if DF is not set. */
812
if (err == -EMSGSIZE)
813
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
814
htonl(mtu));
815
return -1;
816
}
817
818
return 0;
819
}
820
821
static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
822
{
823
struct ip6_tnl *t = netdev_priv(dev);
824
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
825
int encap_limit = -1;
826
struct flowi6 fl6;
827
__u8 dsfield = 0;
828
__u32 mtu;
829
int err;
830
831
if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
832
return -1;
833
834
if (!t->parms.collect_md &&
835
prepare_ip6gre_xmit_ipv6(skb, dev, &fl6, &dsfield, &encap_limit))
836
return -1;
837
838
if (gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
839
t->parms.o_flags)))
840
return -1;
841
842
err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit,
843
&mtu, skb->protocol);
844
if (err != 0) {
845
if (err == -EMSGSIZE)
846
icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
847
return -1;
848
}
849
850
return 0;
851
}
852
853
static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
854
{
855
struct ip6_tnl *t = netdev_priv(dev);
856
int encap_limit = -1;
857
struct flowi6 fl6;
858
__u8 dsfield = 0;
859
__u32 mtu;
860
int err;
861
862
if (!t->parms.collect_md &&
863
prepare_ip6gre_xmit_other(skb, dev, &fl6, &dsfield, &encap_limit))
864
return -1;
865
866
err = gre_handle_offloads(skb, test_bit(IP_TUNNEL_CSUM_BIT,
867
t->parms.o_flags));
868
if (err)
869
return err;
870
err = __gre6_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, skb->protocol);
871
872
return err;
873
}
874
875
static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
876
struct net_device *dev)
877
{
878
struct ip6_tnl *t = netdev_priv(dev);
879
__be16 payload_protocol;
880
int ret;
881
882
if (!pskb_inet_may_pull(skb))
883
goto tx_err;
884
885
if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
886
goto tx_err;
887
888
payload_protocol = skb_protocol(skb, true);
889
switch (payload_protocol) {
890
case htons(ETH_P_IP):
891
ret = ip6gre_xmit_ipv4(skb, dev);
892
break;
893
case htons(ETH_P_IPV6):
894
ret = ip6gre_xmit_ipv6(skb, dev);
895
break;
896
default:
897
ret = ip6gre_xmit_other(skb, dev);
898
break;
899
}
900
901
if (ret < 0)
902
goto tx_err;
903
904
return NETDEV_TX_OK;
905
906
tx_err:
907
if (!t->parms.collect_md || !IS_ERR(skb_tunnel_info_txcheck(skb)))
908
DEV_STATS_INC(dev, tx_errors);
909
DEV_STATS_INC(dev, tx_dropped);
910
kfree_skb(skb);
911
return NETDEV_TX_OK;
912
}
913
914
static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
915
struct net_device *dev)
916
{
917
struct ip_tunnel_info *tun_info = NULL;
918
struct ip6_tnl *t = netdev_priv(dev);
919
struct dst_entry *dst = skb_dst(skb);
920
IP_TUNNEL_DECLARE_FLAGS(flags) = { };
921
bool truncate = false;
922
int encap_limit = -1;
923
__u8 dsfield = false;
924
struct flowi6 fl6;
925
int err = -EINVAL;
926
__be16 proto;
927
__u32 mtu;
928
int nhoff;
929
930
if (!pskb_inet_may_pull(skb))
931
goto tx_err;
932
933
if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
934
goto tx_err;
935
936
if (gre_handle_offloads(skb, false))
937
goto tx_err;
938
939
if (skb->len > dev->mtu + dev->hard_header_len) {
940
if (pskb_trim(skb, dev->mtu + dev->hard_header_len))
941
goto tx_err;
942
truncate = true;
943
}
944
945
nhoff = skb_network_offset(skb);
946
if (skb->protocol == htons(ETH_P_IP) &&
947
(ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
948
truncate = true;
949
950
if (skb->protocol == htons(ETH_P_IPV6)) {
951
int thoff;
952
953
if (skb_transport_header_was_set(skb))
954
thoff = skb_transport_offset(skb);
955
else
956
thoff = nhoff + sizeof(struct ipv6hdr);
957
if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
958
truncate = true;
959
}
960
961
if (skb_cow_head(skb, dev->needed_headroom ?: t->hlen))
962
goto tx_err;
963
964
__clear_bit(IP_TUNNEL_KEY_BIT, t->parms.o_flags);
965
IPCB(skb)->flags = 0;
966
967
/* For collect_md mode, derive fl6 from the tunnel key,
968
* for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}.
969
*/
970
if (t->parms.collect_md) {
971
const struct ip_tunnel_key *key;
972
struct erspan_metadata *md;
973
__be32 tun_id;
974
975
tun_info = skb_tunnel_info_txcheck(skb);
976
if (IS_ERR(tun_info) ||
977
unlikely(ip_tunnel_info_af(tun_info) != AF_INET6))
978
goto tx_err;
979
980
key = &tun_info->key;
981
memset(&fl6, 0, sizeof(fl6));
982
fl6.flowi6_proto = IPPROTO_GRE;
983
fl6.daddr = key->u.ipv6.dst;
984
fl6.flowlabel = key->label;
985
fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
986
fl6.fl6_gre_key = tunnel_id_to_key32(key->tun_id);
987
988
dsfield = key->tos;
989
if (!test_bit(IP_TUNNEL_ERSPAN_OPT_BIT,
990
tun_info->key.tun_flags))
991
goto tx_err;
992
if (tun_info->options_len < sizeof(*md))
993
goto tx_err;
994
md = ip_tunnel_info_opts(tun_info);
995
996
tun_id = tunnel_id_to_key32(key->tun_id);
997
if (md->version == 1) {
998
erspan_build_header(skb,
999
ntohl(tun_id),
1000
ntohl(md->u.index), truncate,
1001
false);
1002
proto = htons(ETH_P_ERSPAN);
1003
} else if (md->version == 2) {
1004
erspan_build_header_v2(skb,
1005
ntohl(tun_id),
1006
md->u.md2.dir,
1007
get_hwid(&md->u.md2),
1008
truncate, false);
1009
proto = htons(ETH_P_ERSPAN2);
1010
} else {
1011
goto tx_err;
1012
}
1013
} else {
1014
switch (skb->protocol) {
1015
case htons(ETH_P_IP):
1016
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1017
prepare_ip6gre_xmit_ipv4(skb, dev, &fl6,
1018
&dsfield, &encap_limit);
1019
break;
1020
case htons(ETH_P_IPV6):
1021
if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
1022
goto tx_err;
1023
if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
1024
&dsfield, &encap_limit))
1025
goto tx_err;
1026
break;
1027
default:
1028
memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1029
break;
1030
}
1031
1032
if (t->parms.erspan_ver == 1) {
1033
erspan_build_header(skb, ntohl(t->parms.o_key),
1034
t->parms.index,
1035
truncate, false);
1036
proto = htons(ETH_P_ERSPAN);
1037
} else if (t->parms.erspan_ver == 2) {
1038
erspan_build_header_v2(skb, ntohl(t->parms.o_key),
1039
t->parms.dir,
1040
t->parms.hwid,
1041
truncate, false);
1042
proto = htons(ETH_P_ERSPAN2);
1043
} else {
1044
goto tx_err;
1045
}
1046
1047
fl6.daddr = t->parms.raddr;
1048
}
1049
1050
/* Push GRE header. */
1051
__set_bit(IP_TUNNEL_SEQ_BIT, flags);
1052
gre_build_header(skb, 8, flags, proto, 0,
1053
htonl(atomic_fetch_inc(&t->o_seqno)));
1054
1055
/* TooBig packet may have updated dst->dev's mtu */
1056
if (!t->parms.collect_md && dst) {
1057
mtu = READ_ONCE(dst_dev(dst)->mtu);
1058
if (dst_mtu(dst) > mtu)
1059
dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
1060
}
1061
err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1062
NEXTHDR_GRE);
1063
if (err != 0) {
1064
/* XXX: send ICMP error even if DF is not set. */
1065
if (err == -EMSGSIZE) {
1066
if (skb->protocol == htons(ETH_P_IP))
1067
icmp_ndo_send(skb, ICMP_DEST_UNREACH,
1068
ICMP_FRAG_NEEDED, htonl(mtu));
1069
else
1070
icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1071
}
1072
1073
goto tx_err;
1074
}
1075
return NETDEV_TX_OK;
1076
1077
tx_err:
1078
if (!IS_ERR(tun_info))
1079
DEV_STATS_INC(dev, tx_errors);
1080
DEV_STATS_INC(dev, tx_dropped);
1081
kfree_skb(skb);
1082
return NETDEV_TX_OK;
1083
}
1084
1085
static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
1086
{
1087
struct net_device *dev = t->dev;
1088
struct __ip6_tnl_parm *p = &t->parms;
1089
struct flowi6 *fl6 = &t->fl.u.ip6;
1090
1091
if (dev->type != ARPHRD_ETHER) {
1092
__dev_addr_set(dev, &p->laddr, sizeof(struct in6_addr));
1093
memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1094
}
1095
1096
/* Set up flowi template */
1097
fl6->saddr = p->laddr;
1098
fl6->daddr = p->raddr;
1099
fl6->flowi6_oif = p->link;
1100
fl6->flowlabel = 0;
1101
fl6->flowi6_proto = IPPROTO_GRE;
1102
fl6->fl6_gre_key = t->parms.o_key;
1103
1104
if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1105
fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1106
if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1107
fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1108
1109
p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1110
p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1111
1112
if (p->flags&IP6_TNL_F_CAP_XMIT &&
1113
p->flags&IP6_TNL_F_CAP_RCV && dev->type != ARPHRD_ETHER)
1114
dev->flags |= IFF_POINTOPOINT;
1115
else
1116
dev->flags &= ~IFF_POINTOPOINT;
1117
}
1118
1119
static void ip6gre_tnl_link_config_route(struct ip6_tnl *t, int set_mtu,
1120
int t_hlen)
1121
{
1122
const struct __ip6_tnl_parm *p = &t->parms;
1123
struct net_device *dev = t->dev;
1124
1125
if (p->flags & IP6_TNL_F_CAP_XMIT) {
1126
int strict = (ipv6_addr_type(&p->raddr) &
1127
(IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1128
1129
struct rt6_info *rt = rt6_lookup(t->net,
1130
&p->raddr, &p->laddr,
1131
p->link, NULL, strict);
1132
1133
if (!rt)
1134
return;
1135
1136
if (rt->dst.dev) {
1137
unsigned short dst_len = rt->dst.dev->hard_header_len +
1138
t_hlen;
1139
1140
if (t->dev->header_ops)
1141
dev->hard_header_len = dst_len;
1142
else
1143
dev->needed_headroom = dst_len;
1144
1145
if (set_mtu) {
1146
int mtu = rt->dst.dev->mtu - t_hlen;
1147
1148
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1149
mtu -= 8;
1150
if (dev->type == ARPHRD_ETHER)
1151
mtu -= ETH_HLEN;
1152
1153
if (mtu < IPV6_MIN_MTU)
1154
mtu = IPV6_MIN_MTU;
1155
WRITE_ONCE(dev->mtu, mtu);
1156
}
1157
}
1158
ip6_rt_put(rt);
1159
}
1160
}
1161
1162
static int ip6gre_calc_hlen(struct ip6_tnl *tunnel)
1163
{
1164
int t_hlen;
1165
1166
tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
1167
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
1168
1169
t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
1170
1171
if (tunnel->dev->header_ops)
1172
tunnel->dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1173
else
1174
tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
1175
1176
return t_hlen;
1177
}
1178
1179
static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
1180
{
1181
ip6gre_tnl_link_config_common(t);
1182
ip6gre_tnl_link_config_route(t, set_mtu, ip6gre_calc_hlen(t));
1183
}
1184
1185
static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t,
1186
const struct __ip6_tnl_parm *p)
1187
{
1188
t->parms.laddr = p->laddr;
1189
t->parms.raddr = p->raddr;
1190
t->parms.flags = p->flags;
1191
t->parms.hop_limit = p->hop_limit;
1192
t->parms.encap_limit = p->encap_limit;
1193
t->parms.flowinfo = p->flowinfo;
1194
t->parms.link = p->link;
1195
t->parms.proto = p->proto;
1196
t->parms.i_key = p->i_key;
1197
t->parms.o_key = p->o_key;
1198
ip_tunnel_flags_copy(t->parms.i_flags, p->i_flags);
1199
ip_tunnel_flags_copy(t->parms.o_flags, p->o_flags);
1200
t->parms.fwmark = p->fwmark;
1201
t->parms.erspan_ver = p->erspan_ver;
1202
t->parms.index = p->index;
1203
t->parms.dir = p->dir;
1204
t->parms.hwid = p->hwid;
1205
dst_cache_reset(&t->dst_cache);
1206
}
1207
1208
static int ip6gre_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p,
1209
int set_mtu)
1210
{
1211
ip6gre_tnl_copy_tnl_parm(t, p);
1212
ip6gre_tnl_link_config(t, set_mtu);
1213
return 0;
1214
}
1215
1216
static void ip6gre_tnl_parm_from_user(struct __ip6_tnl_parm *p,
1217
const struct ip6_tnl_parm2 *u)
1218
{
1219
p->laddr = u->laddr;
1220
p->raddr = u->raddr;
1221
p->flags = u->flags;
1222
p->hop_limit = u->hop_limit;
1223
p->encap_limit = u->encap_limit;
1224
p->flowinfo = u->flowinfo;
1225
p->link = u->link;
1226
p->i_key = u->i_key;
1227
p->o_key = u->o_key;
1228
gre_flags_to_tnl_flags(p->i_flags, u->i_flags);
1229
gre_flags_to_tnl_flags(p->o_flags, u->o_flags);
1230
memcpy(p->name, u->name, sizeof(u->name));
1231
}
1232
1233
static void ip6gre_tnl_parm_to_user(struct ip6_tnl_parm2 *u,
1234
const struct __ip6_tnl_parm *p)
1235
{
1236
u->proto = IPPROTO_GRE;
1237
u->laddr = p->laddr;
1238
u->raddr = p->raddr;
1239
u->flags = p->flags;
1240
u->hop_limit = p->hop_limit;
1241
u->encap_limit = p->encap_limit;
1242
u->flowinfo = p->flowinfo;
1243
u->link = p->link;
1244
u->i_key = p->i_key;
1245
u->o_key = p->o_key;
1246
u->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
1247
u->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
1248
memcpy(u->name, p->name, sizeof(u->name));
1249
}
1250
1251
static int ip6gre_tunnel_siocdevprivate(struct net_device *dev,
1252
struct ifreq *ifr, void __user *data,
1253
int cmd)
1254
{
1255
int err = 0;
1256
struct ip6_tnl_parm2 p;
1257
struct __ip6_tnl_parm p1;
1258
struct ip6_tnl *t = netdev_priv(dev);
1259
struct net *net = t->net;
1260
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1261
1262
memset(&p1, 0, sizeof(p1));
1263
1264
switch (cmd) {
1265
case SIOCGETTUNNEL:
1266
if (dev == ign->fb_tunnel_dev) {
1267
if (copy_from_user(&p, data, sizeof(p))) {
1268
err = -EFAULT;
1269
break;
1270
}
1271
ip6gre_tnl_parm_from_user(&p1, &p);
1272
t = ip6gre_tunnel_locate(net, &p1, 0);
1273
if (!t)
1274
t = netdev_priv(dev);
1275
}
1276
memset(&p, 0, sizeof(p));
1277
ip6gre_tnl_parm_to_user(&p, &t->parms);
1278
if (copy_to_user(data, &p, sizeof(p)))
1279
err = -EFAULT;
1280
break;
1281
1282
case SIOCADDTUNNEL:
1283
case SIOCCHGTUNNEL:
1284
err = -EPERM;
1285
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1286
goto done;
1287
1288
err = -EFAULT;
1289
if (copy_from_user(&p, data, sizeof(p)))
1290
goto done;
1291
1292
err = -EINVAL;
1293
if ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))
1294
goto done;
1295
1296
if (!(p.i_flags&GRE_KEY))
1297
p.i_key = 0;
1298
if (!(p.o_flags&GRE_KEY))
1299
p.o_key = 0;
1300
1301
ip6gre_tnl_parm_from_user(&p1, &p);
1302
t = ip6gre_tunnel_locate(net, &p1, cmd == SIOCADDTUNNEL);
1303
1304
if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
1305
if (t) {
1306
if (t->dev != dev) {
1307
err = -EEXIST;
1308
break;
1309
}
1310
} else {
1311
t = netdev_priv(dev);
1312
1313
ip6gre_tunnel_unlink(ign, t);
1314
synchronize_net();
1315
ip6gre_tnl_change(t, &p1, 1);
1316
ip6gre_tunnel_link(ign, t);
1317
netdev_state_change(dev);
1318
}
1319
}
1320
1321
if (t) {
1322
err = 0;
1323
1324
memset(&p, 0, sizeof(p));
1325
ip6gre_tnl_parm_to_user(&p, &t->parms);
1326
if (copy_to_user(data, &p, sizeof(p)))
1327
err = -EFAULT;
1328
} else
1329
err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
1330
break;
1331
1332
case SIOCDELTUNNEL:
1333
err = -EPERM;
1334
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1335
goto done;
1336
1337
if (dev == ign->fb_tunnel_dev) {
1338
err = -EFAULT;
1339
if (copy_from_user(&p, data, sizeof(p)))
1340
goto done;
1341
err = -ENOENT;
1342
ip6gre_tnl_parm_from_user(&p1, &p);
1343
t = ip6gre_tunnel_locate(net, &p1, 0);
1344
if (!t)
1345
goto done;
1346
err = -EPERM;
1347
if (t == netdev_priv(ign->fb_tunnel_dev))
1348
goto done;
1349
dev = t->dev;
1350
}
1351
unregister_netdevice(dev);
1352
err = 0;
1353
break;
1354
1355
default:
1356
err = -EINVAL;
1357
}
1358
1359
done:
1360
return err;
1361
}
1362
1363
static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
1364
unsigned short type, const void *daddr,
1365
const void *saddr, unsigned int len)
1366
{
1367
struct ip6_tnl *t = netdev_priv(dev);
1368
struct ipv6hdr *ipv6h;
1369
__be16 *p;
1370
1371
ipv6h = skb_push(skb, t->hlen + sizeof(*ipv6h));
1372
ip6_flow_hdr(ipv6h, 0, ip6_make_flowlabel(dev_net(dev), skb,
1373
t->fl.u.ip6.flowlabel,
1374
true, &t->fl.u.ip6));
1375
ipv6h->hop_limit = t->parms.hop_limit;
1376
ipv6h->nexthdr = NEXTHDR_GRE;
1377
ipv6h->saddr = t->parms.laddr;
1378
ipv6h->daddr = t->parms.raddr;
1379
1380
p = (__be16 *)(ipv6h + 1);
1381
p[0] = ip_tunnel_flags_to_be16(t->parms.o_flags);
1382
p[1] = htons(type);
1383
1384
/*
1385
* Set the source hardware address.
1386
*/
1387
1388
if (saddr)
1389
memcpy(&ipv6h->saddr, saddr, sizeof(struct in6_addr));
1390
if (daddr)
1391
memcpy(&ipv6h->daddr, daddr, sizeof(struct in6_addr));
1392
if (!ipv6_addr_any(&ipv6h->daddr))
1393
return t->hlen;
1394
1395
return -t->hlen;
1396
}
1397
1398
static const struct header_ops ip6gre_header_ops = {
1399
.create = ip6gre_header,
1400
};
1401
1402
static const struct net_device_ops ip6gre_netdev_ops = {
1403
.ndo_init = ip6gre_tunnel_init,
1404
.ndo_uninit = ip6gre_tunnel_uninit,
1405
.ndo_start_xmit = ip6gre_tunnel_xmit,
1406
.ndo_siocdevprivate = ip6gre_tunnel_siocdevprivate,
1407
.ndo_change_mtu = ip6_tnl_change_mtu,
1408
.ndo_get_iflink = ip6_tnl_get_iflink,
1409
};
1410
1411
static void ip6gre_dev_free(struct net_device *dev)
1412
{
1413
struct ip6_tnl *t = netdev_priv(dev);
1414
1415
gro_cells_destroy(&t->gro_cells);
1416
dst_cache_destroy(&t->dst_cache);
1417
}
1418
1419
static void ip6gre_tunnel_setup(struct net_device *dev)
1420
{
1421
dev->netdev_ops = &ip6gre_netdev_ops;
1422
dev->needs_free_netdev = true;
1423
dev->priv_destructor = ip6gre_dev_free;
1424
1425
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
1426
dev->type = ARPHRD_IP6GRE;
1427
1428
dev->flags |= IFF_NOARP;
1429
dev->addr_len = sizeof(struct in6_addr);
1430
netif_keep_dst(dev);
1431
/* This perm addr will be used as interface identifier by IPv6 */
1432
dev->addr_assign_type = NET_ADDR_RANDOM;
1433
eth_random_addr(dev->perm_addr);
1434
}
1435
1436
#define GRE6_FEATURES (NETIF_F_SG | \
1437
NETIF_F_FRAGLIST | \
1438
NETIF_F_HIGHDMA | \
1439
NETIF_F_HW_CSUM)
1440
1441
static void ip6gre_tnl_init_features(struct net_device *dev)
1442
{
1443
struct ip6_tnl *nt = netdev_priv(dev);
1444
1445
dev->features |= GRE6_FEATURES;
1446
dev->hw_features |= GRE6_FEATURES;
1447
1448
/* TCP offload with GRE SEQ is not supported, nor can we support 2
1449
* levels of outer headers requiring an update.
1450
*/
1451
if (test_bit(IP_TUNNEL_SEQ_BIT, nt->parms.o_flags))
1452
return;
1453
if (test_bit(IP_TUNNEL_CSUM_BIT, nt->parms.o_flags) &&
1454
nt->encap.type != TUNNEL_ENCAP_NONE)
1455
return;
1456
1457
dev->features |= NETIF_F_GSO_SOFTWARE;
1458
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1459
1460
dev->lltx = true;
1461
}
1462
1463
static int ip6gre_tunnel_init_common(struct net_device *dev)
1464
{
1465
struct ip6_tnl *tunnel;
1466
int ret;
1467
int t_hlen;
1468
1469
tunnel = netdev_priv(dev);
1470
1471
tunnel->dev = dev;
1472
strcpy(tunnel->parms.name, dev->name);
1473
1474
ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
1475
if (ret)
1476
return ret;
1477
1478
ret = gro_cells_init(&tunnel->gro_cells, dev);
1479
if (ret)
1480
goto cleanup_dst_cache_init;
1481
1482
t_hlen = ip6gre_calc_hlen(tunnel);
1483
dev->mtu = ETH_DATA_LEN - t_hlen;
1484
if (dev->type == ARPHRD_ETHER)
1485
dev->mtu -= ETH_HLEN;
1486
if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1487
dev->mtu -= 8;
1488
1489
if (tunnel->parms.collect_md) {
1490
netif_keep_dst(dev);
1491
}
1492
ip6gre_tnl_init_features(dev);
1493
1494
netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
1495
netdev_lockdep_set_classes(dev);
1496
return 0;
1497
1498
cleanup_dst_cache_init:
1499
dst_cache_destroy(&tunnel->dst_cache);
1500
return ret;
1501
}
1502
1503
static int ip6gre_tunnel_init(struct net_device *dev)
1504
{
1505
struct ip6_tnl *tunnel;
1506
int ret;
1507
1508
ret = ip6gre_tunnel_init_common(dev);
1509
if (ret)
1510
return ret;
1511
1512
tunnel = netdev_priv(dev);
1513
1514
if (tunnel->parms.collect_md)
1515
return 0;
1516
1517
__dev_addr_set(dev, &tunnel->parms.laddr, sizeof(struct in6_addr));
1518
memcpy(dev->broadcast, &tunnel->parms.raddr, sizeof(struct in6_addr));
1519
1520
if (ipv6_addr_any(&tunnel->parms.raddr))
1521
dev->header_ops = &ip6gre_header_ops;
1522
1523
return 0;
1524
}
1525
1526
static void ip6gre_fb_tunnel_init(struct net_device *dev)
1527
{
1528
struct ip6_tnl *tunnel = netdev_priv(dev);
1529
1530
tunnel->dev = dev;
1531
tunnel->net = dev_net(dev);
1532
strcpy(tunnel->parms.name, dev->name);
1533
1534
tunnel->hlen = sizeof(struct ipv6hdr) + 4;
1535
}
1536
1537
static struct inet6_protocol ip6gre_protocol __read_mostly = {
1538
.handler = gre_rcv,
1539
.err_handler = ip6gre_err,
1540
.flags = INET6_PROTO_FINAL,
1541
};
1542
1543
static void __net_exit ip6gre_exit_rtnl_net(struct net *net, struct list_head *head)
1544
{
1545
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1546
struct net_device *dev, *aux;
1547
int prio;
1548
1549
for_each_netdev_safe(net, dev, aux)
1550
if (dev->rtnl_link_ops == &ip6gre_link_ops ||
1551
dev->rtnl_link_ops == &ip6gre_tap_ops ||
1552
dev->rtnl_link_ops == &ip6erspan_tap_ops)
1553
unregister_netdevice_queue(dev, head);
1554
1555
for (prio = 0; prio < 4; prio++) {
1556
int h;
1557
for (h = 0; h < IP6_GRE_HASH_SIZE; h++) {
1558
struct ip6_tnl *t;
1559
1560
t = rtnl_net_dereference(net, ign->tunnels[prio][h]);
1561
1562
while (t) {
1563
/* If dev is in the same netns, it has already
1564
* been added to the list by the previous loop.
1565
*/
1566
if (!net_eq(dev_net(t->dev), net))
1567
unregister_netdevice_queue(t->dev, head);
1568
1569
t = rtnl_net_dereference(net, t->next);
1570
}
1571
}
1572
}
1573
}
1574
1575
static int __net_init ip6gre_init_net(struct net *net)
1576
{
1577
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
1578
struct net_device *ndev;
1579
int err;
1580
1581
if (!net_has_fallback_tunnels(net))
1582
return 0;
1583
ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
1584
NET_NAME_UNKNOWN, ip6gre_tunnel_setup);
1585
if (!ndev) {
1586
err = -ENOMEM;
1587
goto err_alloc_dev;
1588
}
1589
ign->fb_tunnel_dev = ndev;
1590
dev_net_set(ign->fb_tunnel_dev, net);
1591
/* FB netdevice is special: we have one, and only one per netns.
1592
* Allowing to move it to another netns is clearly unsafe.
1593
*/
1594
ign->fb_tunnel_dev->netns_immutable = true;
1595
1596
ip6gre_fb_tunnel_init(ign->fb_tunnel_dev);
1597
ign->fb_tunnel_dev->rtnl_link_ops = &ip6gre_link_ops;
1598
1599
err = register_netdev(ign->fb_tunnel_dev);
1600
if (err)
1601
goto err_reg_dev;
1602
1603
rcu_assign_pointer(ign->tunnels_wc[0],
1604
netdev_priv(ign->fb_tunnel_dev));
1605
return 0;
1606
1607
err_reg_dev:
1608
free_netdev(ndev);
1609
err_alloc_dev:
1610
return err;
1611
}
1612
1613
static struct pernet_operations ip6gre_net_ops = {
1614
.init = ip6gre_init_net,
1615
.exit_rtnl = ip6gre_exit_rtnl_net,
1616
.id = &ip6gre_net_id,
1617
.size = sizeof(struct ip6gre_net),
1618
};
1619
1620
static int ip6gre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
1621
struct netlink_ext_ack *extack)
1622
{
1623
__be16 flags;
1624
1625
if (!data)
1626
return 0;
1627
1628
flags = 0;
1629
if (data[IFLA_GRE_IFLAGS])
1630
flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1631
if (data[IFLA_GRE_OFLAGS])
1632
flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1633
if (flags & (GRE_VERSION|GRE_ROUTING))
1634
return -EINVAL;
1635
1636
return 0;
1637
}
1638
1639
static int ip6gre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1640
struct netlink_ext_ack *extack)
1641
{
1642
struct in6_addr daddr;
1643
1644
if (tb[IFLA_ADDRESS]) {
1645
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1646
return -EINVAL;
1647
if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1648
return -EADDRNOTAVAIL;
1649
}
1650
1651
if (!data)
1652
goto out;
1653
1654
if (data[IFLA_GRE_REMOTE]) {
1655
daddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
1656
if (ipv6_addr_any(&daddr))
1657
return -EINVAL;
1658
}
1659
1660
out:
1661
return ip6gre_tunnel_validate(tb, data, extack);
1662
}
1663
1664
static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
1665
struct netlink_ext_ack *extack)
1666
{
1667
__be16 flags = 0;
1668
int ret, ver = 0;
1669
1670
if (!data)
1671
return 0;
1672
1673
ret = ip6gre_tap_validate(tb, data, extack);
1674
if (ret)
1675
return ret;
1676
1677
/* ERSPAN should only have GRE sequence and key flag */
1678
if (data[IFLA_GRE_OFLAGS])
1679
flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1680
if (data[IFLA_GRE_IFLAGS])
1681
flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1682
if (!data[IFLA_GRE_COLLECT_METADATA] &&
1683
flags != (GRE_SEQ | GRE_KEY))
1684
return -EINVAL;
1685
1686
/* ERSPAN Session ID only has 10-bit. Since we reuse
1687
* 32-bit key field as ID, check it's range.
1688
*/
1689
if (data[IFLA_GRE_IKEY] &&
1690
(ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
1691
return -EINVAL;
1692
1693
if (data[IFLA_GRE_OKEY] &&
1694
(ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
1695
return -EINVAL;
1696
1697
if (data[IFLA_GRE_ERSPAN_VER]) {
1698
ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1699
if (ver != 1 && ver != 2)
1700
return -EINVAL;
1701
}
1702
1703
if (ver == 1) {
1704
if (data[IFLA_GRE_ERSPAN_INDEX]) {
1705
u32 index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1706
1707
if (index & ~INDEX_MASK)
1708
return -EINVAL;
1709
}
1710
} else if (ver == 2) {
1711
if (data[IFLA_GRE_ERSPAN_DIR]) {
1712
u16 dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1713
1714
if (dir & ~(DIR_MASK >> DIR_OFFSET))
1715
return -EINVAL;
1716
}
1717
1718
if (data[IFLA_GRE_ERSPAN_HWID]) {
1719
u16 hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1720
1721
if (hwid & ~(HWID_MASK >> HWID_OFFSET))
1722
return -EINVAL;
1723
}
1724
}
1725
1726
return 0;
1727
}
1728
1729
static void ip6erspan_set_version(struct nlattr *data[],
1730
struct __ip6_tnl_parm *parms)
1731
{
1732
if (!data)
1733
return;
1734
1735
parms->erspan_ver = 1;
1736
if (data[IFLA_GRE_ERSPAN_VER])
1737
parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
1738
1739
if (parms->erspan_ver == 1) {
1740
if (data[IFLA_GRE_ERSPAN_INDEX])
1741
parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
1742
} else if (parms->erspan_ver == 2) {
1743
if (data[IFLA_GRE_ERSPAN_DIR])
1744
parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
1745
if (data[IFLA_GRE_ERSPAN_HWID])
1746
parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
1747
}
1748
}
1749
1750
static void ip6gre_netlink_parms(struct nlattr *data[],
1751
struct __ip6_tnl_parm *parms)
1752
{
1753
memset(parms, 0, sizeof(*parms));
1754
1755
if (!data)
1756
return;
1757
1758
if (data[IFLA_GRE_LINK])
1759
parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1760
1761
if (data[IFLA_GRE_IFLAGS])
1762
gre_flags_to_tnl_flags(parms->i_flags,
1763
nla_get_be16(data[IFLA_GRE_IFLAGS]));
1764
1765
if (data[IFLA_GRE_OFLAGS])
1766
gre_flags_to_tnl_flags(parms->o_flags,
1767
nla_get_be16(data[IFLA_GRE_OFLAGS]));
1768
1769
if (data[IFLA_GRE_IKEY])
1770
parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
1771
1772
if (data[IFLA_GRE_OKEY])
1773
parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);
1774
1775
if (data[IFLA_GRE_LOCAL])
1776
parms->laddr = nla_get_in6_addr(data[IFLA_GRE_LOCAL]);
1777
1778
if (data[IFLA_GRE_REMOTE])
1779
parms->raddr = nla_get_in6_addr(data[IFLA_GRE_REMOTE]);
1780
1781
if (data[IFLA_GRE_TTL])
1782
parms->hop_limit = nla_get_u8(data[IFLA_GRE_TTL]);
1783
1784
if (data[IFLA_GRE_ENCAP_LIMIT])
1785
parms->encap_limit = nla_get_u8(data[IFLA_GRE_ENCAP_LIMIT]);
1786
1787
if (data[IFLA_GRE_FLOWINFO])
1788
parms->flowinfo = nla_get_be32(data[IFLA_GRE_FLOWINFO]);
1789
1790
if (data[IFLA_GRE_FLAGS])
1791
parms->flags = nla_get_u32(data[IFLA_GRE_FLAGS]);
1792
1793
if (data[IFLA_GRE_FWMARK])
1794
parms->fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
1795
1796
if (data[IFLA_GRE_COLLECT_METADATA])
1797
parms->collect_md = true;
1798
}
1799
1800
static int ip6gre_tap_init(struct net_device *dev)
1801
{
1802
int ret;
1803
1804
ret = ip6gre_tunnel_init_common(dev);
1805
if (ret)
1806
return ret;
1807
1808
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1809
1810
return 0;
1811
}
1812
1813
static const struct net_device_ops ip6gre_tap_netdev_ops = {
1814
.ndo_init = ip6gre_tap_init,
1815
.ndo_uninit = ip6gre_tunnel_uninit,
1816
.ndo_start_xmit = ip6gre_tunnel_xmit,
1817
.ndo_set_mac_address = eth_mac_addr,
1818
.ndo_validate_addr = eth_validate_addr,
1819
.ndo_change_mtu = ip6_tnl_change_mtu,
1820
.ndo_get_iflink = ip6_tnl_get_iflink,
1821
};
1822
1823
static int ip6erspan_calc_hlen(struct ip6_tnl *tunnel)
1824
{
1825
int t_hlen;
1826
1827
tunnel->tun_hlen = 8;
1828
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
1829
erspan_hdr_len(tunnel->parms.erspan_ver);
1830
1831
t_hlen = tunnel->hlen + sizeof(struct ipv6hdr);
1832
tunnel->dev->needed_headroom = LL_MAX_HEADER + t_hlen;
1833
return t_hlen;
1834
}
1835
1836
static int ip6erspan_tap_init(struct net_device *dev)
1837
{
1838
struct ip6_tnl *tunnel;
1839
int t_hlen;
1840
int ret;
1841
1842
tunnel = netdev_priv(dev);
1843
1844
tunnel->dev = dev;
1845
strcpy(tunnel->parms.name, dev->name);
1846
1847
ret = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL);
1848
if (ret)
1849
return ret;
1850
1851
ret = gro_cells_init(&tunnel->gro_cells, dev);
1852
if (ret)
1853
goto cleanup_dst_cache_init;
1854
1855
t_hlen = ip6erspan_calc_hlen(tunnel);
1856
dev->mtu = ETH_DATA_LEN - t_hlen;
1857
if (dev->type == ARPHRD_ETHER)
1858
dev->mtu -= ETH_HLEN;
1859
if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1860
dev->mtu -= 8;
1861
1862
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1863
ip6erspan_tnl_link_config(tunnel, 1);
1864
1865
netdev_hold(dev, &tunnel->dev_tracker, GFP_KERNEL);
1866
netdev_lockdep_set_classes(dev);
1867
return 0;
1868
1869
cleanup_dst_cache_init:
1870
dst_cache_destroy(&tunnel->dst_cache);
1871
return ret;
1872
}
1873
1874
static const struct net_device_ops ip6erspan_netdev_ops = {
1875
.ndo_init = ip6erspan_tap_init,
1876
.ndo_uninit = ip6erspan_tunnel_uninit,
1877
.ndo_start_xmit = ip6erspan_tunnel_xmit,
1878
.ndo_set_mac_address = eth_mac_addr,
1879
.ndo_validate_addr = eth_validate_addr,
1880
.ndo_change_mtu = ip6_tnl_change_mtu,
1881
.ndo_get_iflink = ip6_tnl_get_iflink,
1882
};
1883
1884
static void ip6gre_tap_setup(struct net_device *dev)
1885
{
1886
1887
ether_setup(dev);
1888
1889
dev->max_mtu = 0;
1890
dev->netdev_ops = &ip6gre_tap_netdev_ops;
1891
dev->needs_free_netdev = true;
1892
dev->priv_destructor = ip6gre_dev_free;
1893
1894
dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
1895
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1896
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1897
netif_keep_dst(dev);
1898
}
1899
1900
static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
1901
struct ip_tunnel_encap *ipencap)
1902
{
1903
bool ret = false;
1904
1905
memset(ipencap, 0, sizeof(*ipencap));
1906
1907
if (!data)
1908
return ret;
1909
1910
if (data[IFLA_GRE_ENCAP_TYPE]) {
1911
ret = true;
1912
ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
1913
}
1914
1915
if (data[IFLA_GRE_ENCAP_FLAGS]) {
1916
ret = true;
1917
ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
1918
}
1919
1920
if (data[IFLA_GRE_ENCAP_SPORT]) {
1921
ret = true;
1922
ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
1923
}
1924
1925
if (data[IFLA_GRE_ENCAP_DPORT]) {
1926
ret = true;
1927
ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
1928
}
1929
1930
return ret;
1931
}
1932
1933
static int ip6gre_newlink_common(struct net *link_net, struct net_device *dev,
1934
struct nlattr *tb[], struct nlattr *data[],
1935
struct netlink_ext_ack *extack)
1936
{
1937
struct ip6_tnl *nt;
1938
struct ip_tunnel_encap ipencap;
1939
int err;
1940
1941
nt = netdev_priv(dev);
1942
1943
if (ip6gre_netlink_encap_parms(data, &ipencap)) {
1944
int err = ip6_tnl_encap_setup(nt, &ipencap);
1945
1946
if (err < 0)
1947
return err;
1948
}
1949
1950
if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
1951
eth_hw_addr_random(dev);
1952
1953
nt->dev = dev;
1954
nt->net = link_net;
1955
1956
err = register_netdevice(dev);
1957
if (err)
1958
goto out;
1959
1960
if (tb[IFLA_MTU])
1961
ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
1962
1963
out:
1964
return err;
1965
}
1966
1967
static int ip6gre_newlink(struct net_device *dev,
1968
struct rtnl_newlink_params *params,
1969
struct netlink_ext_ack *extack)
1970
{
1971
struct net *net = params->link_net ? : dev_net(dev);
1972
struct ip6_tnl *nt = netdev_priv(dev);
1973
struct nlattr **data = params->data;
1974
struct nlattr **tb = params->tb;
1975
struct ip6gre_net *ign;
1976
int err;
1977
1978
ip6gre_netlink_parms(data, &nt->parms);
1979
ign = net_generic(net, ip6gre_net_id);
1980
1981
if (nt->parms.collect_md) {
1982
if (rtnl_dereference(ign->collect_md_tun))
1983
return -EEXIST;
1984
} else {
1985
if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
1986
return -EEXIST;
1987
}
1988
1989
err = ip6gre_newlink_common(net, dev, tb, data, extack);
1990
if (!err) {
1991
ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
1992
ip6gre_tunnel_link_md(ign, nt);
1993
ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
1994
}
1995
return err;
1996
}
1997
1998
static struct ip6_tnl *
1999
ip6gre_changelink_common(struct net_device *dev, struct nlattr *tb[],
2000
struct nlattr *data[], struct __ip6_tnl_parm *p_p,
2001
struct netlink_ext_ack *extack)
2002
{
2003
struct ip6_tnl *t, *nt = netdev_priv(dev);
2004
struct net *net = nt->net;
2005
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
2006
struct ip_tunnel_encap ipencap;
2007
2008
if (dev == ign->fb_tunnel_dev)
2009
return ERR_PTR(-EINVAL);
2010
2011
if (ip6gre_netlink_encap_parms(data, &ipencap)) {
2012
int err = ip6_tnl_encap_setup(nt, &ipencap);
2013
2014
if (err < 0)
2015
return ERR_PTR(err);
2016
}
2017
2018
ip6gre_netlink_parms(data, p_p);
2019
2020
t = ip6gre_tunnel_locate(net, p_p, 0);
2021
2022
if (t) {
2023
if (t->dev != dev)
2024
return ERR_PTR(-EEXIST);
2025
} else {
2026
t = nt;
2027
}
2028
2029
return t;
2030
}
2031
2032
static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
2033
struct nlattr *data[],
2034
struct netlink_ext_ack *extack)
2035
{
2036
struct ip6_tnl *t = netdev_priv(dev);
2037
struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
2038
struct __ip6_tnl_parm p;
2039
2040
t = ip6gre_changelink_common(dev, tb, data, &p, extack);
2041
if (IS_ERR(t))
2042
return PTR_ERR(t);
2043
2044
ip6gre_tunnel_unlink_md(ign, t);
2045
ip6gre_tunnel_unlink(ign, t);
2046
ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
2047
ip6gre_tunnel_link_md(ign, t);
2048
ip6gre_tunnel_link(ign, t);
2049
return 0;
2050
}
2051
2052
static void ip6gre_dellink(struct net_device *dev, struct list_head *head)
2053
{
2054
struct net *net = dev_net(dev);
2055
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
2056
2057
if (dev != ign->fb_tunnel_dev)
2058
unregister_netdevice_queue(dev, head);
2059
}
2060
2061
static size_t ip6gre_get_size(const struct net_device *dev)
{
        return
                /* IFLA_GRE_LINK */
                nla_total_size(4) +
                /* IFLA_GRE_IFLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_OFLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_IKEY */
                nla_total_size(4) +
                /* IFLA_GRE_OKEY */
                nla_total_size(4) +
                /* IFLA_GRE_LOCAL */
                nla_total_size(sizeof(struct in6_addr)) +
                /* IFLA_GRE_REMOTE */
                nla_total_size(sizeof(struct in6_addr)) +
                /* IFLA_GRE_TTL */
                nla_total_size(1) +
                /* IFLA_GRE_ENCAP_LIMIT */
                nla_total_size(1) +
                /* IFLA_GRE_FLOWINFO */
                nla_total_size(4) +
                /* IFLA_GRE_FLAGS */
                nla_total_size(4) +
                /* IFLA_GRE_ENCAP_TYPE */
                nla_total_size(2) +
                /* IFLA_GRE_ENCAP_FLAGS */
                nla_total_size(2) +
                /* IFLA_GRE_ENCAP_SPORT */
                nla_total_size(2) +
                /* IFLA_GRE_ENCAP_DPORT */
                nla_total_size(2) +
                /* IFLA_GRE_COLLECT_METADATA */
                nla_total_size(0) +
                /* IFLA_GRE_FWMARK */
                nla_total_size(4) +
                /* IFLA_GRE_ERSPAN_INDEX */
                nla_total_size(4) +
                0;
}

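/* Dump the current tunnel configuration back to user space; the ERSPAN
 * attributes are only emitted for ERSPAN v1/v2 tunnels.
 */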
static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);
        struct __ip6_tnl_parm *p = &t->parms;
        IP_TUNNEL_DECLARE_FLAGS(o_flags);

        ip_tunnel_flags_copy(o_flags, p->o_flags);

        if (p->erspan_ver == 1 || p->erspan_ver == 2) {
                if (!p->collect_md)
                        __set_bit(IP_TUNNEL_KEY_BIT, o_flags);

                if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver))
                        goto nla_put_failure;

                if (p->erspan_ver == 1) {
                        if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index))
                                goto nla_put_failure;
                } else {
                        if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir))
                                goto nla_put_failure;
                        if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid))
                                goto nla_put_failure;
                }
        }

        if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
            nla_put_be16(skb, IFLA_GRE_IFLAGS,
                         gre_tnl_flags_to_gre_flags(p->i_flags)) ||
            nla_put_be16(skb, IFLA_GRE_OFLAGS,
                         gre_tnl_flags_to_gre_flags(o_flags)) ||
            nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
            nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
            nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
            nla_put_in6_addr(skb, IFLA_GRE_REMOTE, &p->raddr) ||
            nla_put_u8(skb, IFLA_GRE_TTL, p->hop_limit) ||
            nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) ||
            nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) ||
            nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) ||
            nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark))
                goto nla_put_failure;

        if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
                        t->encap.type) ||
            nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
                         t->encap.sport) ||
            nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
                         t->encap.dport) ||
            nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
                        t->encap.flags))
                goto nla_put_failure;

        if (p->collect_md) {
                if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
                        goto nla_put_failure;
        }

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

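/* Netlink attribute policy shared by the ip6gre, ip6gretap and ip6erspan
 * link types.
 */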
static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
        [IFLA_GRE_LINK] = { .type = NLA_U32 },
        [IFLA_GRE_IFLAGS] = { .type = NLA_U16 },
        [IFLA_GRE_OFLAGS] = { .type = NLA_U16 },
        [IFLA_GRE_IKEY] = { .type = NLA_U32 },
        [IFLA_GRE_OKEY] = { .type = NLA_U32 },
        [IFLA_GRE_LOCAL] = { .len = sizeof_field(struct ipv6hdr, saddr) },
        [IFLA_GRE_REMOTE] = { .len = sizeof_field(struct ipv6hdr, daddr) },
        [IFLA_GRE_TTL] = { .type = NLA_U8 },
        [IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 },
        [IFLA_GRE_FLOWINFO] = { .type = NLA_U32 },
        [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
        [IFLA_GRE_ENCAP_TYPE] = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 },
        [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
        [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG },
        [IFLA_GRE_FWMARK] = { .type = NLA_U32 },
        [IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 },
        [IFLA_GRE_ERSPAN_VER] = { .type = NLA_U8 },
        [IFLA_GRE_ERSPAN_DIR] = { .type = NLA_U8 },
        [IFLA_GRE_ERSPAN_HWID] = { .type = NLA_U16 },
};

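/* ->setup for "ip6erspan" devices: Ethernet-like tap setup with per-CPU
 * tstats and skb dst retained for the tunnel xmit path.
 */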
static void ip6erspan_tap_setup(struct net_device *dev)
{
        ether_setup(dev);

        dev->max_mtu = 0;
        dev->netdev_ops = &ip6erspan_netdev_ops;
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6gre_dev_free;

        dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        netif_keep_dst(dev);
}

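/* rtnl ->newlink handler for "ip6erspan": same flow as ip6gre_newlink(),
 * but it also resolves the ERSPAN version and uses the ERSPAN-specific
 * link configuration and collect_md slot.
 */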
static int ip6erspan_newlink(struct net_device *dev,
                             struct rtnl_newlink_params *params,
                             struct netlink_ext_ack *extack)
{
        struct net *net = params->link_net ? : dev_net(dev);
        struct ip6_tnl *nt = netdev_priv(dev);
        struct nlattr **data = params->data;
        struct nlattr **tb = params->tb;
        struct ip6gre_net *ign;
        int err;

        ip6gre_netlink_parms(data, &nt->parms);
        ip6erspan_set_version(data, &nt->parms);
        ign = net_generic(net, ip6gre_net_id);

        if (nt->parms.collect_md) {
                if (rtnl_dereference(ign->collect_md_tun_erspan))
                        return -EEXIST;
        } else {
                if (ip6gre_tunnel_find(net, &nt->parms, dev->type))
                        return -EEXIST;
        }

        err = ip6gre_newlink_common(net, dev, tb, data, extack);
        if (!err) {
                ip6erspan_tnl_link_config(nt, !tb[IFLA_MTU]);
                ip6erspan_tunnel_link_md(ign, nt);
                ip6gre_tunnel_link(net_generic(net, ip6gre_net_id), nt);
        }
        return err;
}

static void ip6erspan_tnl_link_config(struct ip6_tnl *t, int set_mtu)
{
        ip6gre_tnl_link_config_common(t);
        ip6gre_tnl_link_config_route(t, set_mtu, ip6erspan_calc_hlen(t));
}

static int ip6erspan_tnl_change(struct ip6_tnl *t,
                                const struct __ip6_tnl_parm *p, int set_mtu)
{
        ip6gre_tnl_copy_tnl_parm(t, p);
        ip6erspan_tnl_link_config(t, set_mtu);
        return 0;
}

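/* rtnl ->changelink handler for "ip6erspan": as ip6gre_changelink(), but
 * re-resolving the ERSPAN version and using the ERSPAN collect_md slot.
 */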
static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
                                struct nlattr *data[],
                                struct netlink_ext_ack *extack)
{
        struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id);
        struct __ip6_tnl_parm p;
        struct ip6_tnl *t;

        t = ip6gre_changelink_common(dev, tb, data, &p, extack);
        if (IS_ERR(t))
                return PTR_ERR(t);

        ip6erspan_set_version(data, &p);
        ip6gre_tunnel_unlink_md(ign, t);
        ip6gre_tunnel_unlink(ign, t);
        ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
        ip6erspan_tunnel_link_md(ign, t);
        ip6gre_tunnel_link(ign, t);
        return 0;
}

static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
        .kind = "ip6gre",
        .maxtype = IFLA_GRE_MAX,
        .policy = ip6gre_policy,
        .priv_size = sizeof(struct ip6_tnl),
        .setup = ip6gre_tunnel_setup,
        .validate = ip6gre_tunnel_validate,
        .newlink = ip6gre_newlink,
        .changelink = ip6gre_changelink,
        .dellink = ip6gre_dellink,
        .get_size = ip6gre_get_size,
        .fill_info = ip6gre_fill_info,
        .get_link_net = ip6_tnl_get_link_net,
};

static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
        .kind = "ip6gretap",
        .maxtype = IFLA_GRE_MAX,
        .policy = ip6gre_policy,
        .priv_size = sizeof(struct ip6_tnl),
        .setup = ip6gre_tap_setup,
        .validate = ip6gre_tap_validate,
        .newlink = ip6gre_newlink,
        .changelink = ip6gre_changelink,
        .get_size = ip6gre_get_size,
        .fill_info = ip6gre_fill_info,
        .get_link_net = ip6_tnl_get_link_net,
};

static struct rtnl_link_ops ip6erspan_tap_ops __read_mostly = {
        .kind = "ip6erspan",
        .maxtype = IFLA_GRE_MAX,
        .policy = ip6gre_policy,
        .priv_size = sizeof(struct ip6_tnl),
        .setup = ip6erspan_tap_setup,
        .validate = ip6erspan_tap_validate,
        .newlink = ip6erspan_newlink,
        .changelink = ip6erspan_changelink,
        .get_size = ip6gre_get_size,
        .fill_info = ip6gre_fill_info,
        .get_link_net = ip6_tnl_get_link_net,
};

/*
 *	And now the modules code and kernel interface.
 */

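/* Module init: register the per-netns state, hook IPPROTO_GRE into the
 * IPv6 protocol table, then register the three rtnl link types, rolling
 * everything back in reverse order on failure.
 */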
static int __init ip6gre_init(void)
{
        int err;

        pr_info("GRE over IPv6 tunneling driver\n");

        err = register_pernet_device(&ip6gre_net_ops);
        if (err < 0)
                return err;

        err = inet6_add_protocol(&ip6gre_protocol, IPPROTO_GRE);
        if (err < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                goto add_proto_failed;
        }

        err = rtnl_link_register(&ip6gre_link_ops);
        if (err < 0)
                goto rtnl_link_failed;

        err = rtnl_link_register(&ip6gre_tap_ops);
        if (err < 0)
                goto tap_ops_failed;

        err = rtnl_link_register(&ip6erspan_tap_ops);
        if (err < 0)
                goto erspan_link_failed;

out:
        return err;

erspan_link_failed:
        rtnl_link_unregister(&ip6gre_tap_ops);
tap_ops_failed:
        rtnl_link_unregister(&ip6gre_link_ops);
rtnl_link_failed:
        inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
add_proto_failed:
        unregister_pernet_device(&ip6gre_net_ops);
        goto out;
}

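/* Module exit: unregister the rtnl link types, the IPPROTO_GRE handler
 * and the per-netns state set up by ip6gre_init().
 */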
static void __exit ip6gre_fini(void)
{
        rtnl_link_unregister(&ip6gre_tap_ops);
        rtnl_link_unregister(&ip6gre_link_ops);
        rtnl_link_unregister(&ip6erspan_tap_ops);
        inet6_del_protocol(&ip6gre_protocol, IPPROTO_GRE);
        unregister_pernet_device(&ip6gre_net_ops);
}

module_init(ip6gre_init);
module_exit(ip6gre_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("D. Kozlov <[email protected]>");
MODULE_DESCRIPTION("GRE over IPv6 tunneling device");
MODULE_ALIAS_RTNL_LINK("ip6gre");
MODULE_ALIAS_RTNL_LINK("ip6gretap");
MODULE_ALIAS_RTNL_LINK("ip6erspan");
MODULE_ALIAS_NETDEV("ip6gre0");