Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/net/ipv6/addrconf.c
26278 views
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
* IPv6 Address [auto]configuration
4
* Linux INET6 implementation
5
*
6
* Authors:
7
* Pedro Roque <[email protected]>
8
* Alexey Kuznetsov <[email protected]>
9
*/
10
11
/*
12
* Changes:
13
*
14
* Janos Farkas : delete timer on ifdown
15
* <[email protected]>
16
* Andi Kleen : kill double kfree on module
17
* unload.
18
* Maciej W. Rozycki : FDDI support
19
* sekiya@USAGI : Don't send too many RS
20
* packets.
21
* yoshfuji@USAGI : Fixed interval between DAD
22
* packets.
23
* YOSHIFUJI Hideaki @USAGI : improved accuracy of
24
* address validation timer.
25
* YOSHIFUJI Hideaki @USAGI : Privacy Extensions (RFC3041)
26
* support.
27
* Yuji SEKIYA @USAGI : Don't assign a same IPv6
28
* address on a same interface.
29
* YOSHIFUJI Hideaki @USAGI : ARCnet support
30
* YOSHIFUJI Hideaki @USAGI : convert /proc/net/if_inet6 to
31
* seq_file.
32
* YOSHIFUJI Hideaki @USAGI : improved source address
33
* selection; consider scope,
34
* status etc.
35
*/
36
37
#define pr_fmt(fmt) "IPv6: " fmt
38
39
#include <linux/errno.h>
40
#include <linux/types.h>
41
#include <linux/kernel.h>
42
#include <linux/sched/signal.h>
43
#include <linux/socket.h>
44
#include <linux/sockios.h>
45
#include <linux/net.h>
46
#include <linux/inet.h>
47
#include <linux/in6.h>
48
#include <linux/netdevice.h>
49
#include <linux/if_addr.h>
50
#include <linux/if_arp.h>
51
#include <linux/if_arcnet.h>
52
#include <linux/if_infiniband.h>
53
#include <linux/route.h>
54
#include <linux/inetdevice.h>
55
#include <linux/init.h>
56
#include <linux/slab.h>
57
#ifdef CONFIG_SYSCTL
58
#include <linux/sysctl.h>
59
#endif
60
#include <linux/capability.h>
61
#include <linux/delay.h>
62
#include <linux/notifier.h>
63
#include <linux/string.h>
64
#include <linux/hash.h>
65
66
#include <net/ip_tunnels.h>
67
#include <net/net_namespace.h>
68
#include <net/sock.h>
69
#include <net/snmp.h>
70
71
#include <net/6lowpan.h>
72
#include <net/firewire.h>
73
#include <net/ipv6.h>
74
#include <net/protocol.h>
75
#include <net/ndisc.h>
76
#include <net/ip6_route.h>
77
#include <net/addrconf.h>
78
#include <net/tcp.h>
79
#include <net/ip.h>
80
#include <net/netlink.h>
81
#include <net/pkt_sched.h>
82
#include <net/l3mdev.h>
83
#include <net/netdev_lock.h>
84
#include <linux/if_tunnel.h>
85
#include <linux/rtnetlink.h>
86
#include <linux/netconf.h>
87
#include <linux/random.h>
88
#include <linux/uaccess.h>
89
#include <linux/unaligned.h>
90
91
#include <linux/proc_fs.h>
92
#include <linux/seq_file.h>
93
#include <linux/export.h>
94
#include <linux/ioam6.h>
95
96
#define IPV6_MAX_STRLEN \
97
sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
98
99
static inline u32 cstamp_delta(unsigned long cstamp)
100
{
101
return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
102
}
103
104
static inline s32 rfc3315_s14_backoff_init(s32 irt)
105
{
106
/* multiply 'initial retransmission time' by 0.9 .. 1.1 */
107
u64 tmp = get_random_u32_inclusive(900000, 1100000) * (u64)irt;
108
do_div(tmp, 1000000);
109
return (s32)tmp;
110
}
111
112
static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
113
{
114
/* multiply 'retransmission timeout' by 1.9 .. 2.1 */
115
u64 tmp = get_random_u32_inclusive(1900000, 2100000) * (u64)rt;
116
do_div(tmp, 1000000);
117
if ((s32)tmp > mrt) {
118
/* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
119
tmp = get_random_u32_inclusive(900000, 1100000) * (u64)mrt;
120
do_div(tmp, 1000000);
121
}
122
return (s32)tmp;
123
}
124
125
#ifdef CONFIG_SYSCTL
126
static int addrconf_sysctl_register(struct inet6_dev *idev);
127
static void addrconf_sysctl_unregister(struct inet6_dev *idev);
128
#else
129
static inline int addrconf_sysctl_register(struct inet6_dev *idev)
130
{
131
return 0;
132
}
133
134
static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
135
{
136
}
137
#endif
138
139
static void ipv6_gen_rnd_iid(struct in6_addr *addr);
140
141
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
142
static int ipv6_count_addresses(const struct inet6_dev *idev);
143
static int ipv6_generate_stable_address(struct in6_addr *addr,
144
u8 dad_count,
145
const struct inet6_dev *idev);
146
147
#define IN6_ADDR_HSIZE_SHIFT 8
148
#define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT)
149
150
static void addrconf_verify(struct net *net);
151
static void addrconf_verify_rtnl(struct net *net);
152
153
static struct workqueue_struct *addrconf_wq;
154
155
static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
156
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
157
158
static void addrconf_type_change(struct net_device *dev,
159
unsigned long event);
160
static int addrconf_ifdown(struct net_device *dev, bool unregister);
161
162
static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
163
int plen,
164
const struct net_device *dev,
165
u32 flags, u32 noflags,
166
bool no_gw);
167
168
static void addrconf_dad_start(struct inet6_ifaddr *ifp);
169
static void addrconf_dad_work(struct work_struct *w);
170
static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
171
bool send_na);
172
static void addrconf_dad_run(struct inet6_dev *idev, bool restart);
173
static void addrconf_rs_timer(struct timer_list *t);
174
static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
175
static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
176
177
static void inet6_prefix_notify(int event, struct inet6_dev *idev,
178
struct prefix_info *pinfo);
179
180
static struct ipv6_devconf ipv6_devconf __read_mostly = {
181
.forwarding = 0,
182
.hop_limit = IPV6_DEFAULT_HOPLIMIT,
183
.mtu6 = IPV6_MIN_MTU,
184
.accept_ra = 1,
185
.accept_redirects = 1,
186
.autoconf = 1,
187
.force_mld_version = 0,
188
.mldv1_unsolicited_report_interval = 10 * HZ,
189
.mldv2_unsolicited_report_interval = HZ,
190
.dad_transmits = 1,
191
.rtr_solicits = MAX_RTR_SOLICITATIONS,
192
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
193
.rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
194
.rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
195
.use_tempaddr = 0,
196
.temp_valid_lft = TEMP_VALID_LIFETIME,
197
.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
198
.regen_min_advance = REGEN_MIN_ADVANCE,
199
.regen_max_retry = REGEN_MAX_RETRY,
200
.max_desync_factor = MAX_DESYNC_FACTOR,
201
.max_addresses = IPV6_MAX_ADDRESSES,
202
.accept_ra_defrtr = 1,
203
.ra_defrtr_metric = IP6_RT_PRIO_USER,
204
.accept_ra_from_local = 0,
205
.accept_ra_min_hop_limit= 1,
206
.accept_ra_min_lft = 0,
207
.accept_ra_pinfo = 1,
208
#ifdef CONFIG_IPV6_ROUTER_PREF
209
.accept_ra_rtr_pref = 1,
210
.rtr_probe_interval = 60 * HZ,
211
#ifdef CONFIG_IPV6_ROUTE_INFO
212
.accept_ra_rt_info_min_plen = 0,
213
.accept_ra_rt_info_max_plen = 0,
214
#endif
215
#endif
216
.proxy_ndp = 0,
217
.accept_source_route = 0, /* we do not accept RH0 by default. */
218
.disable_ipv6 = 0,
219
.accept_dad = 0,
220
.suppress_frag_ndisc = 1,
221
.accept_ra_mtu = 1,
222
.stable_secret = {
223
.initialized = false,
224
},
225
.use_oif_addrs_only = 0,
226
.ignore_routes_with_linkdown = 0,
227
.keep_addr_on_down = 0,
228
.seg6_enabled = 0,
229
#ifdef CONFIG_IPV6_SEG6_HMAC
230
.seg6_require_hmac = 0,
231
#endif
232
.enhanced_dad = 1,
233
.addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
234
.disable_policy = 0,
235
.rpl_seg_enabled = 0,
236
.ioam6_enabled = 0,
237
.ioam6_id = IOAM6_DEFAULT_IF_ID,
238
.ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE,
239
.ndisc_evict_nocarrier = 1,
240
.ra_honor_pio_life = 0,
241
.ra_honor_pio_pflag = 0,
242
.force_forwarding = 0,
243
};
244
245
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
246
.forwarding = 0,
247
.hop_limit = IPV6_DEFAULT_HOPLIMIT,
248
.mtu6 = IPV6_MIN_MTU,
249
.accept_ra = 1,
250
.accept_redirects = 1,
251
.autoconf = 1,
252
.force_mld_version = 0,
253
.mldv1_unsolicited_report_interval = 10 * HZ,
254
.mldv2_unsolicited_report_interval = HZ,
255
.dad_transmits = 1,
256
.rtr_solicits = MAX_RTR_SOLICITATIONS,
257
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
258
.rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
259
.rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
260
.use_tempaddr = 0,
261
.temp_valid_lft = TEMP_VALID_LIFETIME,
262
.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
263
.regen_min_advance = REGEN_MIN_ADVANCE,
264
.regen_max_retry = REGEN_MAX_RETRY,
265
.max_desync_factor = MAX_DESYNC_FACTOR,
266
.max_addresses = IPV6_MAX_ADDRESSES,
267
.accept_ra_defrtr = 1,
268
.ra_defrtr_metric = IP6_RT_PRIO_USER,
269
.accept_ra_from_local = 0,
270
.accept_ra_min_hop_limit= 1,
271
.accept_ra_min_lft = 0,
272
.accept_ra_pinfo = 1,
273
#ifdef CONFIG_IPV6_ROUTER_PREF
274
.accept_ra_rtr_pref = 1,
275
.rtr_probe_interval = 60 * HZ,
276
#ifdef CONFIG_IPV6_ROUTE_INFO
277
.accept_ra_rt_info_min_plen = 0,
278
.accept_ra_rt_info_max_plen = 0,
279
#endif
280
#endif
281
.proxy_ndp = 0,
282
.accept_source_route = 0, /* we do not accept RH0 by default. */
283
.disable_ipv6 = 0,
284
.accept_dad = 1,
285
.suppress_frag_ndisc = 1,
286
.accept_ra_mtu = 1,
287
.stable_secret = {
288
.initialized = false,
289
},
290
.use_oif_addrs_only = 0,
291
.ignore_routes_with_linkdown = 0,
292
.keep_addr_on_down = 0,
293
.seg6_enabled = 0,
294
#ifdef CONFIG_IPV6_SEG6_HMAC
295
.seg6_require_hmac = 0,
296
#endif
297
.enhanced_dad = 1,
298
.addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
299
.disable_policy = 0,
300
.rpl_seg_enabled = 0,
301
.ioam6_enabled = 0,
302
.ioam6_id = IOAM6_DEFAULT_IF_ID,
303
.ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE,
304
.ndisc_evict_nocarrier = 1,
305
.ra_honor_pio_life = 0,
306
.ra_honor_pio_pflag = 0,
307
.force_forwarding = 0,
308
};
309
310
/* Check if link is ready: is it up and is a valid qdisc available */
311
static inline bool addrconf_link_ready(const struct net_device *dev)
312
{
313
return netif_oper_up(dev) && !qdisc_tx_is_noop(dev);
314
}
315
316
static void addrconf_del_rs_timer(struct inet6_dev *idev)
317
{
318
if (timer_delete(&idev->rs_timer))
319
__in6_dev_put(idev);
320
}
321
322
static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
323
{
324
if (cancel_delayed_work(&ifp->dad_work))
325
__in6_ifa_put(ifp);
326
}
327
328
static void addrconf_mod_rs_timer(struct inet6_dev *idev,
329
unsigned long when)
330
{
331
if (!mod_timer(&idev->rs_timer, jiffies + when))
332
in6_dev_hold(idev);
333
}
334
335
static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
336
unsigned long delay)
337
{
338
in6_ifa_hold(ifp);
339
if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
340
in6_ifa_put(ifp);
341
}
342
343
static int snmp6_alloc_dev(struct inet6_dev *idev)
344
{
345
int i;
346
347
idev->stats.ipv6 = alloc_percpu_gfp(struct ipstats_mib, GFP_KERNEL_ACCOUNT);
348
if (!idev->stats.ipv6)
349
goto err_ip;
350
351
for_each_possible_cpu(i) {
352
struct ipstats_mib *addrconf_stats;
353
addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
354
u64_stats_init(&addrconf_stats->syncp);
355
}
356
357
358
idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
359
GFP_KERNEL);
360
if (!idev->stats.icmpv6dev)
361
goto err_icmp;
362
idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
363
GFP_KERNEL_ACCOUNT);
364
if (!idev->stats.icmpv6msgdev)
365
goto err_icmpmsg;
366
367
return 0;
368
369
err_icmpmsg:
370
kfree(idev->stats.icmpv6dev);
371
err_icmp:
372
free_percpu(idev->stats.ipv6);
373
err_ip:
374
return -ENOMEM;
375
}
376
377
static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
378
{
379
struct inet6_dev *ndev;
380
int err = -ENOMEM;
381
382
ASSERT_RTNL();
383
netdev_ops_assert_locked(dev);
384
385
if (dev->mtu < IPV6_MIN_MTU && dev != blackhole_netdev)
386
return ERR_PTR(-EINVAL);
387
388
ndev = kzalloc(sizeof(*ndev), GFP_KERNEL_ACCOUNT);
389
if (!ndev)
390
return ERR_PTR(err);
391
392
rwlock_init(&ndev->lock);
393
ndev->dev = dev;
394
INIT_LIST_HEAD(&ndev->addr_list);
395
timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0);
396
memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
397
398
if (ndev->cnf.stable_secret.initialized)
399
ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
400
401
ndev->cnf.mtu6 = dev->mtu;
402
ndev->ra_mtu = 0;
403
ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
404
if (!ndev->nd_parms) {
405
kfree(ndev);
406
return ERR_PTR(err);
407
}
408
if (ndev->cnf.forwarding)
409
netif_disable_lro(dev);
410
/* We refer to the device */
411
netdev_hold(dev, &ndev->dev_tracker, GFP_KERNEL);
412
413
if (snmp6_alloc_dev(ndev) < 0) {
414
netdev_dbg(dev, "%s: cannot allocate memory for statistics\n",
415
__func__);
416
neigh_parms_release(&nd_tbl, ndev->nd_parms);
417
netdev_put(dev, &ndev->dev_tracker);
418
kfree(ndev);
419
return ERR_PTR(err);
420
}
421
422
if (dev != blackhole_netdev) {
423
if (snmp6_register_dev(ndev) < 0) {
424
netdev_dbg(dev, "%s: cannot create /proc/net/dev_snmp6/%s\n",
425
__func__, dev->name);
426
goto err_release;
427
}
428
}
429
/* One reference from device. */
430
refcount_set(&ndev->refcnt, 1);
431
432
if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
433
ndev->cnf.accept_dad = -1;
434
435
#if IS_ENABLED(CONFIG_IPV6_SIT)
436
if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
437
pr_info("%s: Disabled Multicast RS\n", dev->name);
438
ndev->cnf.rtr_solicits = 0;
439
}
440
#endif
441
442
INIT_LIST_HEAD(&ndev->tempaddr_list);
443
ndev->desync_factor = U32_MAX;
444
if ((dev->flags&IFF_LOOPBACK) ||
445
dev->type == ARPHRD_TUNNEL ||
446
dev->type == ARPHRD_TUNNEL6 ||
447
dev->type == ARPHRD_SIT ||
448
dev->type == ARPHRD_NONE) {
449
ndev->cnf.use_tempaddr = -1;
450
}
451
452
ndev->token = in6addr_any;
453
454
if (netif_running(dev) && addrconf_link_ready(dev))
455
ndev->if_flags |= IF_READY;
456
457
ipv6_mc_init_dev(ndev);
458
ndev->tstamp = jiffies;
459
if (dev != blackhole_netdev) {
460
err = addrconf_sysctl_register(ndev);
461
if (err) {
462
ipv6_mc_destroy_dev(ndev);
463
snmp6_unregister_dev(ndev);
464
goto err_release;
465
}
466
}
467
/* protected by rtnl_lock */
468
rcu_assign_pointer(dev->ip6_ptr, ndev);
469
470
if (dev != blackhole_netdev) {
471
/* Join interface-local all-node multicast group */
472
ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
473
474
/* Join all-node multicast group */
475
ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
476
477
/* Join all-router multicast group if forwarding is set */
478
if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
479
ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
480
}
481
return ndev;
482
483
err_release:
484
neigh_parms_release(&nd_tbl, ndev->nd_parms);
485
ndev->dead = 1;
486
in6_dev_finish_destroy(ndev);
487
return ERR_PTR(err);
488
}
489
490
static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
491
{
492
struct inet6_dev *idev;
493
494
ASSERT_RTNL();
495
496
idev = __in6_dev_get(dev);
497
if (!idev) {
498
idev = ipv6_add_dev(dev);
499
if (IS_ERR(idev))
500
return idev;
501
}
502
503
if (dev->flags&IFF_UP)
504
ipv6_mc_up(idev);
505
return idev;
506
}
507
508
static int inet6_netconf_msgsize_devconf(int type)
509
{
510
int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
511
+ nla_total_size(4); /* NETCONFA_IFINDEX */
512
bool all = false;
513
514
if (type == NETCONFA_ALL)
515
all = true;
516
517
if (all || type == NETCONFA_FORWARDING)
518
size += nla_total_size(4);
519
#ifdef CONFIG_IPV6_MROUTE
520
if (all || type == NETCONFA_MC_FORWARDING)
521
size += nla_total_size(4);
522
#endif
523
if (all || type == NETCONFA_PROXY_NEIGH)
524
size += nla_total_size(4);
525
526
if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
527
size += nla_total_size(4);
528
529
return size;
530
}
531
532
static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
533
struct ipv6_devconf *devconf, u32 portid,
534
u32 seq, int event, unsigned int flags,
535
int type)
536
{
537
struct nlmsghdr *nlh;
538
struct netconfmsg *ncm;
539
bool all = false;
540
541
nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
542
flags);
543
if (!nlh)
544
return -EMSGSIZE;
545
546
if (type == NETCONFA_ALL)
547
all = true;
548
549
ncm = nlmsg_data(nlh);
550
ncm->ncm_family = AF_INET6;
551
552
if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
553
goto nla_put_failure;
554
555
if (!devconf)
556
goto out;
557
558
if ((all || type == NETCONFA_FORWARDING) &&
559
nla_put_s32(skb, NETCONFA_FORWARDING,
560
READ_ONCE(devconf->forwarding)) < 0)
561
goto nla_put_failure;
562
#ifdef CONFIG_IPV6_MROUTE
563
if ((all || type == NETCONFA_MC_FORWARDING) &&
564
nla_put_s32(skb, NETCONFA_MC_FORWARDING,
565
atomic_read(&devconf->mc_forwarding)) < 0)
566
goto nla_put_failure;
567
#endif
568
if ((all || type == NETCONFA_PROXY_NEIGH) &&
569
nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
570
READ_ONCE(devconf->proxy_ndp)) < 0)
571
goto nla_put_failure;
572
573
if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
574
nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
575
READ_ONCE(devconf->ignore_routes_with_linkdown)) < 0)
576
goto nla_put_failure;
577
578
out:
579
nlmsg_end(skb, nlh);
580
return 0;
581
582
nla_put_failure:
583
nlmsg_cancel(skb, nlh);
584
return -EMSGSIZE;
585
}
586
587
void inet6_netconf_notify_devconf(struct net *net, int event, int type,
588
int ifindex, struct ipv6_devconf *devconf)
589
{
590
struct sk_buff *skb;
591
int err = -ENOBUFS;
592
593
skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL);
594
if (!skb)
595
goto errout;
596
597
err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
598
event, 0, type);
599
if (err < 0) {
600
/* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
601
WARN_ON(err == -EMSGSIZE);
602
kfree_skb(skb);
603
goto errout;
604
}
605
rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL);
606
return;
607
errout:
608
rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
609
}
610
611
static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
612
[NETCONFA_IFINDEX] = { .len = sizeof(int) },
613
[NETCONFA_FORWARDING] = { .len = sizeof(int) },
614
[NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
615
[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
616
};
617
618
static int inet6_netconf_valid_get_req(struct sk_buff *skb,
619
const struct nlmsghdr *nlh,
620
struct nlattr **tb,
621
struct netlink_ext_ack *extack)
622
{
623
int i, err;
624
625
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
626
NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf get request");
627
return -EINVAL;
628
}
629
630
if (!netlink_strict_get_check(skb))
631
return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
632
tb, NETCONFA_MAX,
633
devconf_ipv6_policy, extack);
634
635
err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
636
tb, NETCONFA_MAX,
637
devconf_ipv6_policy, extack);
638
if (err)
639
return err;
640
641
for (i = 0; i <= NETCONFA_MAX; i++) {
642
if (!tb[i])
643
continue;
644
645
switch (i) {
646
case NETCONFA_IFINDEX:
647
break;
648
default:
649
NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request");
650
return -EINVAL;
651
}
652
}
653
654
return 0;
655
}
656
657
static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
658
struct nlmsghdr *nlh,
659
struct netlink_ext_ack *extack)
660
{
661
struct net *net = sock_net(in_skb->sk);
662
struct nlattr *tb[NETCONFA_MAX+1];
663
struct inet6_dev *in6_dev = NULL;
664
struct net_device *dev = NULL;
665
struct sk_buff *skb;
666
struct ipv6_devconf *devconf;
667
int ifindex;
668
int err;
669
670
err = inet6_netconf_valid_get_req(in_skb, nlh, tb, extack);
671
if (err < 0)
672
return err;
673
674
if (!tb[NETCONFA_IFINDEX])
675
return -EINVAL;
676
677
err = -EINVAL;
678
ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
679
switch (ifindex) {
680
case NETCONFA_IFINDEX_ALL:
681
devconf = net->ipv6.devconf_all;
682
break;
683
case NETCONFA_IFINDEX_DEFAULT:
684
devconf = net->ipv6.devconf_dflt;
685
break;
686
default:
687
dev = dev_get_by_index(net, ifindex);
688
if (!dev)
689
return -EINVAL;
690
in6_dev = in6_dev_get(dev);
691
if (!in6_dev)
692
goto errout;
693
devconf = &in6_dev->cnf;
694
break;
695
}
696
697
err = -ENOBUFS;
698
skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
699
if (!skb)
700
goto errout;
701
702
err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
703
NETLINK_CB(in_skb).portid,
704
nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
705
NETCONFA_ALL);
706
if (err < 0) {
707
/* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
708
WARN_ON(err == -EMSGSIZE);
709
kfree_skb(skb);
710
goto errout;
711
}
712
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
713
errout:
714
if (in6_dev)
715
in6_dev_put(in6_dev);
716
dev_put(dev);
717
return err;
718
}
719
720
/* Combine dev_addr_genid and dev_base_seq to detect changes.
721
*/
722
static u32 inet6_base_seq(const struct net *net)
723
{
724
u32 res = atomic_read(&net->ipv6.dev_addr_genid) +
725
READ_ONCE(net->dev_base_seq);
726
727
/* Must not return 0 (see nl_dump_check_consistent()).
728
* Chose a value far away from 0.
729
*/
730
if (!res)
731
res = 0x80000000;
732
return res;
733
}
734
735
static int inet6_netconf_dump_devconf(struct sk_buff *skb,
736
struct netlink_callback *cb)
737
{
738
const struct nlmsghdr *nlh = cb->nlh;
739
struct net *net = sock_net(skb->sk);
740
struct {
741
unsigned long ifindex;
742
unsigned int all_default;
743
} *ctx = (void *)cb->ctx;
744
struct net_device *dev;
745
struct inet6_dev *idev;
746
int err = 0;
747
748
if (cb->strict_check) {
749
struct netlink_ext_ack *extack = cb->extack;
750
struct netconfmsg *ncm;
751
752
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
753
NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request");
754
return -EINVAL;
755
}
756
757
if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
758
NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request");
759
return -EINVAL;
760
}
761
}
762
763
rcu_read_lock();
764
for_each_netdev_dump(net, dev, ctx->ifindex) {
765
idev = __in6_dev_get(dev);
766
if (!idev)
767
continue;
768
err = inet6_netconf_fill_devconf(skb, dev->ifindex,
769
&idev->cnf,
770
NETLINK_CB(cb->skb).portid,
771
nlh->nlmsg_seq,
772
RTM_NEWNETCONF,
773
NLM_F_MULTI,
774
NETCONFA_ALL);
775
if (err < 0)
776
goto done;
777
}
778
if (ctx->all_default == 0) {
779
err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
780
net->ipv6.devconf_all,
781
NETLINK_CB(cb->skb).portid,
782
nlh->nlmsg_seq,
783
RTM_NEWNETCONF, NLM_F_MULTI,
784
NETCONFA_ALL);
785
if (err < 0)
786
goto done;
787
ctx->all_default++;
788
}
789
if (ctx->all_default == 1) {
790
err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
791
net->ipv6.devconf_dflt,
792
NETLINK_CB(cb->skb).portid,
793
nlh->nlmsg_seq,
794
RTM_NEWNETCONF, NLM_F_MULTI,
795
NETCONFA_ALL);
796
if (err < 0)
797
goto done;
798
ctx->all_default++;
799
}
800
done:
801
rcu_read_unlock();
802
return err;
803
}
804
805
#ifdef CONFIG_SYSCTL
806
static void dev_forward_change(struct inet6_dev *idev)
807
{
808
struct net_device *dev;
809
struct inet6_ifaddr *ifa;
810
LIST_HEAD(tmp_addr_list);
811
812
if (!idev)
813
return;
814
dev = idev->dev;
815
if (idev->cnf.forwarding)
816
dev_disable_lro(dev);
817
if (dev->flags & IFF_MULTICAST) {
818
if (idev->cnf.forwarding) {
819
ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
820
ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
821
ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
822
} else {
823
ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
824
ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
825
ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
826
}
827
}
828
829
read_lock_bh(&idev->lock);
830
list_for_each_entry(ifa, &idev->addr_list, if_list) {
831
if (ifa->flags&IFA_F_TENTATIVE)
832
continue;
833
list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
834
}
835
read_unlock_bh(&idev->lock);
836
837
while (!list_empty(&tmp_addr_list)) {
838
ifa = list_first_entry(&tmp_addr_list,
839
struct inet6_ifaddr, if_list_aux);
840
list_del(&ifa->if_list_aux);
841
if (idev->cnf.forwarding)
842
addrconf_join_anycast(ifa);
843
else
844
addrconf_leave_anycast(ifa);
845
}
846
847
inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
848
NETCONFA_FORWARDING,
849
dev->ifindex, &idev->cnf);
850
}
851
852
853
static void addrconf_forward_change(struct net *net, __s32 newf)
854
{
855
struct net_device *dev;
856
struct inet6_dev *idev;
857
858
for_each_netdev(net, dev) {
859
idev = __in6_dev_get_rtnl_net(dev);
860
if (idev) {
861
int changed = (!idev->cnf.forwarding) ^ (!newf);
862
/* Disabling all.forwarding sets 0 to force_forwarding for all interfaces */
863
if (newf == 0)
864
WRITE_ONCE(idev->cnf.force_forwarding, 0);
865
866
WRITE_ONCE(idev->cnf.forwarding, newf);
867
if (changed)
868
dev_forward_change(idev);
869
}
870
}
871
}
872
873
static int addrconf_fixup_forwarding(const struct ctl_table *table, int *p, int newf)
874
{
875
struct net *net = (struct net *)table->extra2;
876
int old;
877
878
if (!rtnl_net_trylock(net))
879
return restart_syscall();
880
881
old = *p;
882
WRITE_ONCE(*p, newf);
883
884
if (p == &net->ipv6.devconf_dflt->forwarding) {
885
if ((!newf) ^ (!old))
886
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
887
NETCONFA_FORWARDING,
888
NETCONFA_IFINDEX_DEFAULT,
889
net->ipv6.devconf_dflt);
890
rtnl_net_unlock(net);
891
return 0;
892
}
893
894
if (p == &net->ipv6.devconf_all->forwarding) {
895
int old_dflt = net->ipv6.devconf_dflt->forwarding;
896
897
WRITE_ONCE(net->ipv6.devconf_dflt->forwarding, newf);
898
if ((!newf) ^ (!old_dflt))
899
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
900
NETCONFA_FORWARDING,
901
NETCONFA_IFINDEX_DEFAULT,
902
net->ipv6.devconf_dflt);
903
904
addrconf_forward_change(net, newf);
905
if ((!newf) ^ (!old))
906
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
907
NETCONFA_FORWARDING,
908
NETCONFA_IFINDEX_ALL,
909
net->ipv6.devconf_all);
910
} else if ((!newf) ^ (!old))
911
dev_forward_change((struct inet6_dev *)table->extra1);
912
rtnl_net_unlock(net);
913
914
if (newf)
915
rt6_purge_dflt_routers(net);
916
return 1;
917
}
918
919
static void addrconf_linkdown_change(struct net *net, __s32 newf)
920
{
921
struct net_device *dev;
922
struct inet6_dev *idev;
923
924
for_each_netdev(net, dev) {
925
idev = __in6_dev_get_rtnl_net(dev);
926
if (idev) {
927
int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
928
929
WRITE_ONCE(idev->cnf.ignore_routes_with_linkdown, newf);
930
if (changed)
931
inet6_netconf_notify_devconf(dev_net(dev),
932
RTM_NEWNETCONF,
933
NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
934
dev->ifindex,
935
&idev->cnf);
936
}
937
}
938
}
939
940
static int addrconf_fixup_linkdown(const struct ctl_table *table, int *p, int newf)
941
{
942
struct net *net = (struct net *)table->extra2;
943
int old;
944
945
if (!rtnl_net_trylock(net))
946
return restart_syscall();
947
948
old = *p;
949
WRITE_ONCE(*p, newf);
950
951
if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
952
if ((!newf) ^ (!old))
953
inet6_netconf_notify_devconf(net,
954
RTM_NEWNETCONF,
955
NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
956
NETCONFA_IFINDEX_DEFAULT,
957
net->ipv6.devconf_dflt);
958
rtnl_net_unlock(net);
959
return 0;
960
}
961
962
if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
963
WRITE_ONCE(net->ipv6.devconf_dflt->ignore_routes_with_linkdown, newf);
964
addrconf_linkdown_change(net, newf);
965
if ((!newf) ^ (!old))
966
inet6_netconf_notify_devconf(net,
967
RTM_NEWNETCONF,
968
NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
969
NETCONFA_IFINDEX_ALL,
970
net->ipv6.devconf_all);
971
}
972
973
rtnl_net_unlock(net);
974
975
return 1;
976
}
977
978
#endif
979
980
/* Nobody refers to this ifaddr, destroy it */
981
void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
982
{
983
WARN_ON(!hlist_unhashed(&ifp->addr_lst));
984
985
#ifdef NET_REFCNT_DEBUG
986
pr_debug("%s\n", __func__);
987
#endif
988
989
in6_dev_put(ifp->idev);
990
991
if (cancel_delayed_work(&ifp->dad_work))
992
pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
993
ifp);
994
995
if (ifp->state != INET6_IFADDR_STATE_DEAD) {
996
pr_warn("Freeing alive inet6 address %p\n", ifp);
997
return;
998
}
999
1000
kfree_rcu(ifp, rcu);
1001
}
1002
1003
static void
1004
ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
1005
{
1006
struct list_head *p;
1007
int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
1008
1009
/*
1010
* Each device address list is sorted in order of scope -
1011
* global before linklocal.
1012
*/
1013
list_for_each(p, &idev->addr_list) {
1014
struct inet6_ifaddr *ifa
1015
= list_entry(p, struct inet6_ifaddr, if_list);
1016
if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
1017
break;
1018
}
1019
1020
list_add_tail_rcu(&ifp->if_list, p);
1021
}
1022
1023
static u32 inet6_addr_hash(const struct net *net, const struct in6_addr *addr)
1024
{
1025
u32 val = __ipv6_addr_jhash(addr, net_hash_mix(net));
1026
1027
return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
1028
}
1029
1030
static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1031
struct net_device *dev, unsigned int hash)
1032
{
1033
struct inet6_ifaddr *ifp;
1034
1035
hlist_for_each_entry(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
1036
if (ipv6_addr_equal(&ifp->addr, addr)) {
1037
if (!dev || ifp->idev->dev == dev)
1038
return true;
1039
}
1040
}
1041
return false;
1042
}
1043
1044
static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa)
1045
{
1046
struct net *net = dev_net(dev);
1047
unsigned int hash = inet6_addr_hash(net, &ifa->addr);
1048
int err = 0;
1049
1050
spin_lock_bh(&net->ipv6.addrconf_hash_lock);
1051
1052
/* Ignore adding duplicate addresses on an interface */
1053
if (ipv6_chk_same_addr(net, &ifa->addr, dev, hash)) {
1054
netdev_dbg(dev, "ipv6_add_addr: already assigned\n");
1055
err = -EEXIST;
1056
} else {
1057
hlist_add_head_rcu(&ifa->addr_lst, &net->ipv6.inet6_addr_lst[hash]);
1058
}
1059
1060
spin_unlock_bh(&net->ipv6.addrconf_hash_lock);
1061
1062
return err;
1063
}
1064
1065
/* On success it returns ifp with increased reference count */
1066
1067
static struct inet6_ifaddr *
1068
ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
1069
bool can_block, struct netlink_ext_ack *extack)
1070
{
1071
gfp_t gfp_flags = can_block ? GFP_KERNEL : GFP_ATOMIC;
1072
int addr_type = ipv6_addr_type(cfg->pfx);
1073
struct net *net = dev_net(idev->dev);
1074
struct inet6_ifaddr *ifa = NULL;
1075
struct fib6_info *f6i = NULL;
1076
int err = 0;
1077
1078
if (addr_type == IPV6_ADDR_ANY) {
1079
NL_SET_ERR_MSG_MOD(extack, "Invalid address");
1080
return ERR_PTR(-EADDRNOTAVAIL);
1081
} else if (addr_type & IPV6_ADDR_MULTICAST &&
1082
!(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) {
1083
NL_SET_ERR_MSG_MOD(extack, "Cannot assign multicast address without \"IFA_F_MCAUTOJOIN\" flag");
1084
return ERR_PTR(-EADDRNOTAVAIL);
1085
} else if (!(idev->dev->flags & IFF_LOOPBACK) &&
1086
!netif_is_l3_master(idev->dev) &&
1087
addr_type & IPV6_ADDR_LOOPBACK) {
1088
NL_SET_ERR_MSG_MOD(extack, "Cannot assign loopback address on this device");
1089
return ERR_PTR(-EADDRNOTAVAIL);
1090
}
1091
1092
if (idev->dead) {
1093
NL_SET_ERR_MSG_MOD(extack, "device is going away");
1094
err = -ENODEV;
1095
goto out;
1096
}
1097
1098
if (idev->cnf.disable_ipv6) {
1099
NL_SET_ERR_MSG_MOD(extack, "IPv6 is disabled on this device");
1100
err = -EACCES;
1101
goto out;
1102
}
1103
1104
/* validator notifier needs to be blocking;
1105
* do not call in atomic context
1106
*/
1107
if (can_block) {
1108
struct in6_validator_info i6vi = {
1109
.i6vi_addr = *cfg->pfx,
1110
.i6vi_dev = idev,
1111
.extack = extack,
1112
};
1113
1114
err = inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi);
1115
err = notifier_to_errno(err);
1116
if (err < 0)
1117
goto out;
1118
}
1119
1120
ifa = kzalloc(sizeof(*ifa), gfp_flags | __GFP_ACCOUNT);
1121
if (!ifa) {
1122
err = -ENOBUFS;
1123
goto out;
1124
}
1125
1126
f6i = addrconf_f6i_alloc(net, idev, cfg->pfx, false, gfp_flags, extack);
1127
if (IS_ERR(f6i)) {
1128
err = PTR_ERR(f6i);
1129
f6i = NULL;
1130
goto out;
1131
}
1132
1133
neigh_parms_data_state_setall(idev->nd_parms);
1134
1135
ifa->addr = *cfg->pfx;
1136
if (cfg->peer_pfx)
1137
ifa->peer_addr = *cfg->peer_pfx;
1138
1139
spin_lock_init(&ifa->lock);
1140
INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
1141
INIT_HLIST_NODE(&ifa->addr_lst);
1142
ifa->scope = cfg->scope;
1143
ifa->prefix_len = cfg->plen;
1144
ifa->rt_priority = cfg->rt_priority;
1145
ifa->flags = cfg->ifa_flags;
1146
ifa->ifa_proto = cfg->ifa_proto;
1147
/* No need to add the TENTATIVE flag for addresses with NODAD */
1148
if (!(cfg->ifa_flags & IFA_F_NODAD))
1149
ifa->flags |= IFA_F_TENTATIVE;
1150
ifa->valid_lft = cfg->valid_lft;
1151
ifa->prefered_lft = cfg->preferred_lft;
1152
ifa->cstamp = ifa->tstamp = jiffies;
1153
ifa->tokenized = false;
1154
1155
ifa->rt = f6i;
1156
1157
ifa->idev = idev;
1158
in6_dev_hold(idev);
1159
1160
/* For caller */
1161
refcount_set(&ifa->refcnt, 1);
1162
1163
rcu_read_lock();
1164
1165
err = ipv6_add_addr_hash(idev->dev, ifa);
1166
if (err < 0) {
1167
rcu_read_unlock();
1168
goto out;
1169
}
1170
1171
write_lock_bh(&idev->lock);
1172
1173
/* Add to inet6_dev unicast addr list. */
1174
ipv6_link_dev_addr(idev, ifa);
1175
1176
if (ifa->flags&IFA_F_TEMPORARY) {
1177
list_add(&ifa->tmp_list, &idev->tempaddr_list);
1178
in6_ifa_hold(ifa);
1179
}
1180
1181
in6_ifa_hold(ifa);
1182
write_unlock_bh(&idev->lock);
1183
1184
rcu_read_unlock();
1185
1186
inet6addr_notifier_call_chain(NETDEV_UP, ifa);
1187
out:
1188
if (unlikely(err < 0)) {
1189
fib6_info_release(f6i);
1190
1191
if (ifa) {
1192
if (ifa->idev)
1193
in6_dev_put(ifa->idev);
1194
kfree(ifa);
1195
}
1196
ifa = ERR_PTR(err);
1197
}
1198
1199
return ifa;
1200
}
1201
1202
enum cleanup_prefix_rt_t {
1203
CLEANUP_PREFIX_RT_NOP, /* no cleanup action for prefix route */
1204
CLEANUP_PREFIX_RT_DEL, /* delete the prefix route */
1205
CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */
1206
};
1207
1208
/*
1209
* Check, whether the prefix for ifp would still need a prefix route
1210
* after deleting ifp. The function returns one of the CLEANUP_PREFIX_RT_*
1211
* constants.
1212
*
1213
* 1) we don't purge prefix if address was not permanent.
1214
* prefix is managed by its own lifetime.
1215
* 2) we also don't purge, if the address was IFA_F_NOPREFIXROUTE.
1216
* 3) if there are no addresses, delete prefix.
1217
* 4) if there are still other permanent address(es),
1218
* corresponding prefix is still permanent.
1219
* 5) if there are still other addresses with IFA_F_NOPREFIXROUTE,
1220
* don't purge the prefix, assume user space is managing it.
1221
* 6) otherwise, update prefix lifetime to the
1222
* longest valid lifetime among the corresponding
1223
* addresses on the device.
1224
* Note: subsequent RA will update lifetime.
1225
**/
1226
static enum cleanup_prefix_rt_t
1227
check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1228
{
1229
struct inet6_ifaddr *ifa;
1230
struct inet6_dev *idev = ifp->idev;
1231
unsigned long lifetime;
1232
enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL;
1233
1234
*expires = jiffies;
1235
1236
list_for_each_entry(ifa, &idev->addr_list, if_list) {
1237
if (ifa == ifp)
1238
continue;
1239
if (ifa->prefix_len != ifp->prefix_len ||
1240
!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1241
ifp->prefix_len))
1242
continue;
1243
if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
1244
return CLEANUP_PREFIX_RT_NOP;
1245
1246
action = CLEANUP_PREFIX_RT_EXPIRE;
1247
1248
spin_lock(&ifa->lock);
1249
1250
lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
1251
/*
1252
* Note: Because this address is
1253
* not permanent, lifetime <
1254
* LONG_MAX / HZ here.
1255
*/
1256
if (time_before(*expires, ifa->tstamp + lifetime * HZ))
1257
*expires = ifa->tstamp + lifetime * HZ;
1258
spin_unlock(&ifa->lock);
1259
}
1260
1261
return action;
1262
}
1263
1264
static void
1265
cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
1266
bool del_rt, bool del_peer)
1267
{
1268
struct fib6_table *table;
1269
struct fib6_info *f6i;
1270
1271
f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
1272
ifp->prefix_len,
1273
ifp->idev->dev, 0, RTF_DEFAULT, true);
1274
if (f6i) {
1275
if (del_rt)
1276
ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
1277
else {
1278
if (!(f6i->fib6_flags & RTF_EXPIRES)) {
1279
table = f6i->fib6_table;
1280
spin_lock_bh(&table->tb6_lock);
1281
1282
fib6_set_expires(f6i, expires);
1283
fib6_add_gc_list(f6i);
1284
1285
spin_unlock_bh(&table->tb6_lock);
1286
}
1287
fib6_info_release(f6i);
1288
}
1289
}
1290
}
1291
1292
1293
/* This function wants to get referenced ifp and releases it before return */
1294
1295
static void ipv6_del_addr(struct inet6_ifaddr *ifp)
1296
{
1297
enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
1298
struct net *net = dev_net(ifp->idev->dev);
1299
unsigned long expires;
1300
int state;
1301
1302
ASSERT_RTNL();
1303
1304
spin_lock_bh(&ifp->lock);
1305
state = ifp->state;
1306
ifp->state = INET6_IFADDR_STATE_DEAD;
1307
spin_unlock_bh(&ifp->lock);
1308
1309
if (state == INET6_IFADDR_STATE_DEAD)
1310
goto out;
1311
1312
spin_lock_bh(&net->ipv6.addrconf_hash_lock);
1313
hlist_del_init_rcu(&ifp->addr_lst);
1314
spin_unlock_bh(&net->ipv6.addrconf_hash_lock);
1315
1316
write_lock_bh(&ifp->idev->lock);
1317
1318
if (ifp->flags&IFA_F_TEMPORARY) {
1319
list_del(&ifp->tmp_list);
1320
if (ifp->ifpub) {
1321
in6_ifa_put(ifp->ifpub);
1322
ifp->ifpub = NULL;
1323
}
1324
__in6_ifa_put(ifp);
1325
}
1326
1327
if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE))
1328
action = check_cleanup_prefix_route(ifp, &expires);
1329
1330
list_del_rcu(&ifp->if_list);
1331
__in6_ifa_put(ifp);
1332
1333
write_unlock_bh(&ifp->idev->lock);
1334
1335
addrconf_del_dad_work(ifp);
1336
1337
ipv6_ifa_notify(RTM_DELADDR, ifp);
1338
1339
inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
1340
1341
if (action != CLEANUP_PREFIX_RT_NOP) {
1342
cleanup_prefix_route(ifp, expires,
1343
action == CLEANUP_PREFIX_RT_DEL, false);
1344
}
1345
1346
/* clean up prefsrc entries */
1347
rt6_remove_prefsrc(ifp);
1348
out:
1349
in6_ifa_put(ifp);
1350
}
1351
1352
static unsigned long ipv6_get_regen_advance(const struct inet6_dev *idev)
1353
{
1354
return READ_ONCE(idev->cnf.regen_min_advance) +
1355
READ_ONCE(idev->cnf.regen_max_retry) *
1356
READ_ONCE(idev->cnf.dad_transmits) *
1357
max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
1358
}
1359
1360
static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
1361
{
1362
struct inet6_dev *idev = ifp->idev;
1363
unsigned long tmp_tstamp, age;
1364
unsigned long regen_advance;
1365
unsigned long now = jiffies;
1366
u32 if_public_preferred_lft;
1367
s32 cnf_temp_preferred_lft;
1368
struct inet6_ifaddr *ift;
1369
struct ifa6_config cfg;
1370
long max_desync_factor;
1371
struct in6_addr addr;
1372
int ret = 0;
1373
1374
write_lock_bh(&idev->lock);
1375
1376
retry:
1377
in6_dev_hold(idev);
1378
if (READ_ONCE(idev->cnf.use_tempaddr) <= 0) {
1379
write_unlock_bh(&idev->lock);
1380
pr_info("%s: use_tempaddr is disabled\n", __func__);
1381
in6_dev_put(idev);
1382
ret = -1;
1383
goto out;
1384
}
1385
spin_lock_bh(&ifp->lock);
1386
if (ifp->regen_count++ >= READ_ONCE(idev->cnf.regen_max_retry)) {
1387
WRITE_ONCE(idev->cnf.use_tempaddr, -1); /*XXX*/
1388
spin_unlock_bh(&ifp->lock);
1389
write_unlock_bh(&idev->lock);
1390
pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
1391
__func__);
1392
in6_dev_put(idev);
1393
ret = -1;
1394
goto out;
1395
}
1396
in6_ifa_hold(ifp);
1397
memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
1398
ipv6_gen_rnd_iid(&addr);
1399
1400
age = (now - ifp->tstamp) / HZ;
1401
1402
regen_advance = ipv6_get_regen_advance(idev);
1403
1404
/* recalculate max_desync_factor each time and update
1405
* idev->desync_factor if it's larger
1406
*/
1407
cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
1408
max_desync_factor = min_t(long,
1409
READ_ONCE(idev->cnf.max_desync_factor),
1410
cnf_temp_preferred_lft - regen_advance);
1411
1412
if (unlikely(idev->desync_factor > max_desync_factor)) {
1413
if (max_desync_factor > 0) {
1414
get_random_bytes(&idev->desync_factor,
1415
sizeof(idev->desync_factor));
1416
idev->desync_factor %= max_desync_factor;
1417
} else {
1418
idev->desync_factor = 0;
1419
}
1420
}
1421
1422
if_public_preferred_lft = ifp->prefered_lft;
1423
1424
memset(&cfg, 0, sizeof(cfg));
1425
cfg.valid_lft = min_t(__u32, ifp->valid_lft,
1426
READ_ONCE(idev->cnf.temp_valid_lft) + age);
1427
cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor;
1428
cfg.preferred_lft = min_t(__u32, if_public_preferred_lft, cfg.preferred_lft);
1429
cfg.preferred_lft = min_t(__u32, cfg.valid_lft, cfg.preferred_lft);
1430
1431
cfg.plen = ifp->prefix_len;
1432
tmp_tstamp = ifp->tstamp;
1433
spin_unlock_bh(&ifp->lock);
1434
1435
write_unlock_bh(&idev->lock);
1436
1437
/* From RFC 4941:
1438
*
1439
* A temporary address is created only if this calculated Preferred
1440
* Lifetime is greater than REGEN_ADVANCE time units. In
1441
* particular, an implementation must not create a temporary address
1442
* with a zero Preferred Lifetime.
1443
*
1444
* ...
1445
*
1446
* When creating a temporary address, the lifetime values MUST be
1447
* derived from the corresponding prefix as follows:
1448
*
1449
* ...
1450
*
1451
* * Its Preferred Lifetime is the lower of the Preferred Lifetime
1452
* of the public address or TEMP_PREFERRED_LIFETIME -
1453
* DESYNC_FACTOR.
1454
*
1455
* To comply with the RFC's requirements, clamp the preferred lifetime
1456
* to a minimum of regen_advance, unless that would exceed valid_lft or
1457
* ifp->prefered_lft.
1458
*
1459
* Use age calculation as in addrconf_verify to avoid unnecessary
1460
* temporary addresses being generated.
1461
*/
1462
age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
1463
if (cfg.preferred_lft <= regen_advance + age) {
1464
cfg.preferred_lft = regen_advance + age + 1;
1465
if (cfg.preferred_lft > cfg.valid_lft ||
1466
cfg.preferred_lft > if_public_preferred_lft) {
1467
in6_ifa_put(ifp);
1468
in6_dev_put(idev);
1469
ret = -1;
1470
goto out;
1471
}
1472
}
1473
1474
cfg.ifa_flags = IFA_F_TEMPORARY;
1475
/* set in addrconf_prefix_rcv() */
1476
if (ifp->flags & IFA_F_OPTIMISTIC)
1477
cfg.ifa_flags |= IFA_F_OPTIMISTIC;
1478
1479
cfg.pfx = &addr;
1480
cfg.scope = ipv6_addr_scope(cfg.pfx);
1481
1482
ift = ipv6_add_addr(idev, &cfg, block, NULL);
1483
if (IS_ERR(ift)) {
1484
in6_ifa_put(ifp);
1485
in6_dev_put(idev);
1486
pr_info("%s: retry temporary address regeneration\n", __func__);
1487
write_lock_bh(&idev->lock);
1488
goto retry;
1489
}
1490
1491
spin_lock_bh(&ift->lock);
1492
ift->ifpub = ifp;
1493
ift->cstamp = now;
1494
ift->tstamp = tmp_tstamp;
1495
spin_unlock_bh(&ift->lock);
1496
1497
addrconf_dad_start(ift);
1498
in6_ifa_put(ift);
1499
in6_dev_put(idev);
1500
out:
1501
return ret;
1502
}
1503
1504
/*
1505
* Choose an appropriate source address (RFC3484)
1506
*/
1507
enum {
1508
IPV6_SADDR_RULE_INIT = 0,
1509
IPV6_SADDR_RULE_LOCAL,
1510
IPV6_SADDR_RULE_SCOPE,
1511
IPV6_SADDR_RULE_PREFERRED,
1512
#ifdef CONFIG_IPV6_MIP6
1513
IPV6_SADDR_RULE_HOA,
1514
#endif
1515
IPV6_SADDR_RULE_OIF,
1516
IPV6_SADDR_RULE_LABEL,
1517
IPV6_SADDR_RULE_PRIVACY,
1518
IPV6_SADDR_RULE_ORCHID,
1519
IPV6_SADDR_RULE_PREFIX,
1520
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1521
IPV6_SADDR_RULE_NOT_OPTIMISTIC,
1522
#endif
1523
IPV6_SADDR_RULE_MAX
1524
};
1525
1526
struct ipv6_saddr_score {
1527
int rule;
1528
int addr_type;
1529
struct inet6_ifaddr *ifa;
1530
DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);
1531
int scopedist;
1532
int matchlen;
1533
};
1534
1535
struct ipv6_saddr_dst {
1536
const struct in6_addr *addr;
1537
int ifindex;
1538
int scope;
1539
int label;
1540
unsigned int prefs;
1541
};
1542
1543
static inline int ipv6_saddr_preferred(int type)
1544
{
1545
if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
1546
return 1;
1547
return 0;
1548
}
1549
1550
static bool ipv6_use_optimistic_addr(const struct net *net,
1551
const struct inet6_dev *idev)
1552
{
1553
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1554
if (!idev)
1555
return false;
1556
if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) &&
1557
!READ_ONCE(idev->cnf.optimistic_dad))
1558
return false;
1559
if (!READ_ONCE(net->ipv6.devconf_all->use_optimistic) &&
1560
!READ_ONCE(idev->cnf.use_optimistic))
1561
return false;
1562
1563
return true;
1564
#else
1565
return false;
1566
#endif
1567
}
1568
1569
static bool ipv6_allow_optimistic_dad(const struct net *net,
1570
const struct inet6_dev *idev)
1571
{
1572
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1573
if (!idev)
1574
return false;
1575
if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) &&
1576
!READ_ONCE(idev->cnf.optimistic_dad))
1577
return false;
1578
1579
return true;
1580
#else
1581
return false;
1582
#endif
1583
}
1584
1585
static int ipv6_get_saddr_eval(struct net *net,
1586
struct ipv6_saddr_score *score,
1587
struct ipv6_saddr_dst *dst,
1588
int i)
1589
{
1590
int ret;
1591
1592
if (i <= score->rule) {
1593
switch (i) {
1594
case IPV6_SADDR_RULE_SCOPE:
1595
ret = score->scopedist;
1596
break;
1597
case IPV6_SADDR_RULE_PREFIX:
1598
ret = score->matchlen;
1599
break;
1600
default:
1601
ret = !!test_bit(i, score->scorebits);
1602
}
1603
goto out;
1604
}
1605
1606
switch (i) {
1607
case IPV6_SADDR_RULE_INIT:
1608
/* Rule 0: remember if hiscore is not ready yet */
1609
ret = !!score->ifa;
1610
break;
1611
case IPV6_SADDR_RULE_LOCAL:
1612
/* Rule 1: Prefer same address */
1613
ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
1614
break;
1615
case IPV6_SADDR_RULE_SCOPE:
1616
/* Rule 2: Prefer appropriate scope
1617
*
1618
* ret
1619
* ^
1620
* -1 | d 15
1621
* ---+--+-+---> scope
1622
* |
1623
* | d is scope of the destination.
1624
* B-d | \
1625
* | \ <- smaller scope is better if
1626
* B-15 | \ if scope is enough for destination.
1627
* | ret = B - scope (-1 <= scope >= d <= 15).
1628
* d-C-1 | /
1629
* |/ <- greater is better
1630
* -C / if scope is not enough for destination.
1631
* /| ret = scope - C (-1 <= d < scope <= 15).
1632
*
1633
* d - C - 1 < B -15 (for all -1 <= d <= 15).
1634
* C > d + 14 - B >= 15 + 14 - B = 29 - B.
1635
* Assume B = 0 and we get C > 29.
1636
*/
1637
ret = __ipv6_addr_src_scope(score->addr_type);
1638
if (ret >= dst->scope)
1639
ret = -ret;
1640
else
1641
ret -= 128; /* 30 is enough */
1642
score->scopedist = ret;
1643
break;
1644
case IPV6_SADDR_RULE_PREFERRED:
1645
{
1646
/* Rule 3: Avoid deprecated and optimistic addresses */
1647
u8 avoid = IFA_F_DEPRECATED;
1648
1649
if (!ipv6_use_optimistic_addr(net, score->ifa->idev))
1650
avoid |= IFA_F_OPTIMISTIC;
1651
ret = ipv6_saddr_preferred(score->addr_type) ||
1652
!(score->ifa->flags & avoid);
1653
break;
1654
}
1655
#ifdef CONFIG_IPV6_MIP6
1656
case IPV6_SADDR_RULE_HOA:
1657
{
1658
/* Rule 4: Prefer home address */
1659
int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
1660
ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
1661
break;
1662
}
1663
#endif
1664
case IPV6_SADDR_RULE_OIF:
1665
/* Rule 5: Prefer outgoing interface */
1666
ret = (!dst->ifindex ||
1667
dst->ifindex == score->ifa->idev->dev->ifindex);
1668
break;
1669
case IPV6_SADDR_RULE_LABEL:
1670
/* Rule 6: Prefer matching label */
1671
ret = ipv6_addr_label(net,
1672
&score->ifa->addr, score->addr_type,
1673
score->ifa->idev->dev->ifindex) == dst->label;
1674
break;
1675
case IPV6_SADDR_RULE_PRIVACY:
1676
{
1677
/* Rule 7: Prefer public address
1678
* Note: prefer temporary address if use_tempaddr >= 2
1679
*/
1680
int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
1681
!!(dst->prefs & IPV6_PREFER_SRC_TMP) :
1682
READ_ONCE(score->ifa->idev->cnf.use_tempaddr) >= 2;
1683
ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
1684
break;
1685
}
1686
case IPV6_SADDR_RULE_ORCHID:
1687
/* Rule 8-: Prefer ORCHID vs ORCHID or
1688
* non-ORCHID vs non-ORCHID
1689
*/
1690
ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
1691
ipv6_addr_orchid(dst->addr));
1692
break;
1693
case IPV6_SADDR_RULE_PREFIX:
1694
/* Rule 8: Use longest matching prefix */
1695
ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
1696
if (ret > score->ifa->prefix_len)
1697
ret = score->ifa->prefix_len;
1698
score->matchlen = ret;
1699
break;
1700
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1701
case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
1702
/* Optimistic addresses still have lower precedence than other
1703
* preferred addresses.
1704
*/
1705
ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
1706
break;
1707
#endif
1708
default:
1709
ret = 0;
1710
}
1711
1712
if (ret)
1713
__set_bit(i, score->scorebits);
1714
score->rule = i;
1715
out:
1716
return ret;
1717
}
1718
1719
static int __ipv6_dev_get_saddr(struct net *net,
1720
struct ipv6_saddr_dst *dst,
1721
struct inet6_dev *idev,
1722
struct ipv6_saddr_score *scores,
1723
int hiscore_idx)
1724
{
1725
struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
1726
1727
list_for_each_entry_rcu(score->ifa, &idev->addr_list, if_list) {
1728
int i;
1729
1730
/*
1731
* - Tentative Address (RFC2462 section 5.4)
1732
* - A tentative address is not considered
1733
* "assigned to an interface" in the traditional
1734
* sense, unless it is also flagged as optimistic.
1735
* - Candidate Source Address (section 4)
1736
* - In any case, anycast addresses, multicast
1737
* addresses, and the unspecified address MUST
1738
* NOT be included in a candidate set.
1739
*/
1740
if ((score->ifa->flags & IFA_F_TENTATIVE) &&
1741
(!(score->ifa->flags & IFA_F_OPTIMISTIC)))
1742
continue;
1743
1744
score->addr_type = __ipv6_addr_type(&score->ifa->addr);
1745
1746
if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
1747
score->addr_type & IPV6_ADDR_MULTICAST)) {
1748
net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
1749
idev->dev->name);
1750
continue;
1751
}
1752
1753
score->rule = -1;
1754
bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
1755
1756
for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1757
int minihiscore, miniscore;
1758
1759
minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
1760
miniscore = ipv6_get_saddr_eval(net, score, dst, i);
1761
1762
if (minihiscore > miniscore) {
1763
if (i == IPV6_SADDR_RULE_SCOPE &&
1764
score->scopedist > 0) {
1765
/*
1766
* special case:
1767
* each remaining entry
1768
* has too small (not enough)
1769
* scope, because ifa entries
1770
* are sorted by their scope
1771
* values.
1772
*/
1773
goto out;
1774
}
1775
break;
1776
} else if (minihiscore < miniscore) {
1777
swap(hiscore, score);
1778
hiscore_idx = 1 - hiscore_idx;
1779
1780
/* restore our iterator */
1781
score->ifa = hiscore->ifa;
1782
1783
break;
1784
}
1785
}
1786
}
1787
out:
1788
return hiscore_idx;
1789
}
1790
1791
static int ipv6_get_saddr_master(struct net *net,
1792
const struct net_device *dst_dev,
1793
const struct net_device *master,
1794
struct ipv6_saddr_dst *dst,
1795
struct ipv6_saddr_score *scores,
1796
int hiscore_idx)
1797
{
1798
struct inet6_dev *idev;
1799
1800
idev = __in6_dev_get(dst_dev);
1801
if (idev)
1802
hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1803
scores, hiscore_idx);
1804
1805
idev = __in6_dev_get(master);
1806
if (idev)
1807
hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1808
scores, hiscore_idx);
1809
1810
return hiscore_idx;
1811
}
1812
1813
int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1814
const struct in6_addr *daddr, unsigned int prefs,
1815
struct in6_addr *saddr)
1816
{
1817
struct ipv6_saddr_score scores[2], *hiscore;
1818
struct ipv6_saddr_dst dst;
1819
struct inet6_dev *idev;
1820
struct net_device *dev;
1821
int dst_type;
1822
bool use_oif_addr = false;
1823
int hiscore_idx = 0;
1824
int ret = 0;
1825
1826
dst_type = __ipv6_addr_type(daddr);
1827
dst.addr = daddr;
1828
dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1829
dst.scope = __ipv6_addr_src_scope(dst_type);
1830
dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1831
dst.prefs = prefs;
1832
1833
scores[hiscore_idx].rule = -1;
1834
scores[hiscore_idx].ifa = NULL;
1835
1836
rcu_read_lock();
1837
1838
/* Candidate Source Address (section 4)
1839
* - multicast and link-local destination address,
1840
* the set of candidate source address MUST only
1841
* include addresses assigned to interfaces
1842
* belonging to the same link as the outgoing
1843
* interface.
1844
* (- For site-local destination addresses, the
1845
* set of candidate source addresses MUST only
1846
* include addresses assigned to interfaces
1847
* belonging to the same site as the outgoing
1848
* interface.)
1849
* - "It is RECOMMENDED that the candidate source addresses
1850
* be the set of unicast addresses assigned to the
1851
* interface that will be used to send to the destination
1852
* (the 'outgoing' interface)." (RFC 6724)
1853
*/
1854
if (dst_dev) {
1855
idev = __in6_dev_get(dst_dev);
1856
if ((dst_type & IPV6_ADDR_MULTICAST) ||
1857
dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
1858
(idev && READ_ONCE(idev->cnf.use_oif_addrs_only))) {
1859
use_oif_addr = true;
1860
}
1861
}
1862
1863
if (use_oif_addr) {
1864
if (idev)
1865
hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1866
} else {
1867
const struct net_device *master;
1868
int master_idx = 0;
1869
1870
/* if dst_dev exists and is enslaved to an L3 device, then
1871
* prefer addresses from dst_dev and then the master over
1872
* any other enslaved devices in the L3 domain.
1873
*/
1874
master = l3mdev_master_dev_rcu(dst_dev);
1875
if (master) {
1876
master_idx = master->ifindex;
1877
1878
hiscore_idx = ipv6_get_saddr_master(net, dst_dev,
1879
master, &dst,
1880
scores, hiscore_idx);
1881
1882
if (scores[hiscore_idx].ifa &&
1883
scores[hiscore_idx].scopedist >= 0)
1884
goto out;
1885
}
1886
1887
for_each_netdev_rcu(net, dev) {
1888
/* only consider addresses on devices in the
1889
* same L3 domain
1890
*/
1891
if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1892
continue;
1893
idev = __in6_dev_get(dev);
1894
if (!idev)
1895
continue;
1896
hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1897
}
1898
}
1899
1900
out:
1901
hiscore = &scores[hiscore_idx];
1902
if (!hiscore->ifa)
1903
ret = -EADDRNOTAVAIL;
1904
else
1905
*saddr = hiscore->ifa->addr;
1906
1907
rcu_read_unlock();
1908
return ret;
1909
}
1910
EXPORT_SYMBOL(ipv6_dev_get_saddr);
1911
1912
static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
1913
u32 banned_flags)
1914
{
1915
struct inet6_ifaddr *ifp;
1916
int err = -EADDRNOTAVAIL;
1917
1918
list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
1919
if (ifp->scope > IFA_LINK)
1920
break;
1921
if (ifp->scope == IFA_LINK &&
1922
!(ifp->flags & banned_flags)) {
1923
*addr = ifp->addr;
1924
err = 0;
1925
break;
1926
}
1927
}
1928
return err;
1929
}
1930
1931
int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1932
u32 banned_flags)
1933
{
1934
struct inet6_dev *idev;
1935
int err = -EADDRNOTAVAIL;
1936
1937
rcu_read_lock();
1938
idev = __in6_dev_get(dev);
1939
if (idev) {
1940
read_lock_bh(&idev->lock);
1941
err = __ipv6_get_lladdr(idev, addr, banned_flags);
1942
read_unlock_bh(&idev->lock);
1943
}
1944
rcu_read_unlock();
1945
return err;
1946
}
1947
1948
static int ipv6_count_addresses(const struct inet6_dev *idev)
1949
{
1950
const struct inet6_ifaddr *ifp;
1951
int cnt = 0;
1952
1953
rcu_read_lock();
1954
list_for_each_entry_rcu(ifp, &idev->addr_list, if_list)
1955
cnt++;
1956
rcu_read_unlock();
1957
return cnt;
1958
}
1959
1960
int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1961
const struct net_device *dev, int strict)
1962
{
1963
return ipv6_chk_addr_and_flags(net, addr, dev, !dev,
1964
strict, IFA_F_TENTATIVE);
1965
}
1966
EXPORT_SYMBOL(ipv6_chk_addr);
1967
1968
/* device argument is used to find the L3 domain of interest. If
1969
* skip_dev_check is set, then the ifp device is not checked against
1970
* the passed in dev argument. So the 2 cases for addresses checks are:
1971
* 1. does the address exist in the L3 domain that dev is part of
1972
* (skip_dev_check = true), or
1973
*
1974
* 2. does the address exist on the specific device
1975
* (skip_dev_check = false)
1976
*/
1977
static struct net_device *
1978
__ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1979
const struct net_device *dev, bool skip_dev_check,
1980
int strict, u32 banned_flags)
1981
{
1982
unsigned int hash = inet6_addr_hash(net, addr);
1983
struct net_device *l3mdev, *ndev;
1984
struct inet6_ifaddr *ifp;
1985
u32 ifp_flags;
1986
1987
rcu_read_lock();
1988
1989
l3mdev = l3mdev_master_dev_rcu(dev);
1990
if (skip_dev_check)
1991
dev = NULL;
1992
1993
hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
1994
ndev = ifp->idev->dev;
1995
1996
if (l3mdev_master_dev_rcu(ndev) != l3mdev)
1997
continue;
1998
1999
/* Decouple optimistic from tentative for evaluation here.
2000
* Ban optimistic addresses explicitly, when required.
2001
*/
2002
ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
2003
? (ifp->flags&~IFA_F_TENTATIVE)
2004
: ifp->flags;
2005
if (ipv6_addr_equal(&ifp->addr, addr) &&
2006
!(ifp_flags&banned_flags) &&
2007
(!dev || ndev == dev ||
2008
!(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
2009
rcu_read_unlock();
2010
return ndev;
2011
}
2012
}
2013
2014
rcu_read_unlock();
2015
return NULL;
2016
}
2017
2018
int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
2019
const struct net_device *dev, bool skip_dev_check,
2020
int strict, u32 banned_flags)
2021
{
2022
return __ipv6_chk_addr_and_flags(net, addr, dev, skip_dev_check,
2023
strict, banned_flags) ? 1 : 0;
2024
}
2025
EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
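/* Illustrative usage sketch (not part of the original file): the two cases
* described above correspond to calls such as
*
*	ipv6_chk_addr_and_flags(net, &addr, dev, true, 0, IFA_F_TENTATIVE);
*		// 1. is the address present anywhere in dev's L3 domain?
*	ipv6_chk_addr_and_flags(net, &addr, dev, false, 1, IFA_F_TENTATIVE);
*		// 2. is the address present on this specific device?
*
* ipv6_chk_addr() above is the same check with skip_dev_check derived
* from !dev and IFA_F_TENTATIVE banned.
*/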
2026
2027
2028
/* Compares an address/prefix_len with addresses on device @dev.
2029
* If one is found it returns true.
2030
*/
2031
bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
2032
const unsigned int prefix_len, struct net_device *dev)
2033
{
2034
const struct inet6_ifaddr *ifa;
2035
const struct inet6_dev *idev;
2036
bool ret = false;
2037
2038
rcu_read_lock();
2039
idev = __in6_dev_get(dev);
2040
if (idev) {
2041
list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
2042
ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
2043
if (ret)
2044
break;
2045
}
2046
}
2047
rcu_read_unlock();
2048
2049
return ret;
2050
}
2051
EXPORT_SYMBOL(ipv6_chk_custom_prefix);
2052
2053
int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
2054
{
2055
const struct inet6_ifaddr *ifa;
2056
const struct inet6_dev *idev;
2057
int onlink;
2058
2059
onlink = 0;
2060
rcu_read_lock();
2061
idev = __in6_dev_get(dev);
2062
if (idev) {
2063
list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
2064
onlink = ipv6_prefix_equal(addr, &ifa->addr,
2065
ifa->prefix_len);
2066
if (onlink)
2067
break;
2068
}
2069
}
2070
rcu_read_unlock();
2071
return onlink;
2072
}
2073
EXPORT_SYMBOL(ipv6_chk_prefix);
2074
2075
/**
2076
* ipv6_dev_find - find the first device with a given source address.
2077
* @net: the net namespace
2078
* @addr: the source address
2079
* @dev: used to find the L3 domain of interest
2080
*
2081
* The caller should be protected by RCU, or RTNL.
2082
*/
2083
struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr,
2084
struct net_device *dev)
2085
{
2086
return __ipv6_chk_addr_and_flags(net, addr, dev, !dev, 1,
2087
IFA_F_TENTATIVE);
2088
}
2089
EXPORT_SYMBOL(ipv6_dev_find);
2090
2091
struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
2092
struct net_device *dev, int strict)
2093
{
2094
unsigned int hash = inet6_addr_hash(net, addr);
2095
struct inet6_ifaddr *ifp, *result = NULL;
2096
2097
rcu_read_lock();
2098
hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
2099
if (ipv6_addr_equal(&ifp->addr, addr)) {
2100
if (!dev || ifp->idev->dev == dev ||
2101
!(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
2102
if (in6_ifa_hold_safe(ifp)) {
2103
result = ifp;
2104
break;
2105
}
2106
}
2107
}
2108
}
2109
rcu_read_unlock();
2110
2111
return result;
2112
}
2113
2114
/* Gets referenced address, destroys ifaddr */
2115
2116
static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
2117
{
2118
if (dad_failed)
2119
ifp->flags |= IFA_F_DADFAILED;
2120
2121
if (ifp->flags&IFA_F_TEMPORARY) {
2122
struct inet6_ifaddr *ifpub;
2123
spin_lock_bh(&ifp->lock);
2124
ifpub = ifp->ifpub;
2125
if (ifpub) {
2126
in6_ifa_hold(ifpub);
2127
spin_unlock_bh(&ifp->lock);
2128
ipv6_create_tempaddr(ifpub, true);
2129
in6_ifa_put(ifpub);
2130
} else {
2131
spin_unlock_bh(&ifp->lock);
2132
}
2133
ipv6_del_addr(ifp);
2134
} else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
2135
spin_lock_bh(&ifp->lock);
2136
addrconf_del_dad_work(ifp);
2137
ifp->flags |= IFA_F_TENTATIVE;
2138
if (dad_failed)
2139
ifp->flags &= ~IFA_F_OPTIMISTIC;
2140
spin_unlock_bh(&ifp->lock);
2141
if (dad_failed)
2142
ipv6_ifa_notify(0, ifp);
2143
in6_ifa_put(ifp);
2144
} else {
2145
ipv6_del_addr(ifp);
2146
}
2147
}
2148
2149
static int addrconf_dad_end(struct inet6_ifaddr *ifp)
2150
{
2151
int err = -ENOENT;
2152
2153
spin_lock_bh(&ifp->lock);
2154
if (ifp->state == INET6_IFADDR_STATE_DAD) {
2155
ifp->state = INET6_IFADDR_STATE_POSTDAD;
2156
err = 0;
2157
}
2158
spin_unlock_bh(&ifp->lock);
2159
2160
return err;
2161
}
2162
2163
void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
2164
{
2165
struct inet6_dev *idev = ifp->idev;
2166
struct net *net = dev_net(idev->dev);
2167
int max_addresses;
2168
2169
if (addrconf_dad_end(ifp)) {
2170
in6_ifa_put(ifp);
2171
return;
2172
}
2173
2174
net_info_ratelimited("%s: IPv6 duplicate address %pI6c used by %pM detected!\n",
2175
ifp->idev->dev->name, &ifp->addr, eth_hdr(skb)->h_source);
2176
2177
spin_lock_bh(&ifp->lock);
2178
2179
if (ifp->flags & IFA_F_STABLE_PRIVACY) {
2180
struct in6_addr new_addr;
2181
struct inet6_ifaddr *ifp2;
2182
int retries = ifp->stable_privacy_retry + 1;
2183
struct ifa6_config cfg = {
2184
.pfx = &new_addr,
2185
.plen = ifp->prefix_len,
2186
.ifa_flags = ifp->flags,
2187
.valid_lft = ifp->valid_lft,
2188
.preferred_lft = ifp->prefered_lft,
2189
.scope = ifp->scope,
2190
};
2191
2192
if (retries > net->ipv6.sysctl.idgen_retries) {
2193
net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
2194
ifp->idev->dev->name);
2195
goto errdad;
2196
}
2197
2198
new_addr = ifp->addr;
2199
if (ipv6_generate_stable_address(&new_addr, retries,
2200
idev))
2201
goto errdad;
2202
2203
spin_unlock_bh(&ifp->lock);
2204
2205
max_addresses = READ_ONCE(idev->cnf.max_addresses);
2206
if (max_addresses &&
2207
ipv6_count_addresses(idev) >= max_addresses)
2208
goto lock_errdad;
2209
2210
net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
2211
ifp->idev->dev->name);
2212
2213
ifp2 = ipv6_add_addr(idev, &cfg, false, NULL);
2214
if (IS_ERR(ifp2))
2215
goto lock_errdad;
2216
2217
spin_lock_bh(&ifp2->lock);
2218
ifp2->stable_privacy_retry = retries;
2219
ifp2->state = INET6_IFADDR_STATE_PREDAD;
2220
spin_unlock_bh(&ifp2->lock);
2221
2222
addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
2223
in6_ifa_put(ifp2);
2224
lock_errdad:
2225
spin_lock_bh(&ifp->lock);
2226
}
2227
2228
errdad:
2229
/* transition from _POSTDAD to _ERRDAD */
2230
ifp->state = INET6_IFADDR_STATE_ERRDAD;
2231
spin_unlock_bh(&ifp->lock);
2232
2233
addrconf_mod_dad_work(ifp, 0);
2234
in6_ifa_put(ifp);
2235
}
2236
2237
/* Join to solicited addr multicast group. */
2238
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
2239
{
2240
struct in6_addr maddr;
2241
2242
if (READ_ONCE(dev->flags) & (IFF_LOOPBACK | IFF_NOARP))
2243
return;
2244
2245
addrconf_addr_solict_mult(addr, &maddr);
2246
ipv6_dev_mc_inc(dev, &maddr);
2247
}
2248
2249
void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
2250
{
2251
struct in6_addr maddr;
2252
2253
if (READ_ONCE(idev->dev->flags) & (IFF_LOOPBACK | IFF_NOARP))
2254
return;
2255
2256
addrconf_addr_solict_mult(addr, &maddr);
2257
__ipv6_dev_mc_dec(idev, &maddr);
2258
}
2259
2260
static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
2261
{
2262
struct in6_addr addr;
2263
2264
if (ifp->prefix_len >= 127) /* RFC 6164 */
2265
return;
2266
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2267
if (ipv6_addr_any(&addr))
2268
return;
2269
__ipv6_dev_ac_inc(ifp->idev, &addr);
2270
}
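/* Example (illustrative): for an address such as 2001:db8::1/64 the
* subnet-router anycast address 2001:db8:: is joined above; for /127 and
* /128 prefixes the join is skipped, following the RFC 6164 guidance for
* 127-bit point-to-point links.
*/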
2271
2272
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
2273
{
2274
struct in6_addr addr;
2275
2276
if (ifp->prefix_len >= 127) /* RFC 6164 */
2277
return;
2278
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2279
if (ipv6_addr_any(&addr))
2280
return;
2281
__ipv6_dev_ac_dec(ifp->idev, &addr);
2282
}
2283
2284
static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev)
2285
{
2286
switch (dev->addr_len) {
2287
case ETH_ALEN:
2288
memcpy(eui, dev->dev_addr, 3);
2289
eui[3] = 0xFF;
2290
eui[4] = 0xFE;
2291
memcpy(eui + 5, dev->dev_addr + 3, 3);
2292
break;
2293
case EUI64_ADDR_LEN:
2294
memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
2295
eui[0] ^= 2;
2296
break;
2297
default:
2298
return -1;
2299
}
2300
2301
return 0;
2302
}
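/* Worked example (illustrative): a 6LoWPAN device with the 48-bit address
* 00:11:22:33:44:55 gets the interface identifier 00:11:22:ff:fe:33:44:55
* from the ETH_ALEN branch above, i.e. ff:fe is inserted in the middle and,
* unlike addrconf_ifid_eui48() used for ARPHRD_ETHER, the universal/local
* bit is left untouched.  A native 64-bit EUI-64 is copied as-is with only
* that bit flipped (eui[0] ^= 2).
*/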
2303
2304
static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
2305
{
2306
const union fwnet_hwaddr *ha;
2307
2308
if (dev->addr_len != FWNET_ALEN)
2309
return -1;
2310
2311
ha = (const union fwnet_hwaddr *)dev->dev_addr;
2312
2313
memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
2314
eui[0] ^= 2;
2315
return 0;
2316
}
2317
2318
static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
2319
{
2320
/* XXX: inherit EUI-64 from other interface -- yoshfuji */
2321
if (dev->addr_len != ARCNET_ALEN)
2322
return -1;
2323
memset(eui, 0, 7);
2324
eui[7] = *(u8 *)dev->dev_addr;
2325
return 0;
2326
}
2327
2328
static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
2329
{
2330
if (dev->addr_len != INFINIBAND_ALEN)
2331
return -1;
2332
memcpy(eui, dev->dev_addr + 12, 8);
2333
eui[0] |= 2;
2334
return 0;
2335
}
2336
2337
static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
2338
{
2339
if (addr == 0)
2340
return -1;
2341
eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
2342
ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
2343
ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
2344
ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
2345
ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
2346
ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
2347
eui[1] = 0;
2348
eui[2] = 0x5E;
2349
eui[3] = 0xFE;
2350
memcpy(eui + 4, &addr, 4);
2351
return 0;
2352
}
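/* Worked example (illustrative): for the globally routable IPv4 address
* 198.51.100.1 none of the checks above match, so eui[0] = 0x02 and the
* ISATAP interface identifier becomes 0200:5efe:c633:6401.  For an address
* from one of the listed special ranges (e.g. 192.0.2.1) it would start
* with 0000:5efe instead.
*/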
2353
2354
static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
2355
{
2356
if (dev->priv_flags & IFF_ISATAP)
2357
return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2358
return -1;
2359
}
2360
2361
static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
2362
{
2363
return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2364
}
2365
2366
static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
2367
{
2368
memcpy(eui, dev->perm_addr, 3);
2369
memcpy(eui + 5, dev->perm_addr + 3, 3);
2370
eui[3] = 0xFF;
2371
eui[4] = 0xFE;
2372
eui[0] ^= 2;
2373
return 0;
2374
}
2375
2376
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
2377
{
2378
switch (dev->type) {
2379
case ARPHRD_ETHER:
2380
case ARPHRD_FDDI:
2381
return addrconf_ifid_eui48(eui, dev);
2382
case ARPHRD_ARCNET:
2383
return addrconf_ifid_arcnet(eui, dev);
2384
case ARPHRD_INFINIBAND:
2385
return addrconf_ifid_infiniband(eui, dev);
2386
case ARPHRD_SIT:
2387
return addrconf_ifid_sit(eui, dev);
2388
case ARPHRD_IPGRE:
2389
case ARPHRD_TUNNEL:
2390
return addrconf_ifid_gre(eui, dev);
2391
case ARPHRD_6LOWPAN:
2392
return addrconf_ifid_6lowpan(eui, dev);
2393
case ARPHRD_IEEE1394:
2394
return addrconf_ifid_ieee1394(eui, dev);
2395
case ARPHRD_TUNNEL6:
2396
case ARPHRD_IP6GRE:
2397
case ARPHRD_RAWIP:
2398
return addrconf_ifid_ip6tnl(eui, dev);
2399
}
2400
return -1;
2401
}
2402
2403
static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
2404
{
2405
int err = -1;
2406
struct inet6_ifaddr *ifp;
2407
2408
read_lock_bh(&idev->lock);
2409
list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
2410
if (ifp->scope > IFA_LINK)
2411
break;
2412
if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
2413
memcpy(eui, ifp->addr.s6_addr+8, 8);
2414
err = 0;
2415
break;
2416
}
2417
}
2418
read_unlock_bh(&idev->lock);
2419
return err;
2420
}
2421
2422
/* Generation of a randomized Interface Identifier
2423
* draft-ietf-6man-rfc4941bis, Section 3.3.1
2424
*/
2425
2426
static void ipv6_gen_rnd_iid(struct in6_addr *addr)
2427
{
2428
regen:
2429
get_random_bytes(&addr->s6_addr[8], 8);
2430
2431
/* <draft-ietf-6man-rfc4941bis-08.txt>, Section 3.3.1:
2432
* check if generated address is not inappropriate:
2433
*
2434
* - Reserved IPv6 Interface Identifiers
2435
* - XXX: already assigned to an address on the device
2436
*/
2437
2438
/* Subnet-router anycast: 0000:0000:0000:0000 */
2439
if (!(addr->s6_addr32[2] | addr->s6_addr32[3]))
2440
goto regen;
2441
2442
/* IANA Ethernet block: 0200:5EFF:FE00:0000-0200:5EFF:FE00:5212
2443
* Proxy Mobile IPv6: 0200:5EFF:FE00:5213
2444
* IANA Ethernet block: 0200:5EFF:FE00:5214-0200:5EFF:FEFF:FFFF
2445
*/
2446
if (ntohl(addr->s6_addr32[2]) == 0x02005eff &&
2447
(ntohl(addr->s6_addr32[3]) & 0xff000000) == 0xfe000000)
2448
goto regen;
2449
2450
/* Reserved subnet anycast addresses */
2451
if (ntohl(addr->s6_addr32[2]) == 0xfdffffff &&
2452
ntohl(addr->s6_addr32[3]) >= 0xffffff80)
2453
goto regen;
2454
}
2455
2456
/*
2457
* Add prefix route.
2458
*/
2459
2460
static void
2461
addrconf_prefix_route(struct in6_addr *pfx, int plen, u32 metric,
2462
struct net_device *dev, unsigned long expires,
2463
u32 flags, gfp_t gfp_flags)
2464
{
2465
struct fib6_config cfg = {
2466
.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
2467
.fc_metric = metric ? : IP6_RT_PRIO_ADDRCONF,
2468
.fc_ifindex = dev->ifindex,
2469
.fc_expires = expires,
2470
.fc_dst_len = plen,
2471
.fc_flags = RTF_UP | flags,
2472
.fc_nlinfo.nl_net = dev_net(dev),
2473
.fc_protocol = RTPROT_KERNEL,
2474
.fc_type = RTN_UNICAST,
2475
};
2476
2477
cfg.fc_dst = *pfx;
2478
2479
/* Prevent useless cloning on PtP SIT.
2480
This is done here on the assumption that the whole
2481
class of non-broadcast devices does not need cloning.
2482
*/
2483
#if IS_ENABLED(CONFIG_IPV6_SIT)
2484
if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
2485
cfg.fc_flags |= RTF_NONEXTHOP;
2486
#endif
2487
2488
ip6_route_add(&cfg, gfp_flags, NULL);
2489
}
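/* Illustrative effect (assuming no l3mdev table override and a zero
* metric argument): installing the prefix 2001:db8:1::/64 on eth0 adds
* roughly the equivalent of
*
*	ip -6 route add 2001:db8:1::/64 dev eth0 proto kernel metric 256
*
* to RT6_TABLE_PREFIX, 256 being IP6_RT_PRIO_ADDRCONF, optionally with an
* expiry when RTF_EXPIRES is passed in flags.
*/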
2490
2491
2492
static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2493
int plen,
2494
const struct net_device *dev,
2495
u32 flags, u32 noflags,
2496
bool no_gw)
2497
{
2498
struct fib6_node *fn;
2499
struct fib6_info *rt = NULL;
2500
struct fib6_table *table;
2501
u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
2502
2503
table = fib6_get_table(dev_net(dev), tb_id);
2504
if (!table)
2505
return NULL;
2506
2507
rcu_read_lock();
2508
fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0, true);
2509
if (!fn)
2510
goto out;
2511
2512
for_each_fib6_node_rt_rcu(fn) {
2513
/* prefix routes only use builtin fib6_nh */
2514
if (rt->nh)
2515
continue;
2516
2517
if (rt->fib6_nh->fib_nh_dev->ifindex != dev->ifindex)
2518
continue;
2519
if (no_gw && rt->fib6_nh->fib_nh_gw_family)
2520
continue;
2521
if ((rt->fib6_flags & flags) != flags)
2522
continue;
2523
if ((rt->fib6_flags & noflags) != 0)
2524
continue;
2525
if (!fib6_info_hold_safe(rt))
2526
continue;
2527
break;
2528
}
2529
out:
2530
rcu_read_unlock();
2531
return rt;
2532
}
2533
2534
2535
/* Create "default" multicast route to the interface */
2536
2537
static void addrconf_add_mroute(struct net_device *dev)
2538
{
2539
struct fib6_config cfg = {
2540
.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
2541
.fc_metric = IP6_RT_PRIO_ADDRCONF,
2542
.fc_ifindex = dev->ifindex,
2543
.fc_dst_len = 8,
2544
.fc_flags = RTF_UP,
2545
.fc_type = RTN_MULTICAST,
2546
.fc_nlinfo.nl_net = dev_net(dev),
2547
.fc_protocol = RTPROT_KERNEL,
2548
};
2549
2550
ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
2551
2552
ip6_route_add(&cfg, GFP_KERNEL, NULL);
2553
}
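/* The route installed above covers ff00::/8 (fc_dst_len = 8), i.e. all
* multicast destinations, in the device's l3mdev table or RT6_TABLE_LOCAL;
* it shows up in `ip -6 route show table local` roughly as
* "multicast ff00::/8 dev <dev> proto kernel metric 256".
*/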
2554
2555
static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2556
{
2557
struct inet6_dev *idev;
2558
2559
ASSERT_RTNL();
2560
2561
idev = ipv6_find_idev(dev);
2562
if (IS_ERR(idev))
2563
return idev;
2564
2565
if (idev->cnf.disable_ipv6)
2566
return ERR_PTR(-EACCES);
2567
2568
/* Add default multicast route */
2569
if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev))
2570
addrconf_add_mroute(dev);
2571
2572
return idev;
2573
}
2574
2575
static void delete_tempaddrs(struct inet6_dev *idev,
2576
struct inet6_ifaddr *ifp)
2577
{
2578
struct inet6_ifaddr *ift, *tmp;
2579
2580
write_lock_bh(&idev->lock);
2581
list_for_each_entry_safe(ift, tmp, &idev->tempaddr_list, tmp_list) {
2582
if (ift->ifpub != ifp)
2583
continue;
2584
2585
in6_ifa_hold(ift);
2586
write_unlock_bh(&idev->lock);
2587
ipv6_del_addr(ift);
2588
write_lock_bh(&idev->lock);
2589
}
2590
write_unlock_bh(&idev->lock);
2591
}
2592
2593
static void manage_tempaddrs(struct inet6_dev *idev,
2594
struct inet6_ifaddr *ifp,
2595
__u32 valid_lft, __u32 prefered_lft,
2596
bool create, unsigned long now)
2597
{
2598
u32 flags;
2599
struct inet6_ifaddr *ift;
2600
2601
read_lock_bh(&idev->lock);
2602
/* update all temporary addresses in the list */
2603
list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
2604
int age, max_valid, max_prefered;
2605
2606
if (ifp != ift->ifpub)
2607
continue;
2608
2609
/* RFC 4941 section 3.3:
2610
* If a received option will extend the lifetime of a public
2611
* address, the lifetimes of temporary addresses should
2612
* be extended, subject to the overall constraint that no
2613
* temporary addresses should ever remain "valid" or "preferred"
2614
* for a time longer than (TEMP_VALID_LIFETIME) or
2615
* (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
2616
*/
2617
age = (now - ift->cstamp) / HZ;
2618
max_valid = READ_ONCE(idev->cnf.temp_valid_lft) - age;
2619
if (max_valid < 0)
2620
max_valid = 0;
2621
2622
max_prefered = READ_ONCE(idev->cnf.temp_prefered_lft) -
2623
idev->desync_factor - age;
2624
if (max_prefered < 0)
2625
max_prefered = 0;
2626
2627
if (valid_lft > max_valid)
2628
valid_lft = max_valid;
2629
2630
if (prefered_lft > max_prefered)
2631
prefered_lft = max_prefered;
2632
2633
spin_lock(&ift->lock);
2634
flags = ift->flags;
2635
ift->valid_lft = valid_lft;
2636
ift->prefered_lft = prefered_lft;
2637
ift->tstamp = now;
2638
if (prefered_lft > 0)
2639
ift->flags &= ~IFA_F_DEPRECATED;
2640
2641
spin_unlock(&ift->lock);
2642
if (!(flags&IFA_F_TENTATIVE))
2643
ipv6_ifa_notify(0, ift);
2644
}
2645
2646
/* Also create a temporary address if it's enabled but no temporary
2647
* address currently exists.
2648
* However, we get called with valid_lft == 0, prefered_lft == 0, create == false
2649
* as part of cleanup (i.e., deleting the mngtmpaddr).
2650
* We don't want that to result in creating a new temporary IP address.
2651
*/
2652
if (list_empty(&idev->tempaddr_list) && (valid_lft || prefered_lft))
2653
create = true;
2654
2655
if (create && READ_ONCE(idev->cnf.use_tempaddr) > 0) {
2656
/* When a new public address is created as described
2657
* in [ADDRCONF], also create a new temporary address.
2658
*/
2659
read_unlock_bh(&idev->lock);
2660
ipv6_create_tempaddr(ifp, false);
2661
} else {
2662
read_unlock_bh(&idev->lock);
2663
}
2664
}
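/* Worked example for the clamping above (illustrative numbers): with
* temp_valid_lft = 604800s, temp_prefered_lft = 86400s, desync_factor =
* 300s and a temporary address that is 50000s old, max_valid = 554800 and
* max_prefered = 36100.  An RA offering valid_lft = 2592000 and
* prefered_lft = 604800 is therefore clamped to 554800/36100 for that
* temporary address, as required by RFC 4941 section 3.3.
*/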
2665
2666
static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
2667
{
2668
return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
2669
idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
2670
}
2671
2672
int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
2673
const struct prefix_info *pinfo,
2674
struct inet6_dev *in6_dev,
2675
const struct in6_addr *addr, int addr_type,
2676
u32 addr_flags, bool sllao, bool tokenized,
2677
__u32 valid_lft, u32 prefered_lft)
2678
{
2679
struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
2680
int create = 0, update_lft = 0;
2681
2682
if (!ifp && valid_lft) {
2683
int max_addresses = READ_ONCE(in6_dev->cnf.max_addresses);
2684
struct ifa6_config cfg = {
2685
.pfx = addr,
2686
.plen = pinfo->prefix_len,
2687
.ifa_flags = addr_flags,
2688
.valid_lft = valid_lft,
2689
.preferred_lft = prefered_lft,
2690
.scope = addr_type & IPV6_ADDR_SCOPE_MASK,
2691
.ifa_proto = IFAPROT_KERNEL_RA
2692
};
2693
2694
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2695
if ((READ_ONCE(net->ipv6.devconf_all->optimistic_dad) ||
2696
READ_ONCE(in6_dev->cnf.optimistic_dad)) &&
2697
!net->ipv6.devconf_all->forwarding && sllao)
2698
cfg.ifa_flags |= IFA_F_OPTIMISTIC;
2699
#endif
2700
2701
/* Do not allow creation of too many autoconfigured
2702
* addresses; this would be too easy a way to crash the kernel.
2703
*/
2704
if (!max_addresses ||
2705
ipv6_count_addresses(in6_dev) < max_addresses)
2706
ifp = ipv6_add_addr(in6_dev, &cfg, false, NULL);
2707
2708
if (IS_ERR_OR_NULL(ifp))
2709
return -1;
2710
2711
create = 1;
2712
spin_lock_bh(&ifp->lock);
2713
ifp->flags |= IFA_F_MANAGETEMPADDR;
2714
ifp->cstamp = jiffies;
2715
ifp->tokenized = tokenized;
2716
spin_unlock_bh(&ifp->lock);
2717
addrconf_dad_start(ifp);
2718
}
2719
2720
if (ifp) {
2721
u32 flags;
2722
unsigned long now;
2723
u32 stored_lft;
2724
2725
/* update lifetime (RFC2462 5.5.3 e) */
2726
spin_lock_bh(&ifp->lock);
2727
now = jiffies;
2728
if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2729
stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2730
else
2731
stored_lft = 0;
2732
2733
/* RFC4862 Section 5.5.3e:
2734
* "Note that the preferred lifetime of the
2735
* corresponding address is always reset to
2736
* the Preferred Lifetime in the received
2737
* Prefix Information option, regardless of
2738
* whether the valid lifetime is also reset or
2739
* ignored."
2740
*
2741
* So we should always update prefered_lft here.
2742
*/
2743
update_lft = !create && stored_lft;
2744
2745
if (update_lft && !READ_ONCE(in6_dev->cnf.ra_honor_pio_life)) {
2746
const u32 minimum_lft = min_t(u32,
2747
stored_lft, MIN_VALID_LIFETIME);
2748
valid_lft = max(valid_lft, minimum_lft);
2749
}
2750
2751
if (update_lft) {
2752
ifp->valid_lft = valid_lft;
2753
ifp->prefered_lft = prefered_lft;
2754
WRITE_ONCE(ifp->tstamp, now);
2755
flags = ifp->flags;
2756
ifp->flags &= ~IFA_F_DEPRECATED;
2757
spin_unlock_bh(&ifp->lock);
2758
2759
if (!(flags&IFA_F_TENTATIVE))
2760
ipv6_ifa_notify(0, ifp);
2761
} else
2762
spin_unlock_bh(&ifp->lock);
2763
2764
manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2765
create, now);
2766
2767
in6_ifa_put(ifp);
2768
addrconf_verify(net);
2769
}
2770
2771
return 0;
2772
}
2773
EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
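/* Worked example for the RFC 4862 5.5.3e clamp above (illustrative,
* assuming MIN_VALID_LIFETIME is two hours): an existing address has
* stored_lft = 3000s left and a PIO advertises valid_lft = 100.  With
* ra_honor_pio_life disabled, minimum_lft = min(3000, 7200) = 3000 and
* valid_lft = max(100, 3000) = 3000, so a spoofed or misconfigured RA
* cannot abruptly expire the address.
*/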
2774
2775
void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2776
{
2777
struct prefix_info *pinfo;
2778
struct fib6_table *table;
2779
__u32 valid_lft;
2780
__u32 prefered_lft;
2781
int addr_type, err;
2782
u32 addr_flags = 0;
2783
struct inet6_dev *in6_dev;
2784
struct net *net = dev_net(dev);
2785
bool ignore_autoconf = false;
2786
2787
pinfo = (struct prefix_info *) opt;
2788
2789
if (len < sizeof(struct prefix_info)) {
2790
netdev_dbg(dev, "addrconf: prefix option too short\n");
2791
return;
2792
}
2793
2794
/*
2795
* Validation checks ([ADDRCONF], page 19)
2796
*/
2797
2798
addr_type = ipv6_addr_type(&pinfo->prefix);
2799
2800
if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL))
2801
return;
2802
2803
valid_lft = ntohl(pinfo->valid);
2804
prefered_lft = ntohl(pinfo->prefered);
2805
2806
if (prefered_lft > valid_lft) {
2807
net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
2808
return;
2809
}
2810
2811
in6_dev = in6_dev_get(dev);
2812
2813
if (!in6_dev) {
2814
net_dbg_ratelimited("addrconf: device %s not configured\n",
2815
dev->name);
2816
return;
2817
}
2818
2819
if (valid_lft != 0 && valid_lft < in6_dev->cnf.accept_ra_min_lft)
2820
goto put;
2821
2822
/*
2823
* Two things going on here:
2824
* 1) Add routes for on-link prefixes
2825
* 2) Configure prefixes with the auto flag set
2826
*/
2827
2828
if (pinfo->onlink) {
2829
struct fib6_info *rt;
2830
unsigned long rt_expires;
2831
2832
/* Avoid arithmetic overflow. Really, we could
2833
* save rt_expires in seconds, likely valid_lft,
2834
* but it would require division in fib gc, and that is
2835
* not good.
2836
*/
2837
if (HZ > USER_HZ)
2838
rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
2839
else
2840
rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
2841
2842
if (addrconf_finite_timeout(rt_expires))
2843
rt_expires *= HZ;
2844
2845
rt = addrconf_get_prefix_route(&pinfo->prefix,
2846
pinfo->prefix_len,
2847
dev,
2848
RTF_ADDRCONF | RTF_PREFIX_RT,
2849
RTF_DEFAULT, true);
2850
2851
if (rt) {
2852
/* Autoconf prefix route */
2853
if (valid_lft == 0) {
2854
ip6_del_rt(net, rt, false);
2855
rt = NULL;
2856
} else {
2857
table = rt->fib6_table;
2858
spin_lock_bh(&table->tb6_lock);
2859
2860
if (addrconf_finite_timeout(rt_expires)) {
2861
/* not infinity */
2862
fib6_set_expires(rt, jiffies + rt_expires);
2863
fib6_add_gc_list(rt);
2864
} else {
2865
fib6_clean_expires(rt);
2866
fib6_remove_gc_list(rt);
2867
}
2868
2869
spin_unlock_bh(&table->tb6_lock);
2870
}
2871
} else if (valid_lft) {
2872
clock_t expires = 0;
2873
int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
2874
if (addrconf_finite_timeout(rt_expires)) {
2875
/* not infinity */
2876
flags |= RTF_EXPIRES;
2877
expires = jiffies_to_clock_t(rt_expires);
2878
}
2879
addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
2880
0, dev, expires, flags,
2881
GFP_ATOMIC);
2882
}
2883
fib6_info_release(rt);
2884
}
2885
2886
/* Try to figure out our local address for this prefix */
2887
2888
ignore_autoconf = READ_ONCE(in6_dev->cnf.ra_honor_pio_pflag) && pinfo->preferpd;
2889
if (pinfo->autoconf && in6_dev->cnf.autoconf && !ignore_autoconf) {
2890
struct in6_addr addr;
2891
bool tokenized = false, dev_addr_generated = false;
2892
2893
if (pinfo->prefix_len == 64) {
2894
memcpy(&addr, &pinfo->prefix, 8);
2895
2896
if (!ipv6_addr_any(&in6_dev->token)) {
2897
read_lock_bh(&in6_dev->lock);
2898
memcpy(addr.s6_addr + 8,
2899
in6_dev->token.s6_addr + 8, 8);
2900
read_unlock_bh(&in6_dev->lock);
2901
tokenized = true;
2902
} else if (is_addr_mode_generate_stable(in6_dev) &&
2903
!ipv6_generate_stable_address(&addr, 0,
2904
in6_dev)) {
2905
addr_flags |= IFA_F_STABLE_PRIVACY;
2906
goto ok;
2907
} else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
2908
ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
2909
goto put;
2910
} else {
2911
dev_addr_generated = true;
2912
}
2913
goto ok;
2914
}
2915
net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
2916
pinfo->prefix_len);
2917
goto put;
2918
2919
ok:
2920
err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
2921
&addr, addr_type,
2922
addr_flags, sllao,
2923
tokenized, valid_lft,
2924
prefered_lft);
2925
if (err)
2926
goto put;
2927
2928
/* Ignore any error here, because the previous prefix address add
2929
* was successful and that is what will be notified.
2930
*/
2931
ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr,
2932
addr_type, addr_flags, sllao,
2933
tokenized, valid_lft,
2934
prefered_lft,
2935
dev_addr_generated);
2936
}
2937
inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
2938
put:
2939
in6_dev_put(in6_dev);
2940
}
2941
2942
static int addrconf_set_sit_dstaddr(struct net *net, struct net_device *dev,
2943
struct in6_ifreq *ireq)
2944
{
2945
struct ip_tunnel_parm_kern p = { };
2946
int err;
2947
2948
if (!(ipv6_addr_type(&ireq->ifr6_addr) & IPV6_ADDR_COMPATv4))
2949
return -EADDRNOTAVAIL;
2950
2951
p.iph.daddr = ireq->ifr6_addr.s6_addr32[3];
2952
p.iph.version = 4;
2953
p.iph.ihl = 5;
2954
p.iph.protocol = IPPROTO_IPV6;
2955
p.iph.ttl = 64;
2956
2957
if (!dev->netdev_ops->ndo_tunnel_ctl)
2958
return -EOPNOTSUPP;
2959
err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, SIOCADDTUNNEL);
2960
if (err)
2961
return err;
2962
2963
dev = __dev_get_by_name(net, p.name);
2964
if (!dev)
2965
return -ENOBUFS;
2966
return dev_open(dev, NULL);
2967
}
2968
2969
/*
2970
* Set destination address.
2971
* Special case for SIT interfaces where we create a new "virtual"
2972
* device.
2973
*/
2974
int addrconf_set_dstaddr(struct net *net, void __user *arg)
2975
{
2976
struct net_device *dev;
2977
struct in6_ifreq ireq;
2978
int err = -ENODEV;
2979
2980
if (!IS_ENABLED(CONFIG_IPV6_SIT))
2981
return -ENODEV;
2982
if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2983
return -EFAULT;
2984
2985
rtnl_net_lock(net);
2986
dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
2987
if (dev && dev->type == ARPHRD_SIT)
2988
err = addrconf_set_sit_dstaddr(net, dev, &ireq);
2989
rtnl_net_unlock(net);
2990
return err;
2991
}
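/* Userspace usage sketch (illustrative, assuming this path is reached via
* the SIOCSIFDSTADDR ioctl on an AF_INET6 socket): the caller fills a
* struct in6_ifreq with the sit device's ifindex and a v4-compatible
* destination such as ::192.0.2.1; the handler above then asks the sit
* driver to create the corresponding tunnel device and brings it up.
*/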
2992
2993
static int ipv6_mc_config(struct sock *sk, bool join,
2994
const struct in6_addr *addr, int ifindex)
2995
{
2996
int ret;
2997
2998
ASSERT_RTNL();
2999
3000
lock_sock(sk);
3001
if (join)
3002
ret = ipv6_sock_mc_join(sk, ifindex, addr);
3003
else
3004
ret = ipv6_sock_mc_drop(sk, ifindex, addr);
3005
release_sock(sk);
3006
3007
return ret;
3008
}
3009
3010
/*
3011
* Manual configuration of address on an interface
3012
*/
3013
static int inet6_addr_add(struct net *net, struct net_device *dev,
3014
struct ifa6_config *cfg, clock_t expires, u32 flags,
3015
struct netlink_ext_ack *extack)
3016
{
3017
struct inet6_ifaddr *ifp;
3018
struct inet6_dev *idev;
3019
3020
ASSERT_RTNL_NET(net);
3021
3022
if (cfg->plen > 128) {
3023
NL_SET_ERR_MSG_MOD(extack, "Invalid prefix length");
3024
return -EINVAL;
3025
}
3026
3027
if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR && cfg->plen != 64) {
3028
NL_SET_ERR_MSG_MOD(extack, "address with \"mngtmpaddr\" flag must have a prefix length of 64");
3029
return -EINVAL;
3030
}
3031
3032
idev = addrconf_add_dev(dev);
3033
if (IS_ERR(idev)) {
3034
NL_SET_ERR_MSG_MOD(extack, "IPv6 is disabled on this device");
3035
return PTR_ERR(idev);
3036
}
3037
3038
if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
3039
int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
3040
true, cfg->pfx, dev->ifindex);
3041
3042
if (ret < 0) {
3043
NL_SET_ERR_MSG_MOD(extack, "Multicast auto join failed");
3044
return ret;
3045
}
3046
}
3047
3048
cfg->scope = ipv6_addr_scope(cfg->pfx);
3049
3050
ifp = ipv6_add_addr(idev, cfg, true, extack);
3051
if (!IS_ERR(ifp)) {
3052
if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
3053
addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3054
ifp->rt_priority, dev, expires,
3055
flags, GFP_KERNEL);
3056
}
3057
3058
/* Send a netlink notification if DAD is enabled and
3059
* optimistic flag is not set
3060
*/
3061
if (!(ifp->flags & (IFA_F_OPTIMISTIC | IFA_F_NODAD)))
3062
ipv6_ifa_notify(0, ifp);
3063
/*
3064
* Note that section 3.1 of RFC 4429 indicates
3065
* that the Optimistic flag should not be set for
3066
* manually configured addresses
3067
*/
3068
addrconf_dad_start(ifp);
3069
if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR)
3070
manage_tempaddrs(idev, ifp, cfg->valid_lft,
3071
cfg->preferred_lft, true, jiffies);
3072
in6_ifa_put(ifp);
3073
addrconf_verify_rtnl(net);
3074
return 0;
3075
} else if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
3076
ipv6_mc_config(net->ipv6.mc_autojoin_sk, false,
3077
cfg->pfx, dev->ifindex);
3078
}
3079
3080
return PTR_ERR(ifp);
3081
}
3082
3083
static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
3084
const struct in6_addr *pfx, unsigned int plen,
3085
struct netlink_ext_ack *extack)
3086
{
3087
struct inet6_ifaddr *ifp;
3088
struct inet6_dev *idev;
3089
struct net_device *dev;
3090
3091
if (plen > 128) {
3092
NL_SET_ERR_MSG_MOD(extack, "Invalid prefix length");
3093
return -EINVAL;
3094
}
3095
3096
dev = __dev_get_by_index(net, ifindex);
3097
if (!dev) {
3098
NL_SET_ERR_MSG_MOD(extack, "Unable to find the interface");
3099
return -ENODEV;
3100
}
3101
3102
idev = __in6_dev_get_rtnl_net(dev);
3103
if (!idev) {
3104
NL_SET_ERR_MSG_MOD(extack, "IPv6 is disabled on this device");
3105
return -ENXIO;
3106
}
3107
3108
read_lock_bh(&idev->lock);
3109
list_for_each_entry(ifp, &idev->addr_list, if_list) {
3110
if (ifp->prefix_len == plen &&
3111
ipv6_addr_equal(pfx, &ifp->addr)) {
3112
in6_ifa_hold(ifp);
3113
read_unlock_bh(&idev->lock);
3114
3115
ipv6_del_addr(ifp);
3116
3117
if (!(ifp->flags & IFA_F_TEMPORARY) &&
3118
(ifp->flags & IFA_F_MANAGETEMPADDR))
3119
delete_tempaddrs(idev, ifp);
3120
3121
addrconf_verify_rtnl(net);
3122
if (ipv6_addr_is_multicast(pfx)) {
3123
ipv6_mc_config(net->ipv6.mc_autojoin_sk,
3124
false, pfx, dev->ifindex);
3125
}
3126
return 0;
3127
}
3128
}
3129
read_unlock_bh(&idev->lock);
3130
3131
NL_SET_ERR_MSG_MOD(extack, "address not found");
3132
return -EADDRNOTAVAIL;
3133
}
3134
3135
3136
int addrconf_add_ifaddr(struct net *net, void __user *arg)
3137
{
3138
struct ifa6_config cfg = {
3139
.ifa_flags = IFA_F_PERMANENT,
3140
.preferred_lft = INFINITY_LIFE_TIME,
3141
.valid_lft = INFINITY_LIFE_TIME,
3142
};
3143
struct net_device *dev;
3144
struct in6_ifreq ireq;
3145
int err;
3146
3147
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3148
return -EPERM;
3149
3150
if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3151
return -EFAULT;
3152
3153
cfg.pfx = &ireq.ifr6_addr;
3154
cfg.plen = ireq.ifr6_prefixlen;
3155
3156
rtnl_net_lock(net);
3157
dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
3158
if (dev) {
3159
netdev_lock_ops(dev);
3160
err = inet6_addr_add(net, dev, &cfg, 0, 0, NULL);
3161
netdev_unlock_ops(dev);
3162
} else {
3163
err = -ENODEV;
3164
}
3165
rtnl_net_unlock(net);
3166
return err;
3167
}
3168
3169
int addrconf_del_ifaddr(struct net *net, void __user *arg)
3170
{
3171
struct in6_ifreq ireq;
3172
int err;
3173
3174
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3175
return -EPERM;
3176
3177
if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3178
return -EFAULT;
3179
3180
rtnl_net_lock(net);
3181
err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
3182
ireq.ifr6_prefixlen, NULL);
3183
rtnl_net_unlock(net);
3184
return err;
3185
}
3186
3187
static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
3188
int plen, int scope, u8 proto)
3189
{
3190
struct inet6_ifaddr *ifp;
3191
struct ifa6_config cfg = {
3192
.pfx = addr,
3193
.plen = plen,
3194
.ifa_flags = IFA_F_PERMANENT,
3195
.valid_lft = INFINITY_LIFE_TIME,
3196
.preferred_lft = INFINITY_LIFE_TIME,
3197
.scope = scope,
3198
.ifa_proto = proto
3199
};
3200
3201
ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3202
if (!IS_ERR(ifp)) {
3203
spin_lock_bh(&ifp->lock);
3204
ifp->flags &= ~IFA_F_TENTATIVE;
3205
spin_unlock_bh(&ifp->lock);
3206
rt_genid_bump_ipv6(dev_net(idev->dev));
3207
ipv6_ifa_notify(RTM_NEWADDR, ifp);
3208
in6_ifa_put(ifp);
3209
}
3210
}
3211
3212
#if IS_ENABLED(CONFIG_IPV6_SIT) || IS_ENABLED(CONFIG_NET_IPGRE)
3213
static void add_v4_addrs(struct inet6_dev *idev)
3214
{
3215
struct in6_addr addr;
3216
struct net_device *dev;
3217
struct net *net = dev_net(idev->dev);
3218
int scope, plen;
3219
u32 pflags = 0;
3220
3221
ASSERT_RTNL();
3222
3223
memset(&addr, 0, sizeof(struct in6_addr));
3224
memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
3225
3226
if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) {
3227
scope = IPV6_ADDR_COMPATv4;
3228
plen = 96;
3229
pflags |= RTF_NONEXTHOP;
3230
} else {
3231
if (idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_NONE)
3232
return;
3233
3234
addr.s6_addr32[0] = htonl(0xfe800000);
3235
scope = IFA_LINK;
3236
plen = 64;
3237
}
3238
3239
if (addr.s6_addr32[3]) {
3240
add_addr(idev, &addr, plen, scope, IFAPROT_UNSPEC);
3241
addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags,
3242
GFP_KERNEL);
3243
return;
3244
}
3245
3246
for_each_netdev(net, dev) {
3247
struct in_device *in_dev = __in_dev_get_rtnl(dev);
3248
if (in_dev && (dev->flags & IFF_UP)) {
3249
struct in_ifaddr *ifa;
3250
int flag = scope;
3251
3252
in_dev_for_each_ifa_rtnl(ifa, in_dev) {
3253
addr.s6_addr32[3] = ifa->ifa_local;
3254
3255
if (ifa->ifa_scope == RT_SCOPE_LINK)
3256
continue;
3257
if (ifa->ifa_scope >= RT_SCOPE_HOST) {
3258
if (idev->dev->flags&IFF_POINTOPOINT)
3259
continue;
3260
flag |= IFA_HOST;
3261
}
3262
3263
add_addr(idev, &addr, plen, flag,
3264
IFAPROT_UNSPEC);
3265
addrconf_prefix_route(&addr, plen, 0, idev->dev,
3266
0, pflags, GFP_KERNEL);
3267
}
3268
}
3269
}
3270
}
3271
#endif
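/* Example of what add_v4_addrs() produces (illustrative): a non-PtP SIT
* device with local IPv4 address 198.51.100.1 gets the v4-compatible
* address ::198.51.100.1/96, while a point-to-point SIT or GRE device gets
* the link-local fe80::c633:6401/64 derived from the same IPv4 address.
*/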
3272
3273
static void init_loopback(struct net_device *dev)
3274
{
3275
struct inet6_dev *idev;
3276
3277
/* ::1 */
3278
3279
ASSERT_RTNL();
3280
3281
idev = ipv6_find_idev(dev);
3282
if (IS_ERR(idev)) {
3283
pr_debug("%s: add_dev failed\n", __func__);
3284
return;
3285
}
3286
3287
add_addr(idev, &in6addr_loopback, 128, IFA_HOST, IFAPROT_KERNEL_LO);
3288
}
3289
3290
void addrconf_add_linklocal(struct inet6_dev *idev,
3291
const struct in6_addr *addr, u32 flags)
3292
{
3293
struct ifa6_config cfg = {
3294
.pfx = addr,
3295
.plen = 64,
3296
.ifa_flags = flags | IFA_F_PERMANENT,
3297
.valid_lft = INFINITY_LIFE_TIME,
3298
.preferred_lft = INFINITY_LIFE_TIME,
3299
.scope = IFA_LINK,
3300
.ifa_proto = IFAPROT_KERNEL_LL
3301
};
3302
struct inet6_ifaddr *ifp;
3303
3304
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
3305
if ((READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad) ||
3306
READ_ONCE(idev->cnf.optimistic_dad)) &&
3307
!dev_net(idev->dev)->ipv6.devconf_all->forwarding)
3308
cfg.ifa_flags |= IFA_F_OPTIMISTIC;
3309
#endif
3310
3311
ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3312
if (!IS_ERR(ifp)) {
3313
addrconf_prefix_route(&ifp->addr, ifp->prefix_len, 0, idev->dev,
3314
0, 0, GFP_ATOMIC);
3315
addrconf_dad_start(ifp);
3316
in6_ifa_put(ifp);
3317
}
3318
}
3319
EXPORT_SYMBOL_GPL(addrconf_add_linklocal);
3320
3321
static bool ipv6_reserved_interfaceid(struct in6_addr address)
3322
{
3323
if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
3324
return true;
3325
3326
if (address.s6_addr32[2] == htonl(0x02005eff) &&
3327
((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
3328
return true;
3329
3330
if (address.s6_addr32[2] == htonl(0xfdffffff) &&
3331
((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
3332
return true;
3333
3334
return false;
3335
}
3336
3337
static int ipv6_generate_stable_address(struct in6_addr *address,
3338
u8 dad_count,
3339
const struct inet6_dev *idev)
3340
{
3341
static DEFINE_SPINLOCK(lock);
3342
static __u32 digest[SHA1_DIGEST_WORDS];
3343
static __u32 workspace[SHA1_WORKSPACE_WORDS];
3344
3345
static union {
3346
char __data[SHA1_BLOCK_SIZE];
3347
struct {
3348
struct in6_addr secret;
3349
__be32 prefix[2];
3350
unsigned char hwaddr[MAX_ADDR_LEN];
3351
u8 dad_count;
3352
} __packed;
3353
} data;
3354
3355
struct in6_addr secret;
3356
struct in6_addr temp;
3357
struct net *net = dev_net(idev->dev);
3358
3359
BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
3360
3361
if (idev->cnf.stable_secret.initialized)
3362
secret = idev->cnf.stable_secret.secret;
3363
else if (net->ipv6.devconf_dflt->stable_secret.initialized)
3364
secret = net->ipv6.devconf_dflt->stable_secret.secret;
3365
else
3366
return -1;
3367
3368
retry:
3369
spin_lock_bh(&lock);
3370
3371
sha1_init_raw(digest);
3372
memset(&data, 0, sizeof(data));
3373
memset(workspace, 0, sizeof(workspace));
3374
memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
3375
data.prefix[0] = address->s6_addr32[0];
3376
data.prefix[1] = address->s6_addr32[1];
3377
data.secret = secret;
3378
data.dad_count = dad_count;
3379
3380
sha1_transform(digest, data.__data, workspace);
3381
3382
temp = *address;
3383
temp.s6_addr32[2] = (__force __be32)digest[0];
3384
temp.s6_addr32[3] = (__force __be32)digest[1];
3385
3386
spin_unlock_bh(&lock);
3387
3388
if (ipv6_reserved_interfaceid(temp)) {
3389
dad_count++;
3390
if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
3391
return -1;
3392
goto retry;
3393
}
3394
3395
*address = temp;
3396
return 0;
3397
}
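/* In short, the identifier computed above follows the RFC 7217 style:
* IID = first 64 bits of SHA-1(secret || prefix || hwaddr || dad_count),
* retried with an incremented dad_count (up to idgen_retries) whenever the
* result collides with a reserved interface identifier.
*/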
3398
3399
static void ipv6_gen_mode_random_init(struct inet6_dev *idev)
3400
{
3401
struct ipv6_stable_secret *s = &idev->cnf.stable_secret;
3402
3403
if (s->initialized)
3404
return;
3405
s = &idev->cnf.stable_secret;
3406
get_random_bytes(&s->secret, sizeof(s->secret));
3407
s->initialized = true;
3408
}
3409
3410
static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
3411
{
3412
struct in6_addr addr;
3413
3414
/* no link local addresses on L3 master devices */
3415
if (netif_is_l3_master(idev->dev))
3416
return;
3417
3418
/* no link local addresses on devices flagged as slaves */
3419
if (idev->dev->priv_flags & IFF_NO_ADDRCONF)
3420
return;
3421
3422
ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
3423
3424
switch (idev->cnf.addr_gen_mode) {
3425
case IN6_ADDR_GEN_MODE_RANDOM:
3426
ipv6_gen_mode_random_init(idev);
3427
fallthrough;
3428
case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
3429
if (!ipv6_generate_stable_address(&addr, 0, idev))
3430
addrconf_add_linklocal(idev, &addr,
3431
IFA_F_STABLE_PRIVACY);
3432
else if (prefix_route)
3433
addrconf_prefix_route(&addr, 64, 0, idev->dev,
3434
0, 0, GFP_KERNEL);
3435
break;
3436
case IN6_ADDR_GEN_MODE_EUI64:
3437
/* addrconf_add_linklocal also adds a prefix_route and we
3438
* only need to care about prefix routes if ipv6_generate_eui64
3439
* couldn't generate one.
3440
*/
3441
if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
3442
addrconf_add_linklocal(idev, &addr, 0);
3443
else if (prefix_route)
3444
addrconf_prefix_route(&addr, 64, 0, idev->dev,
3445
0, 0, GFP_KERNEL);
3446
break;
3447
case IN6_ADDR_GEN_MODE_NONE:
3448
default:
3449
/* will not add any link local address */
3450
break;
3451
}
3452
}
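/* Summary of the modes handled above: EUI64 derives fe80::/64 from the
* hardware address via ipv6_generate_eui64(); STABLE_PRIVACY and RANDOM
* both use ipv6_generate_stable_address(), RANDOM first seeding a
* per-device random secret; NONE adds no link-local address.  If
* identifier generation fails and prefix_route is set, only the fe80::/64
* prefix route is installed.
*/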
3453
3454
static void addrconf_dev_config(struct net_device *dev)
3455
{
3456
struct inet6_dev *idev;
3457
3458
ASSERT_RTNL();
3459
3460
if ((dev->type != ARPHRD_ETHER) &&
3461
(dev->type != ARPHRD_FDDI) &&
3462
(dev->type != ARPHRD_ARCNET) &&
3463
(dev->type != ARPHRD_INFINIBAND) &&
3464
(dev->type != ARPHRD_IEEE1394) &&
3465
(dev->type != ARPHRD_TUNNEL6) &&
3466
(dev->type != ARPHRD_6LOWPAN) &&
3467
(dev->type != ARPHRD_IP6GRE) &&
3468
(dev->type != ARPHRD_TUNNEL) &&
3469
(dev->type != ARPHRD_NONE) &&
3470
(dev->type != ARPHRD_RAWIP)) {
3471
/* Alas, we support only Ethernet autoconfiguration. */
3472
idev = __in6_dev_get(dev);
3473
if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP &&
3474
dev->flags & IFF_MULTICAST)
3475
ipv6_mc_up(idev);
3476
return;
3477
}
3478
3479
idev = addrconf_add_dev(dev);
3480
if (IS_ERR(idev))
3481
return;
3482
3483
/* this device type has no EUI support */
3484
if (dev->type == ARPHRD_NONE &&
3485
idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
3486
WRITE_ONCE(idev->cnf.addr_gen_mode,
3487
IN6_ADDR_GEN_MODE_RANDOM);
3488
3489
addrconf_addr_gen(idev, false);
3490
}
3491
3492
#if IS_ENABLED(CONFIG_IPV6_SIT)
3493
static void addrconf_sit_config(struct net_device *dev)
3494
{
3495
struct inet6_dev *idev;
3496
3497
ASSERT_RTNL();
3498
3499
/*
3500
* Configure the tunnel with one of our IPv4
3501
* addresses... we should configure all of
3502
* our v4 addrs in the tunnel
3503
*/
3504
3505
idev = ipv6_find_idev(dev);
3506
if (IS_ERR(idev)) {
3507
pr_debug("%s: add_dev failed\n", __func__);
3508
return;
3509
}
3510
3511
if (dev->priv_flags & IFF_ISATAP) {
3512
addrconf_addr_gen(idev, false);
3513
return;
3514
}
3515
3516
add_v4_addrs(idev);
3517
3518
if (dev->flags&IFF_POINTOPOINT)
3519
addrconf_add_mroute(dev);
3520
}
3521
#endif
3522
3523
#if IS_ENABLED(CONFIG_NET_IPGRE)
3524
static void addrconf_gre_config(struct net_device *dev)
3525
{
3526
struct inet6_dev *idev;
3527
3528
ASSERT_RTNL();
3529
3530
idev = addrconf_add_dev(dev);
3531
if (IS_ERR(idev))
3532
return;
3533
3534
/* Generate the IPv6 link-local address using addrconf_addr_gen(),
3535
* unless we have an IPv4 GRE device not bound to an IP address and
3536
* which is in EUI64 mode (as __ipv6_isatap_ifid() would fail in this
3537
* case). Such devices fall back to add_v4_addrs() instead.
3538
*/
3539
if (!(*(__be32 *)dev->dev_addr == 0 &&
3540
idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)) {
3541
addrconf_addr_gen(idev, true);
3542
return;
3543
}
3544
3545
add_v4_addrs(idev);
3546
}
3547
#endif
3548
3549
static void addrconf_init_auto_addrs(struct net_device *dev)
3550
{
3551
switch (dev->type) {
3552
#if IS_ENABLED(CONFIG_IPV6_SIT)
3553
case ARPHRD_SIT:
3554
addrconf_sit_config(dev);
3555
break;
3556
#endif
3557
#if IS_ENABLED(CONFIG_NET_IPGRE)
3558
case ARPHRD_IPGRE:
3559
addrconf_gre_config(dev);
3560
break;
3561
#endif
3562
case ARPHRD_LOOPBACK:
3563
init_loopback(dev);
3564
break;
3565
3566
default:
3567
addrconf_dev_config(dev);
3568
break;
3569
}
3570
}
3571
3572
static int fixup_permanent_addr(struct net *net,
3573
struct inet6_dev *idev,
3574
struct inet6_ifaddr *ifp)
3575
{
3576
/* !fib6_node means the host route was removed from the
3577
* FIB, for example, if 'lo' device is taken down. In that
3578
* case regenerate the host route.
3579
*/
3580
if (!ifp->rt || !ifp->rt->fib6_node) {
3581
struct fib6_info *f6i, *prev;
3582
3583
f6i = addrconf_f6i_alloc(net, idev, &ifp->addr, false,
3584
GFP_ATOMIC, NULL);
3585
if (IS_ERR(f6i))
3586
return PTR_ERR(f6i);
3587
3588
/* ifp->rt can be accessed outside of rtnl */
3589
spin_lock(&ifp->lock);
3590
prev = ifp->rt;
3591
ifp->rt = f6i;
3592
spin_unlock(&ifp->lock);
3593
3594
fib6_info_release(prev);
3595
}
3596
3597
if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
3598
addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3599
ifp->rt_priority, idev->dev, 0, 0,
3600
GFP_ATOMIC);
3601
}
3602
3603
if (ifp->state == INET6_IFADDR_STATE_PREDAD)
3604
addrconf_dad_start(ifp);
3605
3606
return 0;
3607
}
3608
3609
static void addrconf_permanent_addr(struct net *net, struct net_device *dev)
3610
{
3611
struct inet6_ifaddr *ifp, *tmp;
3612
struct inet6_dev *idev;
3613
3614
idev = __in6_dev_get(dev);
3615
if (!idev)
3616
return;
3617
3618
write_lock_bh(&idev->lock);
3619
3620
list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
3621
if ((ifp->flags & IFA_F_PERMANENT) &&
3622
fixup_permanent_addr(net, idev, ifp) < 0) {
3623
write_unlock_bh(&idev->lock);
3624
in6_ifa_hold(ifp);
3625
ipv6_del_addr(ifp);
3626
write_lock_bh(&idev->lock);
3627
3628
net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n",
3629
idev->dev->name, &ifp->addr);
3630
}
3631
}
3632
3633
write_unlock_bh(&idev->lock);
3634
}
3635
3636
static int addrconf_notify(struct notifier_block *this, unsigned long event,
3637
void *ptr)
3638
{
3639
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3640
struct netdev_notifier_change_info *change_info;
3641
struct netdev_notifier_changeupper_info *info;
3642
struct inet6_dev *idev = __in6_dev_get(dev);
3643
struct net *net = dev_net(dev);
3644
int run_pending = 0;
3645
int err;
3646
3647
switch (event) {
3648
case NETDEV_REGISTER:
3649
if (!idev && dev->mtu >= IPV6_MIN_MTU) {
3650
idev = ipv6_add_dev(dev);
3651
if (IS_ERR(idev))
3652
return notifier_from_errno(PTR_ERR(idev));
3653
}
3654
break;
3655
3656
case NETDEV_CHANGEMTU:
3657
/* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
3658
if (dev->mtu < IPV6_MIN_MTU) {
3659
addrconf_ifdown(dev, dev != net->loopback_dev);
3660
break;
3661
}
3662
3663
if (idev) {
3664
rt6_mtu_change(dev, dev->mtu);
3665
WRITE_ONCE(idev->cnf.mtu6, dev->mtu);
3666
break;
3667
}
3668
3669
/* allocate new idev */
3670
idev = ipv6_add_dev(dev);
3671
if (IS_ERR(idev))
3672
break;
3673
3674
/* device is still not ready */
3675
if (!(idev->if_flags & IF_READY))
3676
break;
3677
3678
run_pending = 1;
3679
fallthrough;
3680
case NETDEV_UP:
3681
case NETDEV_CHANGE:
3682
if (idev && idev->cnf.disable_ipv6)
3683
break;
3684
3685
if (dev->priv_flags & IFF_NO_ADDRCONF) {
3686
if (event == NETDEV_UP && !IS_ERR_OR_NULL(idev) &&
3687
dev->flags & IFF_UP && dev->flags & IFF_MULTICAST)
3688
ipv6_mc_up(idev);
3689
break;
3690
}
3691
3692
if (event == NETDEV_UP) {
3693
/* restore routes for permanent addresses */
3694
addrconf_permanent_addr(net, dev);
3695
3696
if (!addrconf_link_ready(dev)) {
3697
/* device is not ready yet. */
3698
pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
3699
dev->name);
3700
break;
3701
}
3702
3703
if (!idev && dev->mtu >= IPV6_MIN_MTU)
3704
idev = ipv6_add_dev(dev);
3705
3706
if (!IS_ERR_OR_NULL(idev)) {
3707
idev->if_flags |= IF_READY;
3708
run_pending = 1;
3709
}
3710
} else if (event == NETDEV_CHANGE) {
3711
if (!addrconf_link_ready(dev)) {
3712
/* device is still not ready. */
3713
rt6_sync_down_dev(dev, event);
3714
break;
3715
}
3716
3717
if (!IS_ERR_OR_NULL(idev)) {
3718
if (idev->if_flags & IF_READY) {
3719
/* device is already configured -
3720
* but resend MLD reports, we might
3721
* have roamed and need to update
3722
* multicast snooping switches
3723
*/
3724
ipv6_mc_up(idev);
3725
change_info = ptr;
3726
if (change_info->flags_changed & IFF_NOARP)
3727
addrconf_dad_run(idev, true);
3728
rt6_sync_up(dev, RTNH_F_LINKDOWN);
3729
break;
3730
}
3731
idev->if_flags |= IF_READY;
3732
}
3733
3734
pr_debug("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
3735
dev->name);
3736
3737
run_pending = 1;
3738
}
3739
3740
addrconf_init_auto_addrs(dev);
3741
3742
if (!IS_ERR_OR_NULL(idev)) {
3743
if (run_pending)
3744
addrconf_dad_run(idev, false);
3745
3746
/* Device has an address by now */
3747
rt6_sync_up(dev, RTNH_F_DEAD);
3748
3749
/*
3750
* If the MTU changed while the interface was down, the
3751
* changed MTU must be reflected in the idev as well as
3752
* in the routes when the interface comes back up.
3753
*/
3754
if (idev->cnf.mtu6 != dev->mtu &&
3755
dev->mtu >= IPV6_MIN_MTU) {
3756
rt6_mtu_change(dev, dev->mtu);
3757
WRITE_ONCE(idev->cnf.mtu6, dev->mtu);
3758
}
3759
WRITE_ONCE(idev->tstamp, jiffies);
3760
inet6_ifinfo_notify(RTM_NEWLINK, idev);
3761
3762
/*
3763
* If the MTU that changed while the interface was down is
3764
* lower than IPV6_MIN_MTU, stop IPv6 on this interface.
3765
*/
3766
if (dev->mtu < IPV6_MIN_MTU)
3767
addrconf_ifdown(dev, dev != net->loopback_dev);
3768
}
3769
break;
3770
3771
case NETDEV_DOWN:
3772
case NETDEV_UNREGISTER:
3773
/*
3774
* Remove all addresses from this interface.
3775
*/
3776
addrconf_ifdown(dev, event != NETDEV_DOWN);
3777
break;
3778
3779
case NETDEV_CHANGENAME:
3780
if (idev) {
3781
snmp6_unregister_dev(idev);
3782
addrconf_sysctl_unregister(idev);
3783
err = addrconf_sysctl_register(idev);
3784
if (err)
3785
return notifier_from_errno(err);
3786
err = snmp6_register_dev(idev);
3787
if (err) {
3788
addrconf_sysctl_unregister(idev);
3789
return notifier_from_errno(err);
3790
}
3791
}
3792
break;
3793
3794
case NETDEV_PRE_TYPE_CHANGE:
3795
case NETDEV_POST_TYPE_CHANGE:
3796
if (idev)
3797
addrconf_type_change(dev, event);
3798
break;
3799
3800
case NETDEV_CHANGEUPPER:
3801
info = ptr;
3802
3803
/* flush all routes if dev is linked to or unlinked from
3804
* an L3 master device (e.g., VRF)
3805
*/
3806
if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3807
addrconf_ifdown(dev, false);
3808
}
3809
3810
return NOTIFY_OK;
3811
}
3812
3813
/*
3814
* addrconf module should be notified of a device going up
3815
*/
3816
static struct notifier_block ipv6_dev_notf = {
3817
.notifier_call = addrconf_notify,
3818
.priority = ADDRCONF_NOTIFY_PRIORITY,
3819
};
3820
3821
static void addrconf_type_change(struct net_device *dev, unsigned long event)
3822
{
3823
struct inet6_dev *idev;
3824
ASSERT_RTNL();
3825
3826
idev = __in6_dev_get(dev);
3827
3828
if (event == NETDEV_POST_TYPE_CHANGE)
3829
ipv6_mc_remap(idev);
3830
else if (event == NETDEV_PRE_TYPE_CHANGE)
3831
ipv6_mc_unmap(idev);
3832
}
3833
3834
static bool addr_is_local(const struct in6_addr *addr)
3835
{
3836
return ipv6_addr_type(addr) &
3837
(IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3838
}
3839
3840
static int addrconf_ifdown(struct net_device *dev, bool unregister)
3841
{
3842
unsigned long event = unregister ? NETDEV_UNREGISTER : NETDEV_DOWN;
3843
struct net *net = dev_net(dev);
3844
struct inet6_dev *idev;
3845
struct inet6_ifaddr *ifa;
3846
LIST_HEAD(tmp_addr_list);
3847
bool keep_addr = false;
3848
bool was_ready;
3849
int state, i;
3850
3851
ASSERT_RTNL();
3852
3853
rt6_disable_ip(dev, event);
3854
3855
idev = __in6_dev_get(dev);
3856
if (!idev)
3857
return -ENODEV;
3858
3859
/*
3860
* Step 1: remove reference to ipv6 device from parent device.
3861
* Do not dev_put!
3862
*/
3863
if (unregister) {
3864
WRITE_ONCE(idev->dead, 1);
3865
3866
/* protected by rtnl_lock */
3867
RCU_INIT_POINTER(dev->ip6_ptr, NULL);
3868
3869
/* Step 1.5: remove snmp6 entry */
3870
snmp6_unregister_dev(idev);
3871
3872
}
3873
3874
/* combine the user config with event to determine if permanent
3875
* addresses are to be removed from address hash table
3876
*/
3877
if (!unregister && !idev->cnf.disable_ipv6) {
3878
/* aggregate the system setting and interface setting */
3879
int _keep_addr = READ_ONCE(net->ipv6.devconf_all->keep_addr_on_down);
3880
3881
if (!_keep_addr)
3882
_keep_addr = READ_ONCE(idev->cnf.keep_addr_on_down);
3883
3884
keep_addr = (_keep_addr > 0);
3885
}
3886
3887
/* Step 2: clear hash table */
3888
for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3889
struct hlist_head *h = &net->ipv6.inet6_addr_lst[i];
3890
3891
spin_lock_bh(&net->ipv6.addrconf_hash_lock);
3892
restart:
3893
hlist_for_each_entry_rcu(ifa, h, addr_lst) {
3894
if (ifa->idev == idev) {
3895
addrconf_del_dad_work(ifa);
3896
/* combined flag + permanent flag decide if
3897
* address is retained on a down event
3898
*/
3899
if (!keep_addr ||
3900
!(ifa->flags & IFA_F_PERMANENT) ||
3901
addr_is_local(&ifa->addr)) {
3902
hlist_del_init_rcu(&ifa->addr_lst);
3903
goto restart;
3904
}
3905
}
3906
}
3907
spin_unlock_bh(&net->ipv6.addrconf_hash_lock);
3908
}
3909
3910
write_lock_bh(&idev->lock);
3911
3912
addrconf_del_rs_timer(idev);
3913
3914
/* Step 3: clear flags for stateless addrconf, repeated down
3915
* detection
3916
*/
3917
was_ready = idev->if_flags & IF_READY;
3918
if (!unregister)
3919
idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
3920
3921
/* Step 4: clear tempaddr list */
3922
while (!list_empty(&idev->tempaddr_list)) {
3923
ifa = list_first_entry(&idev->tempaddr_list,
3924
struct inet6_ifaddr, tmp_list);
3925
list_del(&ifa->tmp_list);
3926
write_unlock_bh(&idev->lock);
3927
spin_lock_bh(&ifa->lock);
3928
3929
if (ifa->ifpub) {
3930
in6_ifa_put(ifa->ifpub);
3931
ifa->ifpub = NULL;
3932
}
3933
spin_unlock_bh(&ifa->lock);
3934
in6_ifa_put(ifa);
3935
write_lock_bh(&idev->lock);
3936
}
3937
3938
list_for_each_entry(ifa, &idev->addr_list, if_list)
3939
list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
3940
write_unlock_bh(&idev->lock);
3941
3942
while (!list_empty(&tmp_addr_list)) {
3943
struct fib6_info *rt = NULL;
3944
bool keep;
3945
3946
ifa = list_first_entry(&tmp_addr_list,
3947
struct inet6_ifaddr, if_list_aux);
3948
list_del(&ifa->if_list_aux);
3949
3950
addrconf_del_dad_work(ifa);
3951
3952
keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3953
!addr_is_local(&ifa->addr);
3954
3955
spin_lock_bh(&ifa->lock);
3956
3957
if (keep) {
3958
/* set state to skip the notifier below */
3959
state = INET6_IFADDR_STATE_DEAD;
3960
ifa->state = INET6_IFADDR_STATE_PREDAD;
3961
if (!(ifa->flags & IFA_F_NODAD))
3962
ifa->flags |= IFA_F_TENTATIVE;
3963
3964
rt = ifa->rt;
3965
ifa->rt = NULL;
3966
} else {
3967
state = ifa->state;
3968
ifa->state = INET6_IFADDR_STATE_DEAD;
3969
}
3970
3971
spin_unlock_bh(&ifa->lock);
3972
3973
if (rt)
3974
ip6_del_rt(net, rt, false);
3975
3976
if (state != INET6_IFADDR_STATE_DEAD) {
3977
__ipv6_ifa_notify(RTM_DELADDR, ifa);
3978
inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
3979
} else {
3980
if (idev->cnf.forwarding)
3981
addrconf_leave_anycast(ifa);
3982
addrconf_leave_solict(ifa->idev, &ifa->addr);
3983
}
3984
3985
if (!keep) {
3986
write_lock_bh(&idev->lock);
3987
list_del_rcu(&ifa->if_list);
3988
write_unlock_bh(&idev->lock);
3989
in6_ifa_put(ifa);
3990
}
3991
}
3992
3993
/* Step 5: Discard anycast and multicast list */
3994
if (unregister) {
3995
ipv6_ac_destroy_dev(idev);
3996
ipv6_mc_destroy_dev(idev);
3997
} else if (was_ready) {
3998
ipv6_mc_down(idev);
3999
}
4000
4001
WRITE_ONCE(idev->tstamp, jiffies);
4002
idev->ra_mtu = 0;
4003
4004
/* Last: Shoot the device (if unregistered) */
4005
if (unregister) {
4006
addrconf_sysctl_unregister(idev);
4007
neigh_parms_release(&nd_tbl, idev->nd_parms);
4008
neigh_ifdown(&nd_tbl, dev);
4009
in6_dev_put(idev);
4010
}
4011
return 0;
4012
}
4013
4014
static void addrconf_rs_timer(struct timer_list *t)
4015
{
4016
struct inet6_dev *idev = timer_container_of(idev, t, rs_timer);
4017
struct net_device *dev = idev->dev;
4018
struct in6_addr lladdr;
4019
int rtr_solicits;
4020
4021
write_lock(&idev->lock);
4022
if (idev->dead || !(idev->if_flags & IF_READY))
4023
goto out;
4024
4025
if (!ipv6_accept_ra(idev))
4026
goto out;
4027
4028
/* Announcement received after solicitation was sent */
4029
if (idev->if_flags & IF_RA_RCVD)
4030
goto out;
4031
4032
rtr_solicits = READ_ONCE(idev->cnf.rtr_solicits);
4033
4034
if (idev->rs_probes++ < rtr_solicits || rtr_solicits < 0) {
4035
write_unlock(&idev->lock);
4036
if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4037
ndisc_send_rs(dev, &lladdr,
4038
&in6addr_linklocal_allrouters);
4039
else
4040
goto put;
4041
4042
write_lock(&idev->lock);
4043
idev->rs_interval = rfc3315_s14_backoff_update(
4044
idev->rs_interval,
4045
READ_ONCE(idev->cnf.rtr_solicit_max_interval));
4046
/* The wait after the last probe can be shorter */
4047
addrconf_mod_rs_timer(idev, (idev->rs_probes ==
4048
READ_ONCE(idev->cnf.rtr_solicits)) ?
4049
READ_ONCE(idev->cnf.rtr_solicit_delay) :
4050
idev->rs_interval);
4051
} else {
4052
/*
4053
* Note: we do not support deprecated "all on-link"
4054
* assumption any longer.
4055
*/
4056
pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
4057
}
4058
4059
out:
4060
write_unlock(&idev->lock);
4061
put:
4062
in6_dev_put(idev);
4063
}
4064
4065
/*
4066
* Duplicate Address Detection
4067
*/
4068
static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
4069
{
4070
struct inet6_dev *idev = ifp->idev;
4071
unsigned long rand_num;
4072
u64 nonce;
4073
4074
if (ifp->flags & IFA_F_OPTIMISTIC)
4075
rand_num = 0;
4076
else
4077
rand_num = get_random_u32_below(
4078
READ_ONCE(idev->cnf.rtr_solicit_delay) ? : 1);
4079
4080
nonce = 0;
4081
if (READ_ONCE(idev->cnf.enhanced_dad) ||
4082
READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad)) {
4083
do
4084
get_random_bytes(&nonce, 6);
4085
while (nonce == 0);
4086
}
4087
ifp->dad_nonce = nonce;
4088
ifp->dad_probes = READ_ONCE(idev->cnf.dad_transmits);
4089
addrconf_mod_dad_work(ifp, rand_num);
4090
}
4091
4092
static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
4093
{
4094
struct inet6_dev *idev = ifp->idev;
4095
struct net_device *dev = idev->dev;
4096
bool bump_id, notify = false;
4097
struct net *net;
4098
4099
addrconf_join_solict(dev, &ifp->addr);
4100
4101
read_lock_bh(&idev->lock);
4102
spin_lock(&ifp->lock);
4103
if (ifp->state == INET6_IFADDR_STATE_DEAD)
4104
goto out;
4105
4106
net = dev_net(dev);
4107
if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
4108
(READ_ONCE(net->ipv6.devconf_all->accept_dad) < 1 &&
4109
READ_ONCE(idev->cnf.accept_dad) < 1) ||
4110
!(ifp->flags&IFA_F_TENTATIVE) ||
4111
ifp->flags & IFA_F_NODAD) {
4112
bool send_na = false;
4113
4114
if (ifp->flags & IFA_F_TENTATIVE &&
4115
!(ifp->flags & IFA_F_OPTIMISTIC))
4116
send_na = true;
4117
bump_id = ifp->flags & IFA_F_TENTATIVE;
4118
ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4119
spin_unlock(&ifp->lock);
4120
read_unlock_bh(&idev->lock);
4121
4122
addrconf_dad_completed(ifp, bump_id, send_na);
4123
return;
4124
}
4125
4126
if (!(idev->if_flags & IF_READY)) {
4127
spin_unlock(&ifp->lock);
4128
read_unlock_bh(&idev->lock);
4129
/*
4130
* If the device is not ready:
4131
* - keep it tentative if it is a permanent address.
4132
* - otherwise, kill it.
4133
*/
4134
in6_ifa_hold(ifp);
4135
addrconf_dad_stop(ifp, 0);
4136
return;
4137
}
4138
4139
/*
 * Optimistic nodes can start receiving
 * frames right away
 */
4143
if (ifp->flags & IFA_F_OPTIMISTIC) {
4144
ip6_ins_rt(net, ifp->rt);
4145
if (ipv6_use_optimistic_addr(net, idev)) {
4146
/* Because optimistic nodes can use this address,
4147
* notify listeners. If DAD fails, RTM_DELADDR is sent.
4148
*/
4149
notify = true;
4150
}
4151
}
4152
4153
addrconf_dad_kick(ifp);
4154
out:
4155
spin_unlock(&ifp->lock);
4156
read_unlock_bh(&idev->lock);
4157
if (notify)
4158
ipv6_ifa_notify(RTM_NEWADDR, ifp);
4159
}
4160
4161
static void addrconf_dad_start(struct inet6_ifaddr *ifp)
4162
{
4163
bool begin_dad = false;
4164
4165
spin_lock_bh(&ifp->lock);
4166
if (ifp->state != INET6_IFADDR_STATE_DEAD) {
4167
ifp->state = INET6_IFADDR_STATE_PREDAD;
4168
begin_dad = true;
4169
}
4170
spin_unlock_bh(&ifp->lock);
4171
4172
if (begin_dad)
4173
addrconf_mod_dad_work(ifp, 0);
4174
}
4175
4176
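/* Deferred DAD handler.  Roughly: PREDAD means DAD has been scheduled but not
 * yet started, so begin it now; ERRDAD means a conflict was reported, so stop
 * DAD and (if so configured) disable IPv6 on the device; otherwise one probe
 * interval has elapsed, so either declare success when dad_probes hits zero
 * or decrement it and send another neighbour solicitation.
 */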
static void addrconf_dad_work(struct work_struct *w)
4177
{
4178
struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
4179
struct inet6_ifaddr,
4180
dad_work);
4181
struct inet6_dev *idev = ifp->idev;
4182
bool bump_id, disable_ipv6 = false;
4183
struct in6_addr mcaddr;
4184
struct net *net;
4185
4186
enum {
4187
DAD_PROCESS,
4188
DAD_BEGIN,
4189
DAD_ABORT,
4190
} action = DAD_PROCESS;
4191
4192
net = dev_net(idev->dev);
4193
4194
rtnl_net_lock(net);
4195
4196
spin_lock_bh(&ifp->lock);
4197
if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
4198
action = DAD_BEGIN;
4199
ifp->state = INET6_IFADDR_STATE_DAD;
4200
} else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
4201
action = DAD_ABORT;
4202
ifp->state = INET6_IFADDR_STATE_POSTDAD;
4203
4204
if ((READ_ONCE(net->ipv6.devconf_all->accept_dad) > 1 ||
4205
READ_ONCE(idev->cnf.accept_dad) > 1) &&
4206
!idev->cnf.disable_ipv6 &&
4207
!(ifp->flags & IFA_F_STABLE_PRIVACY)) {
4208
struct in6_addr addr;
4209
4210
addr.s6_addr32[0] = htonl(0xfe800000);
4211
addr.s6_addr32[1] = 0;
4212
4213
if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
4214
ipv6_addr_equal(&ifp->addr, &addr)) {
4215
/* DAD failed for link-local based on MAC */
4216
WRITE_ONCE(idev->cnf.disable_ipv6, 1);
4217
4218
pr_info("%s: IPv6 being disabled!\n",
4219
ifp->idev->dev->name);
4220
disable_ipv6 = true;
4221
}
4222
}
4223
}
4224
spin_unlock_bh(&ifp->lock);
4225
4226
if (action == DAD_BEGIN) {
4227
addrconf_dad_begin(ifp);
4228
goto out;
4229
} else if (action == DAD_ABORT) {
4230
in6_ifa_hold(ifp);
4231
addrconf_dad_stop(ifp, 1);
4232
if (disable_ipv6)
4233
addrconf_ifdown(idev->dev, false);
4234
goto out;
4235
}
4236
4237
if (!ifp->dad_probes && addrconf_dad_end(ifp))
4238
goto out;
4239
4240
write_lock_bh(&idev->lock);
4241
if (idev->dead || !(idev->if_flags & IF_READY)) {
4242
write_unlock_bh(&idev->lock);
4243
goto out;
4244
}
4245
4246
spin_lock(&ifp->lock);
4247
if (ifp->state == INET6_IFADDR_STATE_DEAD) {
4248
spin_unlock(&ifp->lock);
4249
write_unlock_bh(&idev->lock);
4250
goto out;
4251
}
4252
4253
if (ifp->dad_probes == 0) {
4254
bool send_na = false;
4255
4256
/*
4257
* DAD was successful
4258
*/
4259
4260
if (ifp->flags & IFA_F_TENTATIVE &&
4261
!(ifp->flags & IFA_F_OPTIMISTIC))
4262
send_na = true;
4263
bump_id = ifp->flags & IFA_F_TENTATIVE;
4264
ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4265
spin_unlock(&ifp->lock);
4266
write_unlock_bh(&idev->lock);
4267
4268
addrconf_dad_completed(ifp, bump_id, send_na);
4269
4270
goto out;
4271
}
4272
4273
ifp->dad_probes--;
4274
addrconf_mod_dad_work(ifp,
4275
max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME),
4276
HZ/100));
4277
spin_unlock(&ifp->lock);
4278
write_unlock_bh(&idev->lock);
4279
4280
/* send a neighbour solicitation for our addr */
4281
addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
4282
ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any,
4283
ifp->dad_nonce);
4284
out:
4285
in6_ifa_put(ifp);
4286
rtnl_net_unlock(net);
4287
}
4288
4289
/* ifp->idev must be at least read locked */
4290
static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
4291
{
4292
struct inet6_ifaddr *ifpiter;
4293
struct inet6_dev *idev = ifp->idev;
4294
4295
list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
4296
if (ifpiter->scope > IFA_LINK)
4297
break;
4298
if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
4299
(ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
4300
IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
4301
IFA_F_PERMANENT)
4302
return false;
4303
}
4304
return true;
4305
}
4306
4307
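/* Called once DAD has succeeded: announce the address via RTM_NEWADDR,
 * re-send the MLD report with the now-usable link-local source, optionally
 * send an unsolicited NA (ndisc_notify), and, for the first usable link-local
 * address on a device that accepts RAs, kick off router solicitations.
 */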
static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
4308
bool send_na)
4309
{
4310
struct net_device *dev = ifp->idev->dev;
4311
struct in6_addr lladdr;
4312
bool send_rs, send_mld;
4313
4314
addrconf_del_dad_work(ifp);
4315
4316
/*
4317
* Configure the address for reception. Now it is valid.
4318
*/
4319
4320
ipv6_ifa_notify(RTM_NEWADDR, ifp);
4321
4322
/* If the added prefix is link-local and we are prepared to process
 * router advertisements, start sending router solicitations.
 */
4325
4326
read_lock_bh(&ifp->idev->lock);
4327
send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
4328
send_rs = send_mld &&
4329
ipv6_accept_ra(ifp->idev) &&
4330
READ_ONCE(ifp->idev->cnf.rtr_solicits) != 0 &&
4331
(dev->flags & IFF_LOOPBACK) == 0 &&
4332
(dev->type != ARPHRD_TUNNEL) &&
4333
!netif_is_team_port(dev);
4334
read_unlock_bh(&ifp->idev->lock);
4335
4336
/* While DAD is in progress, the MLD report's source address is the
 * unspecified address (in6addr_any). Resend it with the proper
 * link-local address now.
 */
4339
if (send_mld)
4340
ipv6_mc_dad_complete(ifp->idev);
4341
4342
/* send unsolicited NA if enabled */
4343
if (send_na &&
4344
(READ_ONCE(ifp->idev->cnf.ndisc_notify) ||
4345
READ_ONCE(dev_net(dev)->ipv6.devconf_all->ndisc_notify))) {
4346
ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
4347
/*router=*/ !!ifp->idev->cnf.forwarding,
4348
/*solicited=*/ false, /*override=*/ true,
4349
/*inc_opt=*/ true);
4350
}
4351
4352
if (send_rs) {
4353
/*
 * If a host has already performed a random delay
 * [...] as part of DAD [...] there is no need
 * to delay again before sending the first RS
 */
4358
if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4359
return;
4360
ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters);
4361
4362
write_lock_bh(&ifp->idev->lock);
4363
spin_lock(&ifp->lock);
4364
ifp->idev->rs_interval = rfc3315_s14_backoff_init(
4365
READ_ONCE(ifp->idev->cnf.rtr_solicit_interval));
4366
ifp->idev->rs_probes = 1;
4367
ifp->idev->if_flags |= IF_RS_SENT;
4368
addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
4369
spin_unlock(&ifp->lock);
4370
write_unlock_bh(&ifp->idev->lock);
4371
}
4372
4373
if (bump_id)
4374
rt_genid_bump_ipv6(dev_net(dev));
4375
4376
/* Make sure that a new temporary address will be created
4377
* before this temporary address becomes deprecated.
4378
*/
4379
if (ifp->flags & IFA_F_TEMPORARY)
4380
addrconf_verify_rtnl(dev_net(dev));
4381
}
4382
4383
static void addrconf_dad_run(struct inet6_dev *idev, bool restart)
4384
{
4385
struct inet6_ifaddr *ifp;
4386
4387
read_lock_bh(&idev->lock);
4388
list_for_each_entry(ifp, &idev->addr_list, if_list) {
4389
spin_lock(&ifp->lock);
4390
if ((ifp->flags & IFA_F_TENTATIVE &&
4391
ifp->state == INET6_IFADDR_STATE_DAD) || restart) {
4392
if (restart)
4393
ifp->state = INET6_IFADDR_STATE_PREDAD;
4394
addrconf_dad_kick(ifp);
4395
}
4396
spin_unlock(&ifp->lock);
4397
}
4398
read_unlock_bh(&idev->lock);
4399
}
4400
4401
#ifdef CONFIG_PROC_FS
4402
struct if6_iter_state {
4403
struct seq_net_private p;
4404
int bucket;
4405
int offset;
4406
};
4407
4408
static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
4409
{
4410
struct if6_iter_state *state = seq->private;
4411
struct net *net = seq_file_net(seq);
4412
struct inet6_ifaddr *ifa = NULL;
4413
int p = 0;
4414
4415
/* initial bucket if pos is 0 */
4416
if (pos == 0) {
4417
state->bucket = 0;
4418
state->offset = 0;
4419
}
4420
4421
for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
4422
hlist_for_each_entry_rcu(ifa, &net->ipv6.inet6_addr_lst[state->bucket],
4423
addr_lst) {
4424
/* sync with offset */
4425
if (p < state->offset) {
4426
p++;
4427
continue;
4428
}
4429
return ifa;
4430
}
4431
4432
/* prepare for next bucket */
4433
state->offset = 0;
4434
p = 0;
4435
}
4436
return NULL;
4437
}
4438
4439
static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
4440
struct inet6_ifaddr *ifa)
4441
{
4442
struct if6_iter_state *state = seq->private;
4443
struct net *net = seq_file_net(seq);
4444
4445
hlist_for_each_entry_continue_rcu(ifa, addr_lst) {
4446
state->offset++;
4447
return ifa;
4448
}
4449
4450
state->offset = 0;
4451
while (++state->bucket < IN6_ADDR_HSIZE) {
4452
hlist_for_each_entry_rcu(ifa,
4453
&net->ipv6.inet6_addr_lst[state->bucket], addr_lst) {
4454
return ifa;
4455
}
4456
}
4457
4458
return NULL;
4459
}
4460
4461
static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
4462
__acquires(rcu)
4463
{
4464
rcu_read_lock();
4465
return if6_get_first(seq, *pos);
4466
}
4467
4468
static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4469
{
4470
struct inet6_ifaddr *ifa;
4471
4472
ifa = if6_get_next(seq, v);
4473
++*pos;
4474
return ifa;
4475
}
4476
4477
static void if6_seq_stop(struct seq_file *seq, void *v)
4478
__releases(rcu)
4479
{
4480
rcu_read_unlock();
4481
}
4482
4483
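/* One line per address in /proc/net/if_inet6; with illustrative values:
 *
 *   fe800000000000000000000000000001 02 40 20 80     eth0
 *
 * i.e. the address without colons, then ifindex, prefix length, scope and
 * flags in hex, then the device name.
 */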
static int if6_seq_show(struct seq_file *seq, void *v)
4484
{
4485
struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
4486
seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
4487
&ifp->addr,
4488
ifp->idev->dev->ifindex,
4489
ifp->prefix_len,
4490
ifp->scope,
4491
(u8) ifp->flags,
4492
ifp->idev->dev->name);
4493
return 0;
4494
}
4495
4496
static const struct seq_operations if6_seq_ops = {
4497
.start = if6_seq_start,
4498
.next = if6_seq_next,
4499
.show = if6_seq_show,
4500
.stop = if6_seq_stop,
4501
};
4502
4503
static int __net_init if6_proc_net_init(struct net *net)
4504
{
4505
if (!proc_create_net("if_inet6", 0444, net->proc_net, &if6_seq_ops,
4506
sizeof(struct if6_iter_state)))
4507
return -ENOMEM;
4508
return 0;
4509
}
4510
4511
static void __net_exit if6_proc_net_exit(struct net *net)
4512
{
4513
remove_proc_entry("if_inet6", net->proc_net);
4514
}
4515
4516
static struct pernet_operations if6_proc_net_ops = {
4517
.init = if6_proc_net_init,
4518
.exit = if6_proc_net_exit,
4519
};
4520
4521
int __init if6_proc_init(void)
4522
{
4523
return register_pernet_subsys(&if6_proc_net_ops);
4524
}
4525
4526
void if6_proc_exit(void)
4527
{
4528
unregister_pernet_subsys(&if6_proc_net_ops);
4529
}
4530
#endif /* CONFIG_PROC_FS */
4531
4532
#if IS_ENABLED(CONFIG_IPV6_MIP6)
4533
/* Check if address is a home address configured on any interface. */
4534
int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
4535
{
4536
unsigned int hash = inet6_addr_hash(net, addr);
4537
struct inet6_ifaddr *ifp = NULL;
4538
int ret = 0;
4539
4540
rcu_read_lock();
4541
hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
4542
if (ipv6_addr_equal(&ifp->addr, addr) &&
4543
(ifp->flags & IFA_F_HOMEADDRESS)) {
4544
ret = 1;
4545
break;
4546
}
4547
}
4548
rcu_read_unlock();
4549
return ret;
4550
}
4551
#endif
4552
4553
/* RFC6554 describes an algorithm to avoid loops in segment routing by
 * checking whether the segment list contains any of the local interface
 * addresses.
 *
 * Quote:
 *
 * To detect loops in the SRH, a router MUST determine if the SRH
 * includes multiple addresses assigned to any interface on that router.
 * If such addresses appear more than once and are separated by at least
 * one address not assigned to that router.
 */
4563
int ipv6_chk_rpl_srh_loop(struct net *net, const struct in6_addr *segs,
4564
unsigned char nsegs)
4565
{
4566
const struct in6_addr *addr;
4567
int i, ret = 0, found = 0;
4568
struct inet6_ifaddr *ifp;
4569
bool separated = false;
4570
unsigned int hash;
4571
bool hash_found;
4572
4573
rcu_read_lock();
4574
for (i = 0; i < nsegs; i++) {
4575
addr = &segs[i];
4576
hash = inet6_addr_hash(net, addr);
4577
4578
hash_found = false;
4579
hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
4580
4581
if (ipv6_addr_equal(&ifp->addr, addr)) {
4582
hash_found = true;
4583
break;
4584
}
4585
}
4586
4587
if (hash_found) {
4588
if (found > 1 && separated) {
4589
ret = 1;
4590
break;
4591
}
4592
4593
separated = false;
4594
found++;
4595
} else {
4596
separated = true;
4597
}
4598
}
4599
rcu_read_unlock();
4600
4601
return ret;
4602
}
4603
4604
/*
4605
* Periodic address status verification
4606
*/
4607
4608
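/* Walk every inet6_addr_lst hash bucket and, for each address: regenerate
 * temporary addresses that are close to deprecation, delete addresses whose
 * valid lifetime has expired, mark addresses past their preferred lifetime
 * as IFA_F_DEPRECATED, and finally re-arm addr_chk_work for the earliest
 * upcoming event (rounded/fuzzed so that wakeups can be batched).
 */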
static void addrconf_verify_rtnl(struct net *net)
4609
{
4610
unsigned long now, next, next_sec, next_sched;
4611
struct inet6_ifaddr *ifp;
4612
int i;
4613
4614
ASSERT_RTNL();
4615
4616
rcu_read_lock_bh();
4617
now = jiffies;
4618
next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
4619
4620
cancel_delayed_work(&net->ipv6.addr_chk_work);
4621
4622
for (i = 0; i < IN6_ADDR_HSIZE; i++) {
4623
restart:
4624
hlist_for_each_entry_rcu_bh(ifp, &net->ipv6.inet6_addr_lst[i], addr_lst) {
4625
unsigned long age;
4626
4627
/* An address flagged IFA_F_PERMANENT can still have a finite preferred
 * lifetime: preferred_lft may be set to a value that is neither zero nor
 * infinity while valid_lft remains infinite, so check prefered_lft here
 * rather than the flag alone.
 */
4631
if ((ifp->flags & IFA_F_PERMANENT) &&
4632
(ifp->prefered_lft == INFINITY_LIFE_TIME))
4633
continue;
4634
4635
spin_lock(&ifp->lock);
4636
/* We try to batch several events at once. */
4637
age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
4638
4639
if ((ifp->flags&IFA_F_TEMPORARY) &&
4640
!(ifp->flags&IFA_F_TENTATIVE) &&
4641
ifp->prefered_lft != INFINITY_LIFE_TIME &&
4642
!ifp->regen_count && ifp->ifpub) {
4643
/* This is a non-regenerated temporary addr. */
4644
4645
unsigned long regen_advance = ipv6_get_regen_advance(ifp->idev);
4646
4647
if (age + regen_advance >= ifp->prefered_lft) {
4648
struct inet6_ifaddr *ifpub = ifp->ifpub;
4649
if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4650
next = ifp->tstamp + ifp->prefered_lft * HZ;
4651
4652
ifp->regen_count++;
4653
in6_ifa_hold(ifp);
4654
in6_ifa_hold(ifpub);
4655
spin_unlock(&ifp->lock);
4656
4657
spin_lock(&ifpub->lock);
4658
ifpub->regen_count = 0;
4659
spin_unlock(&ifpub->lock);
4660
rcu_read_unlock_bh();
4661
ipv6_create_tempaddr(ifpub, true);
4662
in6_ifa_put(ifpub);
4663
in6_ifa_put(ifp);
4664
rcu_read_lock_bh();
4665
goto restart;
4666
} else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
4667
next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
4668
}
4669
4670
if (ifp->valid_lft != INFINITY_LIFE_TIME &&
4671
age >= ifp->valid_lft) {
4672
spin_unlock(&ifp->lock);
4673
in6_ifa_hold(ifp);
4674
rcu_read_unlock_bh();
4675
ipv6_del_addr(ifp);
4676
rcu_read_lock_bh();
4677
goto restart;
4678
} else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
4679
spin_unlock(&ifp->lock);
4680
continue;
4681
} else if (age >= ifp->prefered_lft) {
4682
/* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
4683
int deprecate = 0;
4684
4685
if (!(ifp->flags&IFA_F_DEPRECATED)) {
4686
deprecate = 1;
4687
ifp->flags |= IFA_F_DEPRECATED;
4688
}
4689
4690
if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
4691
(time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
4692
next = ifp->tstamp + ifp->valid_lft * HZ;
4693
4694
spin_unlock(&ifp->lock);
4695
4696
if (deprecate) {
4697
in6_ifa_hold(ifp);
4698
4699
ipv6_ifa_notify(0, ifp);
4700
in6_ifa_put(ifp);
4701
goto restart;
4702
}
4703
} else {
4704
/* ifp->prefered_lft <= ifp->valid_lft */
4705
if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4706
next = ifp->tstamp + ifp->prefered_lft * HZ;
4707
spin_unlock(&ifp->lock);
4708
}
4709
}
4710
}
4711
4712
next_sec = round_jiffies_up(next);
4713
next_sched = next;
4714
4715
/* If rounded timeout is accurate enough, accept it. */
4716
if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
4717
next_sched = next_sec;
4718
4719
/* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
4720
if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
4721
next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
4722
4723
pr_debug("now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
4724
now, next, next_sec, next_sched);
4725
mod_delayed_work(addrconf_wq, &net->ipv6.addr_chk_work, next_sched - now);
4726
rcu_read_unlock_bh();
4727
}
4728
4729
static void addrconf_verify_work(struct work_struct *w)
4730
{
4731
struct net *net = container_of(to_delayed_work(w), struct net,
4732
ipv6.addr_chk_work);
4733
4734
rtnl_net_lock(net);
4735
addrconf_verify_rtnl(net);
4736
rtnl_net_unlock(net);
4737
}
4738
4739
static void addrconf_verify(struct net *net)
4740
{
4741
mod_delayed_work(addrconf_wq, &net->ipv6.addr_chk_work, 0);
4742
}
4743
4744
static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
4745
struct in6_addr **peer_pfx)
4746
{
4747
struct in6_addr *pfx = NULL;
4748
4749
*peer_pfx = NULL;
4750
4751
if (addr)
4752
pfx = nla_data(addr);
4753
4754
if (local) {
4755
if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
4756
*peer_pfx = pfx;
4757
pfx = nla_data(local);
4758
}
4759
4760
return pfx;
4761
}
4762
4763
static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
4764
[IFA_ADDRESS] = { .len = sizeof(struct in6_addr) },
4765
[IFA_LOCAL] = { .len = sizeof(struct in6_addr) },
4766
[IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
4767
[IFA_FLAGS] = { .len = sizeof(u32) },
4768
[IFA_RT_PRIORITY] = { .len = sizeof(u32) },
4769
[IFA_TARGET_NETNSID] = { .type = NLA_S32 },
4770
[IFA_PROTO] = { .type = NLA_U8 },
4771
};
4772
4773
static int
4774
inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
4775
struct netlink_ext_ack *extack)
4776
{
4777
struct net *net = sock_net(skb->sk);
4778
struct ifaddrmsg *ifm;
4779
struct nlattr *tb[IFA_MAX+1];
4780
struct in6_addr *pfx, *peer_pfx;
4781
u32 ifa_flags;
4782
int err;
4783
4784
err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
4785
ifa_ipv6_policy, extack);
4786
if (err < 0)
4787
return err;
4788
4789
ifm = nlmsg_data(nlh);
4790
pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4791
if (!pfx)
4792
return -EINVAL;
4793
4794
ifa_flags = nla_get_u32_default(tb[IFA_FLAGS], ifm->ifa_flags);
4795
4796
/* We ignore other flags so far. */
4797
ifa_flags &= IFA_F_MANAGETEMPADDR;
4798
4799
rtnl_net_lock(net);
4800
err = inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
4801
ifm->ifa_prefixlen, extack);
4802
rtnl_net_unlock(net);
4803
4804
return err;
4805
}
4806
4807
static int modify_prefix_route(struct net *net, struct inet6_ifaddr *ifp,
4808
unsigned long expires, u32 flags,
4809
bool modify_peer)
4810
{
4811
struct fib6_table *table;
4812
struct fib6_info *f6i;
4813
u32 prio;
4814
4815
f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4816
ifp->prefix_len,
4817
ifp->idev->dev, 0, RTF_DEFAULT, true);
4818
if (!f6i)
4819
return -ENOENT;
4820
4821
prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF;
4822
if (f6i->fib6_metric != prio) {
4823
/* delete old one */
4824
ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
4825
4826
/* add new one */
4827
addrconf_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4828
ifp->prefix_len,
4829
ifp->rt_priority, ifp->idev->dev,
4830
expires, flags, GFP_KERNEL);
4831
return 0;
4832
}
4833
if (f6i != net->ipv6.fib6_null_entry) {
4834
table = f6i->fib6_table;
4835
spin_lock_bh(&table->tb6_lock);
4836
4837
if (!(flags & RTF_EXPIRES)) {
4838
fib6_clean_expires(f6i);
4839
fib6_remove_gc_list(f6i);
4840
} else {
4841
fib6_set_expires(f6i, expires);
4842
fib6_add_gc_list(f6i);
4843
}
4844
4845
spin_unlock_bh(&table->tb6_lock);
4846
}
4847
fib6_info_release(f6i);
4848
4849
return 0;
4850
}
4851
4852
static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
4853
struct ifa6_config *cfg, clock_t expires,
4854
u32 flags)
4855
{
4856
bool was_managetempaddr;
4857
bool new_peer = false;
4858
bool had_prefixroute;
4859
4860
ASSERT_RTNL_NET(net);
4861
4862
if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR &&
4863
(ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
4864
return -EINVAL;
4865
4866
if (!(ifp->flags & IFA_F_TENTATIVE) || ifp->flags & IFA_F_DADFAILED)
4867
cfg->ifa_flags &= ~IFA_F_OPTIMISTIC;
4868
4869
if (cfg->peer_pfx &&
4870
memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) {
4871
if (!ipv6_addr_any(&ifp->peer_addr))
4872
cleanup_prefix_route(ifp, expires, true, true);
4873
new_peer = true;
4874
}
4875
4876
spin_lock_bh(&ifp->lock);
4877
was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
4878
had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
4879
!(ifp->flags & IFA_F_NOPREFIXROUTE);
4880
ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
4881
IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4882
IFA_F_NOPREFIXROUTE);
4883
ifp->flags |= cfg->ifa_flags;
4884
WRITE_ONCE(ifp->tstamp, jiffies);
4885
WRITE_ONCE(ifp->valid_lft, cfg->valid_lft);
4886
WRITE_ONCE(ifp->prefered_lft, cfg->preferred_lft);
4887
WRITE_ONCE(ifp->ifa_proto, cfg->ifa_proto);
4888
4889
if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
4890
WRITE_ONCE(ifp->rt_priority, cfg->rt_priority);
4891
4892
if (new_peer)
4893
ifp->peer_addr = *cfg->peer_pfx;
4894
4895
spin_unlock_bh(&ifp->lock);
4896
if (!(ifp->flags&IFA_F_TENTATIVE))
4897
ipv6_ifa_notify(0, ifp);
4898
4899
if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
4900
int rc = -ENOENT;
4901
4902
if (had_prefixroute)
4903
rc = modify_prefix_route(net, ifp, expires, flags, false);
4904
4905
/* prefix route could have been deleted; if so restore it */
4906
if (rc == -ENOENT) {
4907
addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
4908
ifp->rt_priority, ifp->idev->dev,
4909
expires, flags, GFP_KERNEL);
4910
}
4911
4912
if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
4913
rc = modify_prefix_route(net, ifp, expires, flags, true);
4914
4915
if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
4916
addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
4917
ifp->rt_priority, ifp->idev->dev,
4918
expires, flags, GFP_KERNEL);
4919
}
4920
} else if (had_prefixroute) {
4921
enum cleanup_prefix_rt_t action;
4922
unsigned long rt_expires;
4923
4924
write_lock_bh(&ifp->idev->lock);
4925
action = check_cleanup_prefix_route(ifp, &rt_expires);
4926
write_unlock_bh(&ifp->idev->lock);
4927
4928
if (action != CLEANUP_PREFIX_RT_NOP) {
4929
cleanup_prefix_route(ifp, rt_expires,
4930
action == CLEANUP_PREFIX_RT_DEL, false);
4931
}
4932
}
4933
4934
if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
4935
if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR))
4936
delete_tempaddrs(ifp->idev, ifp);
4937
else
4938
manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft,
4939
cfg->preferred_lft, !was_managetempaddr,
4940
jiffies);
4941
}
4942
4943
addrconf_verify_rtnl(net);
4944
4945
return 0;
4946
}
4947
4948
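/* RTM_NEWADDR handler: create the address if it does not exist yet; if it
 * already does, NLM_F_REPLACE (without NLM_F_EXCL) turns the request into a
 * modify, otherwise -EEXIST is returned.
 */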
static int
4949
inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
4950
struct netlink_ext_ack *extack)
4951
{
4952
struct net *net = sock_net(skb->sk);
4953
struct nlattr *tb[IFA_MAX+1];
4954
struct in6_addr *peer_pfx;
4955
struct inet6_ifaddr *ifa;
4956
struct net_device *dev;
4957
struct inet6_dev *idev;
4958
struct ifa6_config cfg;
4959
struct ifaddrmsg *ifm;
4960
unsigned long timeout;
4961
clock_t expires;
4962
u32 flags;
4963
int err;
4964
4965
err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
4966
ifa_ipv6_policy, extack);
4967
if (err < 0)
4968
return err;
4969
4970
memset(&cfg, 0, sizeof(cfg));
4971
4972
ifm = nlmsg_data(nlh);
4973
cfg.pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4974
if (!cfg.pfx)
4975
return -EINVAL;
4976
4977
cfg.peer_pfx = peer_pfx;
4978
cfg.plen = ifm->ifa_prefixlen;
4979
if (tb[IFA_RT_PRIORITY])
4980
cfg.rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);
4981
4982
if (tb[IFA_PROTO])
4983
cfg.ifa_proto = nla_get_u8(tb[IFA_PROTO]);
4984
4985
cfg.ifa_flags = nla_get_u32_default(tb[IFA_FLAGS], ifm->ifa_flags);
4986
4987
/* We ignore other flags so far. */
4988
cfg.ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS |
4989
IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE |
4990
IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
4991
4992
cfg.ifa_flags |= IFA_F_PERMANENT;
4993
cfg.valid_lft = INFINITY_LIFE_TIME;
4994
cfg.preferred_lft = INFINITY_LIFE_TIME;
4995
expires = 0;
4996
flags = 0;
4997
4998
if (tb[IFA_CACHEINFO]) {
4999
struct ifa_cacheinfo *ci;
5000
5001
ci = nla_data(tb[IFA_CACHEINFO]);
5002
cfg.valid_lft = ci->ifa_valid;
5003
cfg.preferred_lft = ci->ifa_prefered;
5004
5005
if (!cfg.valid_lft || cfg.preferred_lft > cfg.valid_lft) {
5006
NL_SET_ERR_MSG_MOD(extack, "address lifetime invalid");
5007
return -EINVAL;
5008
}
5009
5010
timeout = addrconf_timeout_fixup(cfg.valid_lft, HZ);
5011
if (addrconf_finite_timeout(timeout)) {
5012
cfg.ifa_flags &= ~IFA_F_PERMANENT;
5013
cfg.valid_lft = timeout;
5014
expires = jiffies_to_clock_t(timeout * HZ);
5015
flags = RTF_EXPIRES;
5016
}
5017
5018
timeout = addrconf_timeout_fixup(cfg.preferred_lft, HZ);
5019
if (addrconf_finite_timeout(timeout)) {
5020
if (timeout == 0)
5021
cfg.ifa_flags |= IFA_F_DEPRECATED;
5022
5023
cfg.preferred_lft = timeout;
5024
}
5025
}
5026
5027
rtnl_net_lock(net);
5028
5029
dev = __dev_get_by_index(net, ifm->ifa_index);
5030
if (!dev) {
5031
NL_SET_ERR_MSG_MOD(extack, "Unable to find the interface");
5032
err = -ENODEV;
5033
goto unlock_rtnl;
5034
}
5035
5036
netdev_lock_ops(dev);
5037
idev = ipv6_find_idev(dev);
5038
if (IS_ERR(idev)) {
5039
err = PTR_ERR(idev);
5040
goto unlock;
5041
}
5042
5043
if (!ipv6_allow_optimistic_dad(net, idev))
5044
cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
5045
5046
if (cfg.ifa_flags & IFA_F_NODAD &&
5047
cfg.ifa_flags & IFA_F_OPTIMISTIC) {
5048
NL_SET_ERR_MSG(extack, "IFA_F_NODAD and IFA_F_OPTIMISTIC are mutually exclusive");
5049
err = -EINVAL;
5050
goto unlock;
5051
}
5052
5053
ifa = ipv6_get_ifaddr(net, cfg.pfx, dev, 1);
5054
if (!ifa) {
5055
/*
5056
* It would be best to check for !NLM_F_CREATE here but
5057
* userspace already relies on not having to provide this.
5058
*/
5059
err = inet6_addr_add(net, dev, &cfg, expires, flags, extack);
5060
goto unlock;
5061
}
5062
5063
if (nlh->nlmsg_flags & NLM_F_EXCL ||
5064
!(nlh->nlmsg_flags & NLM_F_REPLACE)) {
5065
NL_SET_ERR_MSG_MOD(extack, "address already assigned");
5066
err = -EEXIST;
5067
} else {
5068
err = inet6_addr_modify(net, ifa, &cfg, expires, flags);
5069
}
5070
5071
in6_ifa_put(ifa);
5072
unlock:
5073
netdev_unlock_ops(dev);
5074
unlock_rtnl:
5075
rtnl_net_unlock(net);
5076
5077
return err;
5078
}
5079
5080
static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
5081
u8 scope, int ifindex)
5082
{
5083
struct ifaddrmsg *ifm;
5084
5085
ifm = nlmsg_data(nlh);
5086
ifm->ifa_family = AF_INET6;
5087
ifm->ifa_prefixlen = prefixlen;
5088
ifm->ifa_flags = flags;
5089
ifm->ifa_scope = scope;
5090
ifm->ifa_index = ifindex;
5091
}
5092
5093
static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
5094
unsigned long tstamp, u32 preferred, u32 valid)
5095
{
5096
struct ifa_cacheinfo ci;
5097
5098
ci.cstamp = cstamp_delta(cstamp);
5099
ci.tstamp = cstamp_delta(tstamp);
5100
ci.ifa_prefered = preferred;
5101
ci.ifa_valid = valid;
5102
5103
return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
5104
}
5105
5106
static inline int rt_scope(int ifa_scope)
5107
{
5108
if (ifa_scope & IFA_HOST)
5109
return RT_SCOPE_HOST;
5110
else if (ifa_scope & IFA_LINK)
5111
return RT_SCOPE_LINK;
5112
else if (ifa_scope & IFA_SITE)
5113
return RT_SCOPE_SITE;
5114
else
5115
return RT_SCOPE_UNIVERSE;
5116
}
5117
5118
static inline int inet6_ifaddr_msgsize(void)
5119
{
5120
return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
5121
+ nla_total_size(16) /* IFA_LOCAL */
5122
+ nla_total_size(16) /* IFA_ADDRESS */
5123
+ nla_total_size(sizeof(struct ifa_cacheinfo))
5124
+ nla_total_size(4) /* IFA_FLAGS */
5125
+ nla_total_size(1) /* IFA_PROTO */
5126
+ nla_total_size(4) /* IFA_RT_PRIORITY */;
5127
}
5128
5129
static int inet6_fill_ifaddr(struct sk_buff *skb,
5130
const struct inet6_ifaddr *ifa,
5131
struct inet6_fill_args *args)
5132
{
5133
struct nlmsghdr *nlh;
5134
u32 preferred, valid;
5135
u32 flags, priority;
5136
u8 proto;
5137
5138
nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5139
sizeof(struct ifaddrmsg), args->flags);
5140
if (!nlh)
5141
return -EMSGSIZE;
5142
5143
flags = READ_ONCE(ifa->flags);
5144
put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
5145
ifa->idev->dev->ifindex);
5146
5147
if (args->netnsid >= 0 &&
5148
nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
5149
goto error;
5150
5151
preferred = READ_ONCE(ifa->prefered_lft);
5152
valid = READ_ONCE(ifa->valid_lft);
5153
5154
if (!((flags & IFA_F_PERMANENT) &&
5155
(preferred == INFINITY_LIFE_TIME))) {
5156
if (preferred != INFINITY_LIFE_TIME) {
5157
long tval = (jiffies - READ_ONCE(ifa->tstamp)) / HZ;
5158
5159
if (preferred > tval)
5160
preferred -= tval;
5161
else
5162
preferred = 0;
5163
if (valid != INFINITY_LIFE_TIME) {
5164
if (valid > tval)
5165
valid -= tval;
5166
else
5167
valid = 0;
5168
}
5169
}
5170
} else {
5171
preferred = INFINITY_LIFE_TIME;
5172
valid = INFINITY_LIFE_TIME;
5173
}
5174
5175
if (!ipv6_addr_any(&ifa->peer_addr)) {
5176
if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
5177
nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
5178
goto error;
5179
} else {
5180
if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
5181
goto error;
5182
}
5183
5184
priority = READ_ONCE(ifa->rt_priority);
5185
if (priority && nla_put_u32(skb, IFA_RT_PRIORITY, priority))
5186
goto error;
5187
5188
if (put_cacheinfo(skb, ifa->cstamp, READ_ONCE(ifa->tstamp),
5189
preferred, valid) < 0)
5190
goto error;
5191
5192
if (nla_put_u32(skb, IFA_FLAGS, flags) < 0)
5193
goto error;
5194
5195
proto = READ_ONCE(ifa->ifa_proto);
5196
if (proto && nla_put_u8(skb, IFA_PROTO, proto))
5197
goto error;
5198
5199
nlmsg_end(skb, nlh);
5200
return 0;
5201
5202
error:
5203
nlmsg_cancel(skb, nlh);
5204
return -EMSGSIZE;
5205
}
5206
5207
int inet6_fill_ifmcaddr(struct sk_buff *skb,
5208
const struct ifmcaddr6 *ifmca,
5209
struct inet6_fill_args *args)
5210
{
5211
int ifindex = ifmca->idev->dev->ifindex;
5212
u8 scope = RT_SCOPE_UNIVERSE;
5213
struct nlmsghdr *nlh;
5214
5215
if (!args->force_rt_scope_universe &&
5216
ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
5217
scope = RT_SCOPE_SITE;
5218
5219
nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5220
sizeof(struct ifaddrmsg), args->flags);
5221
if (!nlh)
5222
return -EMSGSIZE;
5223
5224
if (args->netnsid >= 0 &&
5225
nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
5226
nlmsg_cancel(skb, nlh);
5227
return -EMSGSIZE;
5228
}
5229
5230
put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
5231
if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
5232
put_cacheinfo(skb, ifmca->mca_cstamp, READ_ONCE(ifmca->mca_tstamp),
5233
INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
5234
nlmsg_cancel(skb, nlh);
5235
return -EMSGSIZE;
5236
}
5237
5238
nlmsg_end(skb, nlh);
5239
return 0;
5240
}
5241
5242
int inet6_fill_ifacaddr(struct sk_buff *skb,
5243
const struct ifacaddr6 *ifaca,
5244
struct inet6_fill_args *args)
5245
{
5246
struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt);
5247
int ifindex = dev ? dev->ifindex : 1;
5248
u8 scope = RT_SCOPE_UNIVERSE;
5249
struct nlmsghdr *nlh;
5250
5251
if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
5252
scope = RT_SCOPE_SITE;
5253
5254
nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5255
sizeof(struct ifaddrmsg), args->flags);
5256
if (!nlh)
5257
return -EMSGSIZE;
5258
5259
if (args->netnsid >= 0 &&
5260
nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
5261
nlmsg_cancel(skb, nlh);
5262
return -EMSGSIZE;
5263
}
5264
5265
put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
5266
if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
5267
put_cacheinfo(skb, ifaca->aca_cstamp, READ_ONCE(ifaca->aca_tstamp),
5268
INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
5269
nlmsg_cancel(skb, nlh);
5270
return -EMSGSIZE;
5271
}
5272
5273
nlmsg_end(skb, nlh);
5274
return 0;
5275
}
5276
5277
/* called with rcu_read_lock() */
5278
static int in6_dump_addrs(const struct inet6_dev *idev, struct sk_buff *skb,
5279
struct netlink_callback *cb, int *s_ip_idx,
5280
struct inet6_fill_args *fillargs)
5281
{
5282
const struct ifmcaddr6 *ifmca;
5283
const struct ifacaddr6 *ifaca;
5284
int ip_idx = 0;
5285
int err = 0;
5286
5287
switch (fillargs->type) {
5288
case UNICAST_ADDR: {
5289
const struct inet6_ifaddr *ifa;
5290
fillargs->event = RTM_NEWADDR;
5291
5292
/* unicast address incl. temp addr */
5293
list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
5294
if (ip_idx < *s_ip_idx)
5295
goto next;
5296
err = inet6_fill_ifaddr(skb, ifa, fillargs);
5297
if (err < 0)
5298
break;
5299
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
5300
next:
5301
ip_idx++;
5302
}
5303
break;
5304
}
5305
case MULTICAST_ADDR:
5306
fillargs->event = RTM_GETMULTICAST;
5307
5308
/* multicast address */
5309
for (ifmca = rcu_dereference(idev->mc_list);
5310
ifmca;
5311
ifmca = rcu_dereference(ifmca->next), ip_idx++) {
5312
if (ip_idx < *s_ip_idx)
5313
continue;
5314
err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
5315
if (err < 0)
5316
break;
5317
}
5318
break;
5319
case ANYCAST_ADDR:
5320
fillargs->event = RTM_GETANYCAST;
5321
/* anycast address */
5322
for (ifaca = rcu_dereference(idev->ac_list); ifaca;
5323
ifaca = rcu_dereference(ifaca->aca_next), ip_idx++) {
5324
if (ip_idx < *s_ip_idx)
5325
continue;
5326
err = inet6_fill_ifacaddr(skb, ifaca, fillargs);
5327
if (err < 0)
5328
break;
5329
}
5330
break;
5331
default:
5332
break;
5333
}
5334
*s_ip_idx = err ? ip_idx : 0;
5335
return err;
5336
}
5337
5338
static int inet6_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
5339
struct inet6_fill_args *fillargs,
5340
struct net **tgt_net, struct sock *sk,
5341
struct netlink_callback *cb)
5342
{
5343
struct netlink_ext_ack *extack = cb->extack;
5344
struct nlattr *tb[IFA_MAX+1];
5345
struct ifaddrmsg *ifm;
5346
int err, i;
5347
5348
ifm = nlmsg_payload(nlh, sizeof(*ifm));
5349
if (!ifm) {
5350
NL_SET_ERR_MSG_MOD(extack, "Invalid header for address dump request");
5351
return -EINVAL;
5352
}
5353
5354
if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
5355
NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address dump request");
5356
return -EINVAL;
5357
}
5358
5359
fillargs->ifindex = ifm->ifa_index;
5360
if (fillargs->ifindex) {
5361
cb->answer_flags |= NLM_F_DUMP_FILTERED;
5362
fillargs->flags |= NLM_F_DUMP_FILTERED;
5363
}
5364
5365
err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
5366
ifa_ipv6_policy, extack);
5367
if (err < 0)
5368
return err;
5369
5370
for (i = 0; i <= IFA_MAX; ++i) {
5371
if (!tb[i])
5372
continue;
5373
5374
if (i == IFA_TARGET_NETNSID) {
5375
struct net *net;
5376
5377
fillargs->netnsid = nla_get_s32(tb[i]);
5378
net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
5379
if (IS_ERR(net)) {
5380
fillargs->netnsid = -1;
5381
NL_SET_ERR_MSG_MOD(extack, "Invalid target network namespace id");
5382
return PTR_ERR(net);
5383
}
5384
*tgt_net = net;
5385
} else {
5386
NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request");
5387
return -EINVAL;
5388
}
5389
}
5390
5391
return 0;
5392
}
5393
5394
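/* Netlink dump entry point shared by the unicast, multicast and anycast
 * dumps.  Resumption state lives in cb->ctx: the interface index the walk
 * stopped at and the per-interface address index, so a dump interrupted by a
 * full skb continues where it left off.
 */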
static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
5395
enum addr_type_t type)
5396
{
5397
struct net *tgt_net = sock_net(skb->sk);
5398
const struct nlmsghdr *nlh = cb->nlh;
5399
struct inet6_fill_args fillargs = {
5400
.portid = NETLINK_CB(cb->skb).portid,
5401
.seq = cb->nlh->nlmsg_seq,
5402
.flags = NLM_F_MULTI,
5403
.netnsid = -1,
5404
.type = type,
5405
.force_rt_scope_universe = false,
5406
};
5407
struct {
5408
unsigned long ifindex;
5409
int ip_idx;
5410
} *ctx = (void *)cb->ctx;
5411
struct net_device *dev;
5412
struct inet6_dev *idev;
5413
int err = 0;
5414
5415
rcu_read_lock();
5416
if (cb->strict_check) {
5417
err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
5418
skb->sk, cb);
5419
if (err < 0)
5420
goto done;
5421
5422
err = 0;
5423
if (fillargs.ifindex) {
5424
dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);
5425
if (!dev) {
5426
err = -ENODEV;
5427
goto done;
5428
}
5429
idev = __in6_dev_get(dev);
5430
if (idev)
5431
err = in6_dump_addrs(idev, skb, cb,
5432
&ctx->ip_idx,
5433
&fillargs);
5434
goto done;
5435
}
5436
}
5437
5438
cb->seq = inet6_base_seq(tgt_net);
5439
for_each_netdev_dump(tgt_net, dev, ctx->ifindex) {
5440
idev = __in6_dev_get(dev);
5441
if (!idev)
5442
continue;
5443
err = in6_dump_addrs(idev, skb, cb, &ctx->ip_idx,
5444
&fillargs);
5445
if (err < 0)
5446
goto done;
5447
}
5448
done:
5449
rcu_read_unlock();
5450
if (fillargs.netnsid >= 0)
5451
put_net(tgt_net);
5452
5453
return err;
5454
}
5455
5456
static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
5457
{
5458
enum addr_type_t type = UNICAST_ADDR;
5459
5460
return inet6_dump_addr(skb, cb, type);
5461
}
5462
5463
static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
5464
{
5465
enum addr_type_t type = MULTICAST_ADDR;
5466
5467
return inet6_dump_addr(skb, cb, type);
5468
}
5469
5470
5471
static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
5472
{
5473
enum addr_type_t type = ANYCAST_ADDR;
5474
5475
return inet6_dump_addr(skb, cb, type);
5476
}
5477
5478
static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb,
5479
const struct nlmsghdr *nlh,
5480
struct nlattr **tb,
5481
struct netlink_ext_ack *extack)
5482
{
5483
struct ifaddrmsg *ifm;
5484
int i, err;
5485
5486
ifm = nlmsg_payload(nlh, sizeof(*ifm));
5487
if (!ifm) {
5488
NL_SET_ERR_MSG_MOD(extack, "Invalid header for get address request");
5489
return -EINVAL;
5490
}
5491
5492
if (!netlink_strict_get_check(skb))
5493
return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
5494
ifa_ipv6_policy, extack);
5495
5496
if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
5497
NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request");
5498
return -EINVAL;
5499
}
5500
5501
err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
5502
ifa_ipv6_policy, extack);
5503
if (err)
5504
return err;
5505
5506
for (i = 0; i <= IFA_MAX; i++) {
5507
if (!tb[i])
5508
continue;
5509
5510
switch (i) {
5511
case IFA_TARGET_NETNSID:
5512
case IFA_ADDRESS:
5513
case IFA_LOCAL:
5514
break;
5515
default:
5516
NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get address request");
5517
return -EINVAL;
5518
}
5519
}
5520
5521
return 0;
5522
}
5523
5524
static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5525
struct netlink_ext_ack *extack)
5526
{
5527
struct net *tgt_net = sock_net(in_skb->sk);
5528
struct inet6_fill_args fillargs = {
5529
.portid = NETLINK_CB(in_skb).portid,
5530
.seq = nlh->nlmsg_seq,
5531
.event = RTM_NEWADDR,
5532
.flags = 0,
5533
.netnsid = -1,
5534
.force_rt_scope_universe = false,
5535
};
5536
struct ifaddrmsg *ifm;
5537
struct nlattr *tb[IFA_MAX+1];
5538
struct in6_addr *addr = NULL, *peer;
5539
struct net_device *dev = NULL;
5540
struct inet6_ifaddr *ifa;
5541
struct sk_buff *skb;
5542
int err;
5543
5544
err = inet6_rtm_valid_getaddr_req(in_skb, nlh, tb, extack);
5545
if (err < 0)
5546
return err;
5547
5548
if (tb[IFA_TARGET_NETNSID]) {
5549
fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]);
5550
5551
tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk,
5552
fillargs.netnsid);
5553
if (IS_ERR(tgt_net))
5554
return PTR_ERR(tgt_net);
5555
}
5556
5557
addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
5558
if (!addr) {
5559
err = -EINVAL;
5560
goto errout;
5561
}
5562
ifm = nlmsg_data(nlh);
5563
if (ifm->ifa_index)
5564
dev = dev_get_by_index(tgt_net, ifm->ifa_index);
5565
5566
ifa = ipv6_get_ifaddr(tgt_net, addr, dev, 1);
5567
if (!ifa) {
5568
err = -EADDRNOTAVAIL;
5569
goto errout;
5570
}
5571
5572
skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
5573
if (!skb) {
5574
err = -ENOBUFS;
5575
goto errout_ifa;
5576
}
5577
5578
err = inet6_fill_ifaddr(skb, ifa, &fillargs);
5579
if (err < 0) {
5580
/* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5581
WARN_ON(err == -EMSGSIZE);
5582
kfree_skb(skb);
5583
goto errout_ifa;
5584
}
5585
err = rtnl_unicast(skb, tgt_net, NETLINK_CB(in_skb).portid);
5586
errout_ifa:
5587
in6_ifa_put(ifa);
5588
errout:
5589
dev_put(dev);
5590
if (fillargs.netnsid >= 0)
5591
put_net(tgt_net);
5592
5593
return err;
5594
}
5595
5596
static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
5597
{
5598
struct sk_buff *skb;
5599
struct net *net = dev_net(ifa->idev->dev);
5600
struct inet6_fill_args fillargs = {
5601
.portid = 0,
5602
.seq = 0,
5603
.event = event,
5604
.flags = 0,
5605
.netnsid = -1,
5606
.force_rt_scope_universe = false,
5607
};
5608
int err = -ENOBUFS;
5609
5610
skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
5611
if (!skb)
5612
goto errout;
5613
5614
err = inet6_fill_ifaddr(skb, ifa, &fillargs);
5615
if (err < 0) {
5616
/* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5617
WARN_ON(err == -EMSGSIZE);
5618
kfree_skb(skb);
5619
goto errout;
5620
}
5621
rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
5622
return;
5623
errout:
5624
rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
5625
}
5626
5627
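/* Snapshot the per-device settings exported as IFLA_INET6_CONF: each value
 * is read locklessly (READ_ONCE) into a flat s32 array indexed by the
 * DEVCONF_* constants, with jiffies-based intervals converted to msecs.
 */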
static void ipv6_store_devconf(const struct ipv6_devconf *cnf,
5628
__s32 *array, int bytes)
5629
{
5630
BUG_ON(bytes < (DEVCONF_MAX * 4));
5631
5632
memset(array, 0, bytes);
5633
array[DEVCONF_FORWARDING] = READ_ONCE(cnf->forwarding);
5634
array[DEVCONF_HOPLIMIT] = READ_ONCE(cnf->hop_limit);
5635
array[DEVCONF_MTU6] = READ_ONCE(cnf->mtu6);
5636
array[DEVCONF_ACCEPT_RA] = READ_ONCE(cnf->accept_ra);
5637
array[DEVCONF_ACCEPT_REDIRECTS] = READ_ONCE(cnf->accept_redirects);
5638
array[DEVCONF_AUTOCONF] = READ_ONCE(cnf->autoconf);
5639
array[DEVCONF_DAD_TRANSMITS] = READ_ONCE(cnf->dad_transmits);
5640
array[DEVCONF_RTR_SOLICITS] = READ_ONCE(cnf->rtr_solicits);
5641
array[DEVCONF_RTR_SOLICIT_INTERVAL] =
5642
jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_interval));
5643
array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
5644
jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_max_interval));
5645
array[DEVCONF_RTR_SOLICIT_DELAY] =
5646
jiffies_to_msecs(READ_ONCE(cnf->rtr_solicit_delay));
5647
array[DEVCONF_FORCE_MLD_VERSION] = READ_ONCE(cnf->force_mld_version);
5648
array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
5649
jiffies_to_msecs(READ_ONCE(cnf->mldv1_unsolicited_report_interval));
5650
array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
5651
jiffies_to_msecs(READ_ONCE(cnf->mldv2_unsolicited_report_interval));
5652
array[DEVCONF_USE_TEMPADDR] = READ_ONCE(cnf->use_tempaddr);
5653
array[DEVCONF_TEMP_VALID_LFT] = READ_ONCE(cnf->temp_valid_lft);
5654
array[DEVCONF_TEMP_PREFERED_LFT] = READ_ONCE(cnf->temp_prefered_lft);
5655
array[DEVCONF_REGEN_MAX_RETRY] = READ_ONCE(cnf->regen_max_retry);
5656
array[DEVCONF_MAX_DESYNC_FACTOR] = READ_ONCE(cnf->max_desync_factor);
5657
array[DEVCONF_MAX_ADDRESSES] = READ_ONCE(cnf->max_addresses);
5658
array[DEVCONF_ACCEPT_RA_DEFRTR] = READ_ONCE(cnf->accept_ra_defrtr);
5659
array[DEVCONF_RA_DEFRTR_METRIC] = READ_ONCE(cnf->ra_defrtr_metric);
5660
array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] =
5661
READ_ONCE(cnf->accept_ra_min_hop_limit);
5662
array[DEVCONF_ACCEPT_RA_PINFO] = READ_ONCE(cnf->accept_ra_pinfo);
5663
#ifdef CONFIG_IPV6_ROUTER_PREF
5664
array[DEVCONF_ACCEPT_RA_RTR_PREF] = READ_ONCE(cnf->accept_ra_rtr_pref);
5665
array[DEVCONF_RTR_PROBE_INTERVAL] =
5666
jiffies_to_msecs(READ_ONCE(cnf->rtr_probe_interval));
5667
#ifdef CONFIG_IPV6_ROUTE_INFO
5668
array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] =
5669
READ_ONCE(cnf->accept_ra_rt_info_min_plen);
5670
array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] =
5671
READ_ONCE(cnf->accept_ra_rt_info_max_plen);
5672
#endif
5673
#endif
5674
array[DEVCONF_PROXY_NDP] = READ_ONCE(cnf->proxy_ndp);
5675
array[DEVCONF_ACCEPT_SOURCE_ROUTE] =
5676
READ_ONCE(cnf->accept_source_route);
5677
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
5678
array[DEVCONF_OPTIMISTIC_DAD] = READ_ONCE(cnf->optimistic_dad);
5679
array[DEVCONF_USE_OPTIMISTIC] = READ_ONCE(cnf->use_optimistic);
5680
#endif
5681
#ifdef CONFIG_IPV6_MROUTE
5682
array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding);
5683
#endif
5684
array[DEVCONF_DISABLE_IPV6] = READ_ONCE(cnf->disable_ipv6);
5685
array[DEVCONF_ACCEPT_DAD] = READ_ONCE(cnf->accept_dad);
5686
array[DEVCONF_FORCE_TLLAO] = READ_ONCE(cnf->force_tllao);
5687
array[DEVCONF_NDISC_NOTIFY] = READ_ONCE(cnf->ndisc_notify);
5688
array[DEVCONF_SUPPRESS_FRAG_NDISC] =
5689
READ_ONCE(cnf->suppress_frag_ndisc);
5690
array[DEVCONF_ACCEPT_RA_FROM_LOCAL] =
5691
READ_ONCE(cnf->accept_ra_from_local);
5692
array[DEVCONF_ACCEPT_RA_MTU] = READ_ONCE(cnf->accept_ra_mtu);
5693
array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] =
5694
READ_ONCE(cnf->ignore_routes_with_linkdown);
5695
/* we omit DEVCONF_STABLE_SECRET for now */
5696
array[DEVCONF_USE_OIF_ADDRS_ONLY] = READ_ONCE(cnf->use_oif_addrs_only);
5697
array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] =
5698
READ_ONCE(cnf->drop_unicast_in_l2_multicast);
5699
array[DEVCONF_DROP_UNSOLICITED_NA] = READ_ONCE(cnf->drop_unsolicited_na);
5700
array[DEVCONF_KEEP_ADDR_ON_DOWN] = READ_ONCE(cnf->keep_addr_on_down);
5701
array[DEVCONF_SEG6_ENABLED] = READ_ONCE(cnf->seg6_enabled);
5702
#ifdef CONFIG_IPV6_SEG6_HMAC
5703
array[DEVCONF_SEG6_REQUIRE_HMAC] = READ_ONCE(cnf->seg6_require_hmac);
5704
#endif
5705
array[DEVCONF_ENHANCED_DAD] = READ_ONCE(cnf->enhanced_dad);
5706
array[DEVCONF_ADDR_GEN_MODE] = READ_ONCE(cnf->addr_gen_mode);
5707
array[DEVCONF_DISABLE_POLICY] = READ_ONCE(cnf->disable_policy);
5708
array[DEVCONF_NDISC_TCLASS] = READ_ONCE(cnf->ndisc_tclass);
5709
array[DEVCONF_RPL_SEG_ENABLED] = READ_ONCE(cnf->rpl_seg_enabled);
5710
array[DEVCONF_IOAM6_ENABLED] = READ_ONCE(cnf->ioam6_enabled);
5711
array[DEVCONF_IOAM6_ID] = READ_ONCE(cnf->ioam6_id);
5712
array[DEVCONF_IOAM6_ID_WIDE] = READ_ONCE(cnf->ioam6_id_wide);
5713
array[DEVCONF_NDISC_EVICT_NOCARRIER] =
5714
READ_ONCE(cnf->ndisc_evict_nocarrier);
5715
array[DEVCONF_ACCEPT_UNTRACKED_NA] =
5716
READ_ONCE(cnf->accept_untracked_na);
5717
array[DEVCONF_ACCEPT_RA_MIN_LFT] = READ_ONCE(cnf->accept_ra_min_lft);
5718
array[DEVCONF_FORCE_FORWARDING] = READ_ONCE(cnf->force_forwarding);
5719
}
5720
5721
static inline size_t inet6_ifla6_size(void)
5722
{
5723
return nla_total_size(4) /* IFLA_INET6_FLAGS */
5724
+ nla_total_size(sizeof(struct ifla_cacheinfo))
5725
+ nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
5726
+ nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
5727
+ nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
5728
+ nla_total_size(sizeof(struct in6_addr)) /* IFLA_INET6_TOKEN */
5729
+ nla_total_size(1) /* IFLA_INET6_ADDR_GEN_MODE */
5730
+ nla_total_size(4) /* IFLA_INET6_RA_MTU */
5731
+ 0;
5732
}
5733
5734
static inline size_t inet6_if_nlmsg_size(void)
5735
{
5736
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5737
+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5738
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5739
+ nla_total_size(4) /* IFLA_MTU */
5740
+ nla_total_size(4) /* IFLA_LINK */
5741
+ nla_total_size(1) /* IFLA_OPERSTATE */
5742
+ nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
5743
}
5744
5745
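/* Both fill helpers below store the number of counters in slot 0 so that
 * userspace knows how many entries follow, then copy the MIB values and
 * zero-pad the attribute up to the reserved length.
 */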
static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
5746
int bytes)
5747
{
5748
int i;
5749
int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX;
5750
BUG_ON(pad < 0);
5751
5752
/* Use put_unaligned() because stats may not be aligned for u64. */
5753
put_unaligned(ICMP6_MIB_MAX, &stats[0]);
5754
for (i = 1; i < ICMP6_MIB_MAX; i++)
5755
put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
5756
5757
memset(&stats[ICMP6_MIB_MAX], 0, pad);
5758
}
5759
5760
static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
5761
int bytes, size_t syncpoff)
5762
{
5763
int i, c;
5764
u64 buff[IPSTATS_MIB_MAX];
5765
int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
5766
5767
BUG_ON(pad < 0);
5768
5769
memset(buff, 0, sizeof(buff));
5770
buff[0] = IPSTATS_MIB_MAX;
5771
5772
for_each_possible_cpu(c) {
5773
for (i = 1; i < IPSTATS_MIB_MAX; i++)
5774
buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
5775
}
5776
5777
memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
5778
memset(&stats[IPSTATS_MIB_MAX], 0, pad);
5779
}
5780
5781
static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
5782
int bytes)
5783
{
5784
switch (attrtype) {
5785
case IFLA_INET6_STATS:
5786
__snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
5787
offsetof(struct ipstats_mib, syncp));
5788
break;
5789
case IFLA_INET6_ICMP6STATS:
5790
__snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, bytes);
5791
break;
5792
}
5793
}
5794
5795
static int inet6_fill_ifla6_stats_attrs(struct sk_buff *skb,
5796
struct inet6_dev *idev)
5797
{
5798
struct nlattr *nla;
5799
5800
nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
5801
if (!nla)
5802
goto nla_put_failure;
5803
snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
5804
5805
nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
5806
if (!nla)
5807
goto nla_put_failure;
5808
snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
5809
5810
return 0;
5811
5812
nla_put_failure:
5813
return -EMSGSIZE;
5814
}
5815
5816
static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
5817
u32 ext_filter_mask)
5818
{
5819
struct ifla_cacheinfo ci;
5820
struct nlattr *nla;
5821
u32 ra_mtu;
5822
5823
if (nla_put_u32(skb, IFLA_INET6_FLAGS, READ_ONCE(idev->if_flags)))
5824
goto nla_put_failure;
5825
ci.max_reasm_len = IPV6_MAXPLEN;
5826
ci.tstamp = cstamp_delta(READ_ONCE(idev->tstamp));
5827
ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
5828
ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
5829
if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
5830
goto nla_put_failure;
5831
nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
5832
if (!nla)
5833
goto nla_put_failure;
5834
ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
5835
5836
/* XXX - MC not implemented */
5837
5838
if (!(ext_filter_mask & RTEXT_FILTER_SKIP_STATS)) {
5839
if (inet6_fill_ifla6_stats_attrs(skb, idev) < 0)
5840
goto nla_put_failure;
5841
}
5842
5843
nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
5844
if (!nla)
5845
goto nla_put_failure;
5846
read_lock_bh(&idev->lock);
5847
memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
5848
read_unlock_bh(&idev->lock);
5849
5850
if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE,
5851
READ_ONCE(idev->cnf.addr_gen_mode)))
5852
goto nla_put_failure;
5853
5854
ra_mtu = READ_ONCE(idev->ra_mtu);
5855
if (ra_mtu && nla_put_u32(skb, IFLA_INET6_RA_MTU, ra_mtu))
5856
goto nla_put_failure;
5857
5858
return 0;
5859
5860
nla_put_failure:
5861
return -EMSGSIZE;
5862
}
5863
5864
static size_t inet6_get_link_af_size(const struct net_device *dev,
5865
u32 ext_filter_mask)
5866
{
5867
if (!__in6_dev_get(dev))
5868
return 0;
5869
5870
return inet6_ifla6_size();
5871
}
5872
5873
static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
5874
u32 ext_filter_mask)
5875
{
5876
struct inet6_dev *idev = __in6_dev_get(dev);
5877
5878
if (!idev)
5879
return -ENODATA;
5880
5881
if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
5882
return -EMSGSIZE;
5883
5884
return 0;
5885
}
5886
5887
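/* Install a new IFLA_INET6_TOKEN: the lower 64 bits of @token replace the
 * interface identifier used for RA-derived addresses.  On a ready interface
 * a non-zero token also triggers a fresh router solicitation so the new
 * address is formed promptly, and addresses built from the old token get
 * their lifetimes zeroed so that addrconf_verify can retire them.
 */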
static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token,
5888
struct netlink_ext_ack *extack)
5889
{
5890
struct inet6_ifaddr *ifp;
5891
struct net_device *dev = idev->dev;
5892
bool clear_token, update_rs = false;
5893
struct in6_addr ll_addr;
5894
5895
ASSERT_RTNL();
5896
5897
if (!token)
5898
return -EINVAL;
5899
5900
if (dev->flags & IFF_LOOPBACK) {
5901
NL_SET_ERR_MSG_MOD(extack, "Device is loopback");
5902
return -EINVAL;
5903
}
5904
5905
if (dev->flags & IFF_NOARP) {
5906
NL_SET_ERR_MSG_MOD(extack,
5907
"Device does not do neighbour discovery");
5908
return -EINVAL;
5909
}
5910
5911
if (!ipv6_accept_ra(idev)) {
5912
NL_SET_ERR_MSG_MOD(extack,
5913
"Router advertisement is disabled on device");
5914
return -EINVAL;
5915
}
5916
5917
if (READ_ONCE(idev->cnf.rtr_solicits) == 0) {
5918
NL_SET_ERR_MSG(extack,
5919
"Router solicitation is disabled on device");
5920
return -EINVAL;
5921
}
5922
5923
write_lock_bh(&idev->lock);
5924
5925
BUILD_BUG_ON(sizeof(token->s6_addr) != 16);
5926
memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8);
5927
5928
write_unlock_bh(&idev->lock);
5929
5930
clear_token = ipv6_addr_any(token);
5931
if (clear_token)
5932
goto update_lft;
5933
5934
if (!idev->dead && (idev->if_flags & IF_READY) &&
5935
!ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
5936
IFA_F_OPTIMISTIC)) {
5937
/* If we're not ready, then normal ifup will take care
5938
* of this. Otherwise, we need to request our rs here.
5939
*/
5940
ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters);
5941
update_rs = true;
5942
}
5943
5944
update_lft:
5945
write_lock_bh(&idev->lock);
5946
5947
if (update_rs) {
5948
idev->if_flags |= IF_RS_SENT;
5949
idev->rs_interval = rfc3315_s14_backoff_init(
5950
READ_ONCE(idev->cnf.rtr_solicit_interval));
5951
idev->rs_probes = 1;
5952
addrconf_mod_rs_timer(idev, idev->rs_interval);
5953
}
5954
5955
/* Well, that's kinda nasty ... */
5956
list_for_each_entry(ifp, &idev->addr_list, if_list) {
5957
spin_lock(&ifp->lock);
5958
if (ifp->tokenized) {
5959
ifp->valid_lft = 0;
5960
ifp->prefered_lft = 0;
5961
}
5962
spin_unlock(&ifp->lock);
5963
}
5964
5965
write_unlock_bh(&idev->lock);
5966
inet6_ifinfo_notify(RTM_NEWLINK, idev);
5967
addrconf_verify_rtnl(dev_net(dev));
5968
return 0;
5969
}
5970
5971
static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
5972
[IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
5973
[IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) },
5974
[IFLA_INET6_RA_MTU] = { .type = NLA_REJECT,
5975
.reject_message =
5976
"IFLA_INET6_RA_MTU can not be set" },
5977
};
5978
5979
static int check_addr_gen_mode(int mode)
{
	if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
	    mode != IN6_ADDR_GEN_MODE_NONE &&
	    mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
	    mode != IN6_ADDR_GEN_MODE_RANDOM)
		return -EINVAL;
	return 1;
}
5988
5989
static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
				int mode)
{
	if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
	    !idev->cnf.stable_secret.initialized &&
	    !net->ipv6.devconf_dflt->stable_secret.initialized)
		return -EINVAL;
	return 1;
}
5998
5999
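/* rtnl_af_ops .validate_link_af: parse and sanity-check the AF_INET6
 * attributes of a link change request before any state is touched.
 */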
static int inet6_validate_link_af(const struct net_device *dev,
6000
const struct nlattr *nla,
6001
struct netlink_ext_ack *extack)
6002
{
6003
struct nlattr *tb[IFLA_INET6_MAX + 1];
6004
struct inet6_dev *idev = NULL;
6005
int err;
6006
6007
if (dev) {
6008
idev = __in6_dev_get(dev);
6009
if (!idev)
6010
return -EAFNOSUPPORT;
6011
}
6012
6013
err = nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla,
6014
inet6_af_policy, extack);
6015
if (err)
6016
return err;
6017
6018
if (!tb[IFLA_INET6_TOKEN] && !tb[IFLA_INET6_ADDR_GEN_MODE])
6019
return -EINVAL;
6020
6021
if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
6022
u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
6023
6024
if (check_addr_gen_mode(mode) < 0)
6025
return -EINVAL;
6026
if (dev && check_stable_privacy(idev, dev_net(dev), mode) < 0)
6027
return -EINVAL;
6028
}
6029
6030
return 0;
6031
}
6032
6033
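/* rtnl_af_ops .set_link_af: apply a validated AF_INET6 attribute set,
 * i.e. an interface token and/or a new address generation mode.
 */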
static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
6034
struct netlink_ext_ack *extack)
6035
{
6036
struct inet6_dev *idev = __in6_dev_get(dev);
6037
struct nlattr *tb[IFLA_INET6_MAX + 1];
6038
int err;
6039
6040
if (!idev)
6041
return -EAFNOSUPPORT;
6042
6043
if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
6044
return -EINVAL;
6045
6046
if (tb[IFLA_INET6_TOKEN]) {
6047
err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
6048
extack);
6049
if (err)
6050
return err;
6051
}
6052
6053
if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
6054
u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
6055
6056
WRITE_ONCE(idev->cnf.addr_gen_mode, mode);
6057
}
6058
6059
return 0;
6060
}
6061
6062
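/* Build one RTM_NEWLINK message for @idev, with the IPv6 attributes
 * nested under IFLA_PROTINFO.  Returns 0, or -EMSGSIZE if @skb is full.
 */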
static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
6063
u32 portid, u32 seq, int event, unsigned int flags)
6064
{
6065
struct net_device *dev = idev->dev;
6066
struct ifinfomsg *hdr;
6067
struct nlmsghdr *nlh;
6068
int ifindex, iflink;
6069
void *protoinfo;
6070
6071
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
6072
if (!nlh)
6073
return -EMSGSIZE;
6074
6075
hdr = nlmsg_data(nlh);
6076
hdr->ifi_family = AF_INET6;
6077
hdr->__ifi_pad = 0;
6078
hdr->ifi_type = dev->type;
6079
ifindex = READ_ONCE(dev->ifindex);
6080
hdr->ifi_index = ifindex;
6081
hdr->ifi_flags = netif_get_flags(dev);
6082
hdr->ifi_change = 0;
6083
6084
iflink = dev_get_iflink(dev);
6085
if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
6086
(dev->addr_len &&
6087
nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
6088
nla_put_u32(skb, IFLA_MTU, READ_ONCE(dev->mtu)) ||
6089
(ifindex != iflink &&
6090
nla_put_u32(skb, IFLA_LINK, iflink)) ||
6091
nla_put_u8(skb, IFLA_OPERSTATE,
6092
netif_running(dev) ? READ_ONCE(dev->operstate) : IF_OPER_DOWN))
6093
goto nla_put_failure;
6094
protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO);
6095
if (!protoinfo)
6096
goto nla_put_failure;
6097
6098
if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
6099
goto nla_put_failure;
6100
6101
nla_nest_end(skb, protoinfo);
6102
nlmsg_end(skb, nlh);
6103
return 0;
6104
6105
nla_put_failure:
6106
nlmsg_cancel(skb, nlh);
6107
return -EMSGSIZE;
6108
}
6109
6110
static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh,
				   struct netlink_ext_ack *extack)
{
	struct ifinfomsg *ifm;

	ifm = nlmsg_payload(nlh, sizeof(*ifm));
	if (!ifm) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for link dump request");
		return -EINVAL;
	}

	if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid data after header");
		return -EINVAL;
	}

	if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
	    ifm->ifi_change || ifm->ifi_index) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for dump request");
		return -EINVAL;
	}

	return 0;
}
6134
6135
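/* Netlink dump handler for RTM_GETLINK (PF_INET6): walk all devices in
 * the namespace and emit one ifinfo message per device that has IPv6
 * enabled, resuming from ctx->ifindex across successive dump calls.
 */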
static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
6136
{
6137
struct net *net = sock_net(skb->sk);
6138
struct {
6139
unsigned long ifindex;
6140
} *ctx = (void *)cb->ctx;
6141
struct net_device *dev;
6142
struct inet6_dev *idev;
6143
int err;
6144
6145
/* only requests using strict checking can pass data to
6146
* influence the dump
6147
*/
6148
if (cb->strict_check) {
6149
err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
6150
6151
if (err < 0)
6152
return err;
6153
}
6154
6155
err = 0;
6156
rcu_read_lock();
6157
for_each_netdev_dump(net, dev, ctx->ifindex) {
6158
idev = __in6_dev_get(dev);
6159
if (!idev)
6160
continue;
6161
err = inet6_fill_ifinfo(skb, idev,
6162
NETLINK_CB(cb->skb).portid,
6163
cb->nlh->nlmsg_seq,
6164
RTM_NEWLINK, NLM_F_MULTI);
6165
if (err < 0)
6166
break;
6167
}
6168
rcu_read_unlock();
6169
6170
return err;
6171
}
6172
6173
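/* Broadcast an ifinfo message for @idev to RTNLGRP_IPV6_IFINFO listeners. */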
void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
6174
{
6175
struct sk_buff *skb;
6176
struct net *net = dev_net(idev->dev);
6177
int err = -ENOBUFS;
6178
6179
skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
6180
if (!skb)
6181
goto errout;
6182
6183
err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
6184
if (err < 0) {
6185
/* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
6186
WARN_ON(err == -EMSGSIZE);
6187
kfree_skb(skb);
6188
goto errout;
6189
}
6190
rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
6191
return;
6192
errout:
6193
rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
6194
}
6195
6196
static inline size_t inet6_prefix_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct prefixmsg))
	       + nla_total_size(sizeof(struct in6_addr))
	       + nla_total_size(sizeof(struct prefix_cacheinfo));
}
6202
6203
static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
6204
struct prefix_info *pinfo, u32 portid, u32 seq,
6205
int event, unsigned int flags)
6206
{
6207
struct prefixmsg *pmsg;
6208
struct nlmsghdr *nlh;
6209
struct prefix_cacheinfo ci;
6210
6211
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
6212
if (!nlh)
6213
return -EMSGSIZE;
6214
6215
pmsg = nlmsg_data(nlh);
6216
pmsg->prefix_family = AF_INET6;
6217
pmsg->prefix_pad1 = 0;
6218
pmsg->prefix_pad2 = 0;
6219
pmsg->prefix_ifindex = idev->dev->ifindex;
6220
pmsg->prefix_len = pinfo->prefix_len;
6221
pmsg->prefix_type = pinfo->type;
6222
pmsg->prefix_pad3 = 0;
6223
pmsg->prefix_flags = pinfo->flags;
6224
6225
if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
6226
goto nla_put_failure;
6227
ci.preferred_time = ntohl(pinfo->prefered);
6228
ci.valid_time = ntohl(pinfo->valid);
6229
if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
6230
goto nla_put_failure;
6231
nlmsg_end(skb, nlh);
6232
return 0;
6233
6234
nla_put_failure:
6235
nlmsg_cancel(skb, nlh);
6236
return -EMSGSIZE;
6237
}
6238
6239
static void inet6_prefix_notify(int event, struct inet6_dev *idev,
6240
struct prefix_info *pinfo)
6241
{
6242
struct sk_buff *skb;
6243
struct net *net = dev_net(idev->dev);
6244
int err = -ENOBUFS;
6245
6246
skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
6247
if (!skb)
6248
goto errout;
6249
6250
err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
6251
if (err < 0) {
6252
/* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
6253
WARN_ON(err == -EMSGSIZE);
6254
kfree_skb(skb);
6255
goto errout;
6256
}
6257
rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
6258
return;
6259
errout:
6260
rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
6261
}
6262
6263
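/* Notify userspace about an address change and keep the associated
 * routes in sync: (re)insert the host route and add anycast/peer routes
 * on RTM_NEWADDR, tear them down again on RTM_DELADDR.
 */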
static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
6264
{
6265
struct net *net = dev_net(ifp->idev->dev);
6266
6267
if (event)
6268
ASSERT_RTNL();
6269
6270
inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
6271
6272
switch (event) {
6273
case RTM_NEWADDR:
6274
/*
6275
* If the address was optimistic we inserted the route at the
6276
* start of our DAD process, so we don't need to do it again.
6277
* If the device was taken down in the middle of the DAD
6278
* cycle there is a race where we could get here without a
6279
* host route, so nothing to insert. That will be fixed when
6280
* the device is brought up.
6281
*/
6282
if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
6283
ip6_ins_rt(net, ifp->rt);
6284
} else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
6285
pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
6286
&ifp->addr, ifp->idev->dev->name);
6287
}
6288
6289
if (ifp->idev->cnf.forwarding)
6290
addrconf_join_anycast(ifp);
6291
if (!ipv6_addr_any(&ifp->peer_addr))
6292
addrconf_prefix_route(&ifp->peer_addr, 128,
6293
ifp->rt_priority, ifp->idev->dev,
6294
0, 0, GFP_ATOMIC);
6295
break;
6296
case RTM_DELADDR:
6297
if (ifp->idev->cnf.forwarding)
6298
addrconf_leave_anycast(ifp);
6299
addrconf_leave_solict(ifp->idev, &ifp->addr);
6300
if (!ipv6_addr_any(&ifp->peer_addr)) {
6301
struct fib6_info *rt;
6302
6303
rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
6304
ifp->idev->dev, 0, 0,
6305
false);
6306
if (rt)
6307
ip6_del_rt(net, rt, false);
6308
}
6309
if (ifp->rt) {
6310
ip6_del_rt(net, ifp->rt, false);
6311
ifp->rt = NULL;
6312
}
6313
rt_genid_bump_ipv6(net);
6314
break;
6315
}
6316
atomic_inc(&net->ipv6.dev_addr_genid);
6317
}
6318
6319
static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
{
	if (likely(ifp->idev->dead == 0))
		__ipv6_ifa_notify(event, ifp);
}
6324
6325
#ifdef CONFIG_SYSCTL
6326
6327
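/* sysctl handler for conf/<dev>/forwarding: read the new value into a
 * local copy first, then let addrconf_fixup_forwarding() apply it under
 * RTNL so per-device state is updated consistently.
 */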
static int addrconf_sysctl_forward(const struct ctl_table *ctl, int write,
6328
void *buffer, size_t *lenp, loff_t *ppos)
6329
{
6330
int *valp = ctl->data;
6331
int val = *valp;
6332
loff_t pos = *ppos;
6333
struct ctl_table lctl;
6334
int ret;
6335
6336
/*
6337
* ctl->data points to idev->cnf.forwarding, we should
6338
* not modify it until we get the rtnl lock.
6339
*/
6340
lctl = *ctl;
6341
lctl.data = &val;
6342
6343
ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6344
6345
if (write)
6346
ret = addrconf_fixup_forwarding(ctl, valp, val);
6347
if (ret)
6348
*ppos = pos;
6349
return ret;
6350
}
6351
6352
static int addrconf_sysctl_mtu(const struct ctl_table *ctl, int write,
6353
void *buffer, size_t *lenp, loff_t *ppos)
6354
{
6355
struct inet6_dev *idev = ctl->extra1;
6356
int min_mtu = IPV6_MIN_MTU;
6357
struct ctl_table lctl;
6358
6359
lctl = *ctl;
6360
lctl.extra1 = &min_mtu;
6361
lctl.extra2 = idev ? &idev->dev->mtu : NULL;
6362
6363
return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
6364
}
6365
6366
static void dev_disable_change(struct inet6_dev *idev)
6367
{
6368
struct netdev_notifier_info info;
6369
6370
if (!idev || !idev->dev)
6371
return;
6372
6373
netdev_notifier_info_init(&info, idev->dev);
6374
if (idev->cnf.disable_ipv6)
6375
addrconf_notify(NULL, NETDEV_DOWN, &info);
6376
else
6377
addrconf_notify(NULL, NETDEV_UP, &info);
6378
}
6379
6380
static void addrconf_disable_change(struct net *net, __s32 newf)
6381
{
6382
struct net_device *dev;
6383
struct inet6_dev *idev;
6384
6385
for_each_netdev(net, dev) {
6386
idev = __in6_dev_get_rtnl_net(dev);
6387
if (idev) {
6388
int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
6389
6390
WRITE_ONCE(idev->cnf.disable_ipv6, newf);
6391
if (changed)
6392
dev_disable_change(idev);
6393
}
6394
}
6395
}
6396
6397
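/* Apply a new disable_ipv6 value: the "default" template can be changed
 * directly, while "all" or a single device needs the per-netns RTNL lock
 * so the corresponding NETDEV_UP/NETDEV_DOWN handling can run.
 */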
static int addrconf_disable_ipv6(const struct ctl_table *table, int *p, int newf)
6398
{
6399
struct net *net = (struct net *)table->extra2;
6400
int old;
6401
6402
if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
6403
WRITE_ONCE(*p, newf);
6404
return 0;
6405
}
6406
6407
if (!rtnl_net_trylock(net))
6408
return restart_syscall();
6409
6410
old = *p;
6411
WRITE_ONCE(*p, newf);
6412
6413
if (p == &net->ipv6.devconf_all->disable_ipv6) {
6414
WRITE_ONCE(net->ipv6.devconf_dflt->disable_ipv6, newf);
6415
addrconf_disable_change(net, newf);
6416
} else if ((!newf) ^ (!old)) {
6417
dev_disable_change((struct inet6_dev *)table->extra1);
6418
}
6419
6420
rtnl_net_unlock(net);
6421
return 0;
6422
}
6423
6424
static int addrconf_sysctl_disable(const struct ctl_table *ctl, int write,
6425
void *buffer, size_t *lenp, loff_t *ppos)
6426
{
6427
int *valp = ctl->data;
6428
int val = *valp;
6429
loff_t pos = *ppos;
6430
struct ctl_table lctl;
6431
int ret;
6432
6433
/*
6434
* ctl->data points to idev->cnf.disable_ipv6, we should
6435
* not modify it until we get the rtnl lock.
6436
*/
6437
lctl = *ctl;
6438
lctl.data = &val;
6439
6440
ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6441
6442
if (write)
6443
ret = addrconf_disable_ipv6(ctl, valp, val);
6444
if (ret)
6445
*ppos = pos;
6446
return ret;
6447
}
6448
6449
static int addrconf_sysctl_proxy_ndp(const struct ctl_table *ctl, int write,
6450
void *buffer, size_t *lenp, loff_t *ppos)
6451
{
6452
int *valp = ctl->data;
6453
int ret;
6454
int old, new;
6455
6456
old = *valp;
6457
ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6458
new = *valp;
6459
6460
if (write && old != new) {
6461
struct net *net = ctl->extra2;
6462
6463
if (!rtnl_net_trylock(net))
6464
return restart_syscall();
6465
6466
if (valp == &net->ipv6.devconf_dflt->proxy_ndp) {
6467
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6468
NETCONFA_PROXY_NEIGH,
6469
NETCONFA_IFINDEX_DEFAULT,
6470
net->ipv6.devconf_dflt);
6471
} else if (valp == &net->ipv6.devconf_all->proxy_ndp) {
6472
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6473
NETCONFA_PROXY_NEIGH,
6474
NETCONFA_IFINDEX_ALL,
6475
net->ipv6.devconf_all);
6476
} else {
6477
struct inet6_dev *idev = ctl->extra1;
6478
6479
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6480
NETCONFA_PROXY_NEIGH,
6481
idev->dev->ifindex,
6482
&idev->cnf);
6483
}
6484
rtnl_net_unlock(net);
6485
}
6486
6487
return ret;
6488
}
6489
6490
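/* sysctl handler for conf/<dev>/addr_gen_mode: validate the new mode,
 * store it, and regenerate the autoconfigured addresses of the affected
 * device (or of every device, for the "all" entry) while holding RTNL.
 */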
static int addrconf_sysctl_addr_gen_mode(const struct ctl_table *ctl, int write,
6491
void *buffer, size_t *lenp,
6492
loff_t *ppos)
6493
{
6494
int ret = 0;
6495
u32 new_val;
6496
struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
6497
struct net *net = (struct net *)ctl->extra2;
6498
struct ctl_table tmp = {
6499
.data = &new_val,
6500
.maxlen = sizeof(new_val),
6501
.mode = ctl->mode,
6502
};
6503
6504
if (!rtnl_net_trylock(net))
6505
return restart_syscall();
6506
6507
new_val = *((u32 *)ctl->data);
6508
6509
ret = proc_douintvec(&tmp, write, buffer, lenp, ppos);
6510
if (ret != 0)
6511
goto out;
6512
6513
if (write) {
6514
if (check_addr_gen_mode(new_val) < 0) {
6515
ret = -EINVAL;
6516
goto out;
6517
}
6518
6519
if (idev) {
6520
if (check_stable_privacy(idev, net, new_val) < 0) {
6521
ret = -EINVAL;
6522
goto out;
6523
}
6524
6525
if (idev->cnf.addr_gen_mode != new_val) {
6526
WRITE_ONCE(idev->cnf.addr_gen_mode, new_val);
6527
netdev_lock_ops(idev->dev);
6528
addrconf_init_auto_addrs(idev->dev);
6529
netdev_unlock_ops(idev->dev);
6530
}
6531
} else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
6532
struct net_device *dev;
6533
6534
WRITE_ONCE(net->ipv6.devconf_dflt->addr_gen_mode, new_val);
6535
for_each_netdev(net, dev) {
6536
idev = __in6_dev_get_rtnl_net(dev);
6537
if (idev &&
6538
idev->cnf.addr_gen_mode != new_val) {
6539
WRITE_ONCE(idev->cnf.addr_gen_mode,
6540
new_val);
6541
netdev_lock_ops(idev->dev);
6542
addrconf_init_auto_addrs(idev->dev);
6543
netdev_unlock_ops(idev->dev);
6544
}
6545
}
6546
}
6547
6548
WRITE_ONCE(*((u32 *)ctl->data), new_val);
6549
}
6550
6551
out:
6552
rtnl_net_unlock(net);
6553
6554
return ret;
6555
}
6556
6557
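/* sysctl handler for conf/<dev>/stable_secret: the secret is exchanged
 * as an IPv6-address-formatted string; writing it also switches the
 * affected device(s) to IN6_ADDR_GEN_MODE_STABLE_PRIVACY.
 */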
static int addrconf_sysctl_stable_secret(const struct ctl_table *ctl, int write,
6558
void *buffer, size_t *lenp,
6559
loff_t *ppos)
6560
{
6561
int err;
6562
struct in6_addr addr;
6563
char str[IPV6_MAX_STRLEN];
6564
struct ctl_table lctl = *ctl;
6565
struct net *net = ctl->extra2;
6566
struct ipv6_stable_secret *secret = ctl->data;
6567
6568
if (&net->ipv6.devconf_all->stable_secret == ctl->data)
6569
return -EIO;
6570
6571
lctl.maxlen = IPV6_MAX_STRLEN;
6572
lctl.data = str;
6573
6574
if (!rtnl_net_trylock(net))
6575
return restart_syscall();
6576
6577
if (!write && !secret->initialized) {
6578
err = -EIO;
6579
goto out;
6580
}
6581
6582
err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
6583
if (err >= sizeof(str)) {
6584
err = -EIO;
6585
goto out;
6586
}
6587
6588
err = proc_dostring(&lctl, write, buffer, lenp, ppos);
6589
if (err || !write)
6590
goto out;
6591
6592
if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
6593
err = -EIO;
6594
goto out;
6595
}
6596
6597
secret->initialized = true;
6598
secret->secret = addr;
6599
6600
if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
6601
struct net_device *dev;
6602
6603
for_each_netdev(net, dev) {
6604
struct inet6_dev *idev = __in6_dev_get_rtnl_net(dev);
6605
6606
if (idev) {
6607
WRITE_ONCE(idev->cnf.addr_gen_mode,
6608
IN6_ADDR_GEN_MODE_STABLE_PRIVACY);
6609
}
6610
}
6611
} else {
6612
struct inet6_dev *idev = ctl->extra1;
6613
6614
WRITE_ONCE(idev->cnf.addr_gen_mode,
6615
IN6_ADDR_GEN_MODE_STABLE_PRIVACY);
6616
}
6617
6618
out:
6619
rtnl_net_unlock(net);
6620
6621
return err;
6622
}
6623
6624
static
6625
int addrconf_sysctl_ignore_routes_with_linkdown(const struct ctl_table *ctl,
6626
int write, void *buffer,
6627
size_t *lenp,
6628
loff_t *ppos)
6629
{
6630
int *valp = ctl->data;
6631
int val = *valp;
6632
loff_t pos = *ppos;
6633
struct ctl_table lctl;
6634
int ret;
6635
6636
/* ctl->data points to idev->cnf.ignore_routes_when_linkdown
6637
* we should not modify it until we get the rtnl lock.
6638
*/
6639
lctl = *ctl;
6640
lctl.data = &val;
6641
6642
ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6643
6644
if (write)
6645
ret = addrconf_fixup_linkdown(ctl, valp, val);
6646
if (ret)
6647
*ppos = pos;
6648
return ret;
6649
}
6650
6651
static
void addrconf_set_nopolicy(struct rt6_info *rt, int action)
{
	if (rt) {
		if (action)
			rt->dst.flags |= DST_NOPOLICY;
		else
			rt->dst.flags &= ~DST_NOPOLICY;
	}
}
6661
6662
static
6663
void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
6664
{
6665
struct inet6_ifaddr *ifa;
6666
6667
read_lock_bh(&idev->lock);
6668
list_for_each_entry(ifa, &idev->addr_list, if_list) {
6669
spin_lock(&ifa->lock);
6670
if (ifa->rt) {
6671
/* host routes only use builtin fib6_nh */
6672
struct fib6_nh *nh = ifa->rt->fib6_nh;
6673
int cpu;
6674
6675
rcu_read_lock();
6676
ifa->rt->dst_nopolicy = val ? true : false;
6677
if (nh->rt6i_pcpu) {
6678
for_each_possible_cpu(cpu) {
6679
struct rt6_info **rtp;
6680
6681
rtp = per_cpu_ptr(nh->rt6i_pcpu, cpu);
6682
addrconf_set_nopolicy(*rtp, val);
6683
}
6684
}
6685
rcu_read_unlock();
6686
}
6687
spin_unlock(&ifa->lock);
6688
}
6689
read_unlock_bh(&idev->lock);
6690
}
6691
6692
static
6693
int addrconf_disable_policy(const struct ctl_table *ctl, int *valp, int val)
6694
{
6695
struct net *net = (struct net *)ctl->extra2;
6696
struct inet6_dev *idev;
6697
6698
if (valp == &net->ipv6.devconf_dflt->disable_policy) {
6699
WRITE_ONCE(*valp, val);
6700
return 0;
6701
}
6702
6703
if (!rtnl_net_trylock(net))
6704
return restart_syscall();
6705
6706
WRITE_ONCE(*valp, val);
6707
6708
if (valp == &net->ipv6.devconf_all->disable_policy) {
6709
struct net_device *dev;
6710
6711
for_each_netdev(net, dev) {
6712
idev = __in6_dev_get_rtnl_net(dev);
6713
if (idev)
6714
addrconf_disable_policy_idev(idev, val);
6715
}
6716
} else {
6717
idev = (struct inet6_dev *)ctl->extra1;
6718
addrconf_disable_policy_idev(idev, val);
6719
}
6720
6721
rtnl_net_unlock(net);
6722
return 0;
6723
}
6724
6725
static int addrconf_sysctl_disable_policy(const struct ctl_table *ctl, int write,
6726
void *buffer, size_t *lenp, loff_t *ppos)
6727
{
6728
int *valp = ctl->data;
6729
int val = *valp;
6730
loff_t pos = *ppos;
6731
struct ctl_table lctl;
6732
int ret;
6733
6734
lctl = *ctl;
6735
lctl.data = &val;
6736
ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6737
6738
if (write && (*valp != val))
6739
ret = addrconf_disable_policy(ctl, valp, val);
6740
6741
if (ret)
6742
*ppos = pos;
6743
6744
return ret;
6745
}
6746
6747
static void addrconf_force_forward_change(struct net *net, __s32 newf)
6748
{
6749
struct net_device *dev;
6750
struct inet6_dev *idev;
6751
6752
for_each_netdev(net, dev) {
6753
idev = __in6_dev_get_rtnl_net(dev);
6754
if (idev) {
6755
int changed = (!idev->cnf.force_forwarding) ^ (!newf);
6756
6757
WRITE_ONCE(idev->cnf.force_forwarding, newf);
6758
if (changed)
6759
inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
6760
NETCONFA_FORCE_FORWARDING,
6761
dev->ifindex, &idev->cnf);
6762
}
6763
}
6764
}
6765
6766
static int addrconf_sysctl_force_forwarding(const struct ctl_table *ctl, int write,
6767
void *buffer, size_t *lenp, loff_t *ppos)
6768
{
6769
struct inet6_dev *idev = ctl->extra1;
6770
struct ctl_table tmp_ctl = *ctl;
6771
struct net *net = ctl->extra2;
6772
int *valp = ctl->data;
6773
int new_val = *valp;
6774
int old_val = *valp;
6775
loff_t pos = *ppos;
6776
int ret;
6777
6778
tmp_ctl.extra1 = SYSCTL_ZERO;
6779
tmp_ctl.extra2 = SYSCTL_ONE;
6780
tmp_ctl.data = &new_val;
6781
6782
ret = proc_douintvec_minmax(&tmp_ctl, write, buffer, lenp, ppos);
6783
6784
if (write && old_val != new_val) {
6785
if (!rtnl_net_trylock(net))
6786
return restart_syscall();
6787
6788
WRITE_ONCE(*valp, new_val);
6789
6790
if (valp == &net->ipv6.devconf_dflt->force_forwarding) {
6791
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6792
NETCONFA_FORCE_FORWARDING,
6793
NETCONFA_IFINDEX_DEFAULT,
6794
net->ipv6.devconf_dflt);
6795
} else if (valp == &net->ipv6.devconf_all->force_forwarding) {
6796
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6797
NETCONFA_FORCE_FORWARDING,
6798
NETCONFA_IFINDEX_ALL,
6799
net->ipv6.devconf_all);
6800
6801
addrconf_force_forward_change(net, new_val);
6802
} else {
6803
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6804
NETCONFA_FORCE_FORWARDING,
6805
idev->dev->ifindex,
6806
&idev->cnf);
6807
}
6808
rtnl_net_unlock(net);
6809
}
6810
6811
if (ret)
6812
*ppos = pos;
6813
return ret;
6814
}
6815
6816
static int minus_one = -1;
6817
static const int two_five_five = 255;
6818
static u32 ioam6_if_id_max = U16_MAX;
6819
6820
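/* Template for the per-interface tree under /proc/sys/net/ipv6/conf/
 * ("all", "default" and one directory per device), e.g.
 * /proc/sys/net/ipv6/conf/eth0/disable_ipv6.  __addrconf_sysctl_register()
 * below rebinds each entry's .data to the matching ipv6_devconf instance.
 */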
static const struct ctl_table addrconf_sysctl[] = {
6821
{
6822
.procname = "forwarding",
6823
.data = &ipv6_devconf.forwarding,
6824
.maxlen = sizeof(int),
6825
.mode = 0644,
6826
.proc_handler = addrconf_sysctl_forward,
6827
},
6828
{
6829
.procname = "hop_limit",
6830
.data = &ipv6_devconf.hop_limit,
6831
.maxlen = sizeof(int),
6832
.mode = 0644,
6833
.proc_handler = proc_dointvec_minmax,
6834
.extra1 = (void *)SYSCTL_ONE,
6835
.extra2 = (void *)&two_five_five,
6836
},
6837
{
6838
.procname = "mtu",
6839
.data = &ipv6_devconf.mtu6,
6840
.maxlen = sizeof(int),
6841
.mode = 0644,
6842
.proc_handler = addrconf_sysctl_mtu,
6843
},
6844
{
6845
.procname = "accept_ra",
6846
.data = &ipv6_devconf.accept_ra,
6847
.maxlen = sizeof(int),
6848
.mode = 0644,
6849
.proc_handler = proc_dointvec,
6850
},
6851
{
6852
.procname = "accept_redirects",
6853
.data = &ipv6_devconf.accept_redirects,
6854
.maxlen = sizeof(int),
6855
.mode = 0644,
6856
.proc_handler = proc_dointvec,
6857
},
6858
{
6859
.procname = "autoconf",
6860
.data = &ipv6_devconf.autoconf,
6861
.maxlen = sizeof(int),
6862
.mode = 0644,
6863
.proc_handler = proc_dointvec,
6864
},
6865
{
6866
.procname = "dad_transmits",
6867
.data = &ipv6_devconf.dad_transmits,
6868
.maxlen = sizeof(int),
6869
.mode = 0644,
6870
.proc_handler = proc_dointvec,
6871
},
6872
{
6873
.procname = "router_solicitations",
6874
.data = &ipv6_devconf.rtr_solicits,
6875
.maxlen = sizeof(int),
6876
.mode = 0644,
6877
.proc_handler = proc_dointvec_minmax,
6878
.extra1 = &minus_one,
6879
},
6880
{
6881
.procname = "router_solicitation_interval",
6882
.data = &ipv6_devconf.rtr_solicit_interval,
6883
.maxlen = sizeof(int),
6884
.mode = 0644,
6885
.proc_handler = proc_dointvec_jiffies,
6886
},
6887
{
6888
.procname = "router_solicitation_max_interval",
6889
.data = &ipv6_devconf.rtr_solicit_max_interval,
6890
.maxlen = sizeof(int),
6891
.mode = 0644,
6892
.proc_handler = proc_dointvec_jiffies,
6893
},
6894
{
6895
.procname = "router_solicitation_delay",
6896
.data = &ipv6_devconf.rtr_solicit_delay,
6897
.maxlen = sizeof(int),
6898
.mode = 0644,
6899
.proc_handler = proc_dointvec_jiffies,
6900
},
6901
{
6902
.procname = "force_mld_version",
6903
.data = &ipv6_devconf.force_mld_version,
6904
.maxlen = sizeof(int),
6905
.mode = 0644,
6906
.proc_handler = proc_dointvec,
6907
},
6908
{
6909
.procname = "mldv1_unsolicited_report_interval",
6910
.data =
6911
&ipv6_devconf.mldv1_unsolicited_report_interval,
6912
.maxlen = sizeof(int),
6913
.mode = 0644,
6914
.proc_handler = proc_dointvec_ms_jiffies,
6915
},
6916
{
6917
.procname = "mldv2_unsolicited_report_interval",
6918
.data =
6919
&ipv6_devconf.mldv2_unsolicited_report_interval,
6920
.maxlen = sizeof(int),
6921
.mode = 0644,
6922
.proc_handler = proc_dointvec_ms_jiffies,
6923
},
6924
{
6925
.procname = "use_tempaddr",
6926
.data = &ipv6_devconf.use_tempaddr,
6927
.maxlen = sizeof(int),
6928
.mode = 0644,
6929
.proc_handler = proc_dointvec,
6930
},
6931
{
6932
.procname = "temp_valid_lft",
6933
.data = &ipv6_devconf.temp_valid_lft,
6934
.maxlen = sizeof(int),
6935
.mode = 0644,
6936
.proc_handler = proc_dointvec,
6937
},
6938
{
6939
.procname = "temp_prefered_lft",
6940
.data = &ipv6_devconf.temp_prefered_lft,
6941
.maxlen = sizeof(int),
6942
.mode = 0644,
6943
.proc_handler = proc_dointvec,
6944
},
6945
{
6946
.procname = "regen_min_advance",
6947
.data = &ipv6_devconf.regen_min_advance,
6948
.maxlen = sizeof(int),
6949
.mode = 0644,
6950
.proc_handler = proc_dointvec,
6951
},
6952
{
6953
.procname = "regen_max_retry",
6954
.data = &ipv6_devconf.regen_max_retry,
6955
.maxlen = sizeof(int),
6956
.mode = 0644,
6957
.proc_handler = proc_dointvec,
6958
},
6959
{
6960
.procname = "max_desync_factor",
6961
.data = &ipv6_devconf.max_desync_factor,
6962
.maxlen = sizeof(int),
6963
.mode = 0644,
6964
.proc_handler = proc_dointvec,
6965
},
6966
{
6967
.procname = "max_addresses",
6968
.data = &ipv6_devconf.max_addresses,
6969
.maxlen = sizeof(int),
6970
.mode = 0644,
6971
.proc_handler = proc_dointvec,
6972
},
6973
{
6974
.procname = "accept_ra_defrtr",
6975
.data = &ipv6_devconf.accept_ra_defrtr,
6976
.maxlen = sizeof(int),
6977
.mode = 0644,
6978
.proc_handler = proc_dointvec,
6979
},
6980
{
6981
.procname = "ra_defrtr_metric",
6982
.data = &ipv6_devconf.ra_defrtr_metric,
6983
.maxlen = sizeof(u32),
6984
.mode = 0644,
6985
.proc_handler = proc_douintvec_minmax,
6986
.extra1 = (void *)SYSCTL_ONE,
6987
},
6988
{
6989
.procname = "accept_ra_min_hop_limit",
6990
.data = &ipv6_devconf.accept_ra_min_hop_limit,
6991
.maxlen = sizeof(int),
6992
.mode = 0644,
6993
.proc_handler = proc_dointvec,
6994
},
6995
{
6996
.procname = "accept_ra_min_lft",
6997
.data = &ipv6_devconf.accept_ra_min_lft,
6998
.maxlen = sizeof(int),
6999
.mode = 0644,
7000
.proc_handler = proc_dointvec,
7001
},
7002
{
7003
.procname = "accept_ra_pinfo",
7004
.data = &ipv6_devconf.accept_ra_pinfo,
7005
.maxlen = sizeof(int),
7006
.mode = 0644,
7007
.proc_handler = proc_dointvec,
7008
},
7009
{
7010
.procname = "ra_honor_pio_life",
7011
.data = &ipv6_devconf.ra_honor_pio_life,
7012
.maxlen = sizeof(u8),
7013
.mode = 0644,
7014
.proc_handler = proc_dou8vec_minmax,
7015
.extra1 = SYSCTL_ZERO,
7016
.extra2 = SYSCTL_ONE,
7017
},
7018
{
7019
.procname = "ra_honor_pio_pflag",
7020
.data = &ipv6_devconf.ra_honor_pio_pflag,
7021
.maxlen = sizeof(u8),
7022
.mode = 0644,
7023
.proc_handler = proc_dou8vec_minmax,
7024
.extra1 = SYSCTL_ZERO,
7025
.extra2 = SYSCTL_ONE,
7026
},
7027
#ifdef CONFIG_IPV6_ROUTER_PREF
7028
{
7029
.procname = "accept_ra_rtr_pref",
7030
.data = &ipv6_devconf.accept_ra_rtr_pref,
7031
.maxlen = sizeof(int),
7032
.mode = 0644,
7033
.proc_handler = proc_dointvec,
7034
},
7035
{
7036
.procname = "router_probe_interval",
7037
.data = &ipv6_devconf.rtr_probe_interval,
7038
.maxlen = sizeof(int),
7039
.mode = 0644,
7040
.proc_handler = proc_dointvec_jiffies,
7041
},
7042
#ifdef CONFIG_IPV6_ROUTE_INFO
7043
{
7044
.procname = "accept_ra_rt_info_min_plen",
7045
.data = &ipv6_devconf.accept_ra_rt_info_min_plen,
7046
.maxlen = sizeof(int),
7047
.mode = 0644,
7048
.proc_handler = proc_dointvec,
7049
},
7050
{
7051
.procname = "accept_ra_rt_info_max_plen",
7052
.data = &ipv6_devconf.accept_ra_rt_info_max_plen,
7053
.maxlen = sizeof(int),
7054
.mode = 0644,
7055
.proc_handler = proc_dointvec,
7056
},
7057
#endif
7058
#endif
7059
{
7060
.procname = "proxy_ndp",
7061
.data = &ipv6_devconf.proxy_ndp,
7062
.maxlen = sizeof(int),
7063
.mode = 0644,
7064
.proc_handler = addrconf_sysctl_proxy_ndp,
7065
},
7066
{
7067
.procname = "accept_source_route",
7068
.data = &ipv6_devconf.accept_source_route,
7069
.maxlen = sizeof(int),
7070
.mode = 0644,
7071
.proc_handler = proc_dointvec,
7072
},
7073
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
7074
{
7075
.procname = "optimistic_dad",
7076
.data = &ipv6_devconf.optimistic_dad,
7077
.maxlen = sizeof(int),
7078
.mode = 0644,
7079
.proc_handler = proc_dointvec,
7080
},
7081
{
7082
.procname = "use_optimistic",
7083
.data = &ipv6_devconf.use_optimistic,
7084
.maxlen = sizeof(int),
7085
.mode = 0644,
7086
.proc_handler = proc_dointvec,
7087
},
7088
#endif
7089
#ifdef CONFIG_IPV6_MROUTE
7090
{
7091
.procname = "mc_forwarding",
7092
.data = &ipv6_devconf.mc_forwarding,
7093
.maxlen = sizeof(int),
7094
.mode = 0444,
7095
.proc_handler = proc_dointvec,
7096
},
7097
#endif
7098
{
7099
.procname = "disable_ipv6",
7100
.data = &ipv6_devconf.disable_ipv6,
7101
.maxlen = sizeof(int),
7102
.mode = 0644,
7103
.proc_handler = addrconf_sysctl_disable,
7104
},
7105
{
7106
.procname = "accept_dad",
7107
.data = &ipv6_devconf.accept_dad,
7108
.maxlen = sizeof(int),
7109
.mode = 0644,
7110
.proc_handler = proc_dointvec,
7111
},
7112
{
7113
.procname = "force_tllao",
7114
.data = &ipv6_devconf.force_tllao,
7115
.maxlen = sizeof(int),
7116
.mode = 0644,
7117
.proc_handler = proc_dointvec
7118
},
7119
{
7120
.procname = "ndisc_notify",
7121
.data = &ipv6_devconf.ndisc_notify,
7122
.maxlen = sizeof(int),
7123
.mode = 0644,
7124
.proc_handler = proc_dointvec
7125
},
7126
{
7127
.procname = "suppress_frag_ndisc",
7128
.data = &ipv6_devconf.suppress_frag_ndisc,
7129
.maxlen = sizeof(int),
7130
.mode = 0644,
7131
.proc_handler = proc_dointvec
7132
},
7133
{
7134
.procname = "accept_ra_from_local",
7135
.data = &ipv6_devconf.accept_ra_from_local,
7136
.maxlen = sizeof(int),
7137
.mode = 0644,
7138
.proc_handler = proc_dointvec,
7139
},
7140
{
7141
.procname = "accept_ra_mtu",
7142
.data = &ipv6_devconf.accept_ra_mtu,
7143
.maxlen = sizeof(int),
7144
.mode = 0644,
7145
.proc_handler = proc_dointvec,
7146
},
7147
{
7148
.procname = "stable_secret",
7149
.data = &ipv6_devconf.stable_secret,
7150
.maxlen = IPV6_MAX_STRLEN,
7151
.mode = 0600,
7152
.proc_handler = addrconf_sysctl_stable_secret,
7153
},
7154
{
7155
.procname = "use_oif_addrs_only",
7156
.data = &ipv6_devconf.use_oif_addrs_only,
7157
.maxlen = sizeof(int),
7158
.mode = 0644,
7159
.proc_handler = proc_dointvec,
7160
},
7161
{
7162
.procname = "ignore_routes_with_linkdown",
7163
.data = &ipv6_devconf.ignore_routes_with_linkdown,
7164
.maxlen = sizeof(int),
7165
.mode = 0644,
7166
.proc_handler = addrconf_sysctl_ignore_routes_with_linkdown,
7167
},
7168
{
7169
.procname = "drop_unicast_in_l2_multicast",
7170
.data = &ipv6_devconf.drop_unicast_in_l2_multicast,
7171
.maxlen = sizeof(int),
7172
.mode = 0644,
7173
.proc_handler = proc_dointvec,
7174
},
7175
{
7176
.procname = "drop_unsolicited_na",
7177
.data = &ipv6_devconf.drop_unsolicited_na,
7178
.maxlen = sizeof(int),
7179
.mode = 0644,
7180
.proc_handler = proc_dointvec,
7181
},
7182
{
7183
.procname = "keep_addr_on_down",
7184
.data = &ipv6_devconf.keep_addr_on_down,
7185
.maxlen = sizeof(int),
7186
.mode = 0644,
7187
.proc_handler = proc_dointvec,
7188
7189
},
7190
{
7191
.procname = "seg6_enabled",
7192
.data = &ipv6_devconf.seg6_enabled,
7193
.maxlen = sizeof(int),
7194
.mode = 0644,
7195
.proc_handler = proc_dointvec,
7196
},
7197
#ifdef CONFIG_IPV6_SEG6_HMAC
7198
{
7199
.procname = "seg6_require_hmac",
7200
.data = &ipv6_devconf.seg6_require_hmac,
7201
.maxlen = sizeof(int),
7202
.mode = 0644,
7203
.proc_handler = proc_dointvec,
7204
},
7205
#endif
7206
{
7207
.procname = "enhanced_dad",
7208
.data = &ipv6_devconf.enhanced_dad,
7209
.maxlen = sizeof(int),
7210
.mode = 0644,
7211
.proc_handler = proc_dointvec,
7212
},
7213
{
7214
.procname = "addr_gen_mode",
7215
.data = &ipv6_devconf.addr_gen_mode,
7216
.maxlen = sizeof(int),
7217
.mode = 0644,
7218
.proc_handler = addrconf_sysctl_addr_gen_mode,
7219
},
7220
{
7221
.procname = "disable_policy",
7222
.data = &ipv6_devconf.disable_policy,
7223
.maxlen = sizeof(int),
7224
.mode = 0644,
7225
.proc_handler = addrconf_sysctl_disable_policy,
7226
},
7227
{
7228
.procname = "ndisc_tclass",
7229
.data = &ipv6_devconf.ndisc_tclass,
7230
.maxlen = sizeof(int),
7231
.mode = 0644,
7232
.proc_handler = proc_dointvec_minmax,
7233
.extra1 = (void *)SYSCTL_ZERO,
7234
.extra2 = (void *)&two_five_five,
7235
},
7236
{
7237
.procname = "rpl_seg_enabled",
7238
.data = &ipv6_devconf.rpl_seg_enabled,
7239
.maxlen = sizeof(int),
7240
.mode = 0644,
7241
.proc_handler = proc_dointvec,
7242
},
7243
{
7244
.procname = "ioam6_enabled",
7245
.data = &ipv6_devconf.ioam6_enabled,
7246
.maxlen = sizeof(u8),
7247
.mode = 0644,
7248
.proc_handler = proc_dou8vec_minmax,
7249
.extra1 = (void *)SYSCTL_ZERO,
7250
.extra2 = (void *)SYSCTL_ONE,
7251
},
7252
{
7253
.procname = "ioam6_id",
7254
.data = &ipv6_devconf.ioam6_id,
7255
.maxlen = sizeof(u32),
7256
.mode = 0644,
7257
.proc_handler = proc_douintvec_minmax,
7258
.extra1 = (void *)SYSCTL_ZERO,
7259
.extra2 = (void *)&ioam6_if_id_max,
7260
},
7261
{
7262
.procname = "ioam6_id_wide",
7263
.data = &ipv6_devconf.ioam6_id_wide,
7264
.maxlen = sizeof(u32),
7265
.mode = 0644,
7266
.proc_handler = proc_douintvec,
7267
},
7268
{
7269
.procname = "ndisc_evict_nocarrier",
7270
.data = &ipv6_devconf.ndisc_evict_nocarrier,
7271
.maxlen = sizeof(u8),
7272
.mode = 0644,
7273
.proc_handler = proc_dou8vec_minmax,
7274
.extra1 = (void *)SYSCTL_ZERO,
7275
.extra2 = (void *)SYSCTL_ONE,
7276
},
7277
{
7278
.procname = "accept_untracked_na",
7279
.data = &ipv6_devconf.accept_untracked_na,
7280
.maxlen = sizeof(int),
7281
.mode = 0644,
7282
.proc_handler = proc_dointvec_minmax,
7283
.extra1 = SYSCTL_ZERO,
7284
.extra2 = SYSCTL_TWO,
7285
},
7286
{
7287
.procname = "force_forwarding",
7288
.data = &ipv6_devconf.force_forwarding,
7289
.maxlen = sizeof(int),
7290
.mode = 0644,
7291
.proc_handler = addrconf_sysctl_force_forwarding,
7292
},
7293
};
7294
7295
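/* Duplicate addrconf_sysctl[], point each entry's .data at @p, register
 * the copy under net/ipv6/conf/<dev_name> and announce the new config
 * via RTM_NEWNETCONF.
 */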
static int __addrconf_sysctl_register(struct net *net, char *dev_name,
7296
struct inet6_dev *idev, struct ipv6_devconf *p)
7297
{
7298
size_t table_size = ARRAY_SIZE(addrconf_sysctl);
7299
int i, ifindex;
7300
struct ctl_table *table;
7301
char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
7302
7303
table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL_ACCOUNT);
7304
if (!table)
7305
goto out;
7306
7307
for (i = 0; i < table_size; i++) {
7308
table[i].data += (char *)p - (char *)&ipv6_devconf;
7309
/* If one of these is already set, then it is not safe to
7310
* overwrite either of them: this makes proc_dointvec_minmax
7311
* usable.
7312
*/
7313
if (!table[i].extra1 && !table[i].extra2) {
7314
table[i].extra1 = idev; /* embedded; no ref */
7315
table[i].extra2 = net;
7316
}
7317
}
7318
7319
snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
7320
7321
p->sysctl_header = register_net_sysctl_sz(net, path, table,
7322
table_size);
7323
if (!p->sysctl_header)
7324
goto free;
7325
7326
if (!strcmp(dev_name, "all"))
7327
ifindex = NETCONFA_IFINDEX_ALL;
7328
else if (!strcmp(dev_name, "default"))
7329
ifindex = NETCONFA_IFINDEX_DEFAULT;
7330
else
7331
ifindex = idev->dev->ifindex;
7332
inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
7333
ifindex, p);
7334
return 0;
7335
7336
free:
7337
kfree(table);
7338
out:
7339
return -ENOBUFS;
7340
}
7341
7342
static void __addrconf_sysctl_unregister(struct net *net,
7343
struct ipv6_devconf *p, int ifindex)
7344
{
7345
const struct ctl_table *table;
7346
7347
if (!p->sysctl_header)
7348
return;
7349
7350
table = p->sysctl_header->ctl_table_arg;
7351
unregister_net_sysctl_table(p->sysctl_header);
7352
p->sysctl_header = NULL;
7353
kfree(table);
7354
7355
inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
7356
}
7357
7358
static int addrconf_sysctl_register(struct inet6_dev *idev)
7359
{
7360
int err;
7361
7362
if (!sysctl_dev_name_is_allowed(idev->dev->name))
7363
return -EINVAL;
7364
7365
err = neigh_sysctl_register(idev->dev, idev->nd_parms,
7366
&ndisc_ifinfo_sysctl_change);
7367
if (err)
7368
return err;
7369
err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
7370
idev, &idev->cnf);
7371
if (err)
7372
neigh_sysctl_unregister(idev->nd_parms);
7373
7374
return err;
7375
}
7376
7377
static void addrconf_sysctl_unregister(struct inet6_dev *idev)
7378
{
7379
__addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf,
7380
idev->dev->ifindex);
7381
neigh_sysctl_unregister(idev->nd_parms);
7382
}
7383
7384
7385
#endif
7386
7387
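/* Per-namespace setup: allocate the address hash table plus the "all"
 * and "default" devconf copies (optionally inherited from init_net or
 * the creating namespace, see net_inherit_devconf()) and register their
 * sysctl trees.
 */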
static int __net_init addrconf_init_net(struct net *net)
7388
{
7389
int err = -ENOMEM;
7390
struct ipv6_devconf *all, *dflt;
7391
7392
spin_lock_init(&net->ipv6.addrconf_hash_lock);
7393
INIT_DEFERRABLE_WORK(&net->ipv6.addr_chk_work, addrconf_verify_work);
7394
net->ipv6.inet6_addr_lst = kcalloc(IN6_ADDR_HSIZE,
7395
sizeof(struct hlist_head),
7396
GFP_KERNEL);
7397
if (!net->ipv6.inet6_addr_lst)
7398
goto err_alloc_addr;
7399
7400
all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
7401
if (!all)
7402
goto err_alloc_all;
7403
7404
dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
7405
if (!dflt)
7406
goto err_alloc_dflt;
7407
7408
if (!net_eq(net, &init_net)) {
7409
switch (net_inherit_devconf()) {
7410
case 1: /* copy from init_net */
7411
memcpy(all, init_net.ipv6.devconf_all,
7412
sizeof(ipv6_devconf));
7413
memcpy(dflt, init_net.ipv6.devconf_dflt,
7414
sizeof(ipv6_devconf_dflt));
7415
break;
7416
case 3: /* copy from the current netns */
7417
memcpy(all, current->nsproxy->net_ns->ipv6.devconf_all,
7418
sizeof(ipv6_devconf));
7419
memcpy(dflt,
7420
current->nsproxy->net_ns->ipv6.devconf_dflt,
7421
sizeof(ipv6_devconf_dflt));
7422
break;
7423
case 0:
7424
case 2:
7425
/* use compiled values */
7426
break;
7427
}
7428
}
7429
7430
/* these will be inherited by all namespaces */
7431
dflt->autoconf = ipv6_defaults.autoconf;
7432
dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
7433
7434
dflt->stable_secret.initialized = false;
7435
all->stable_secret.initialized = false;
7436
7437
net->ipv6.devconf_all = all;
7438
net->ipv6.devconf_dflt = dflt;
7439
7440
#ifdef CONFIG_SYSCTL
7441
err = __addrconf_sysctl_register(net, "all", NULL, all);
7442
if (err < 0)
7443
goto err_reg_all;
7444
7445
err = __addrconf_sysctl_register(net, "default", NULL, dflt);
7446
if (err < 0)
7447
goto err_reg_dflt;
7448
#endif
7449
return 0;
7450
7451
#ifdef CONFIG_SYSCTL
7452
err_reg_dflt:
7453
__addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
7454
err_reg_all:
7455
kfree(dflt);
7456
net->ipv6.devconf_dflt = NULL;
7457
#endif
7458
err_alloc_dflt:
7459
kfree(all);
7460
net->ipv6.devconf_all = NULL;
7461
err_alloc_all:
7462
kfree(net->ipv6.inet6_addr_lst);
7463
err_alloc_addr:
7464
return err;
7465
}
7466
7467
static void __net_exit addrconf_exit_net(struct net *net)
7468
{
7469
int i;
7470
7471
#ifdef CONFIG_SYSCTL
7472
__addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt,
7473
NETCONFA_IFINDEX_DEFAULT);
7474
__addrconf_sysctl_unregister(net, net->ipv6.devconf_all,
7475
NETCONFA_IFINDEX_ALL);
7476
#endif
7477
kfree(net->ipv6.devconf_dflt);
7478
net->ipv6.devconf_dflt = NULL;
7479
kfree(net->ipv6.devconf_all);
7480
net->ipv6.devconf_all = NULL;
7481
7482
cancel_delayed_work_sync(&net->ipv6.addr_chk_work);
7483
/*
7484
* Check hash table, then free it.
7485
*/
7486
for (i = 0; i < IN6_ADDR_HSIZE; i++)
7487
WARN_ON_ONCE(!hlist_empty(&net->ipv6.inet6_addr_lst[i]));
7488
7489
kfree(net->ipv6.inet6_addr_lst);
7490
net->ipv6.inet6_addr_lst = NULL;
7491
}
7492
7493
static struct pernet_operations addrconf_ops = {
7494
.init = addrconf_init_net,
7495
.exit = addrconf_exit_net,
7496
};
7497
7498
static struct rtnl_af_ops inet6_ops __read_mostly = {
7499
.family = AF_INET6,
7500
.fill_link_af = inet6_fill_link_af,
7501
.get_link_af_size = inet6_get_link_af_size,
7502
.validate_link_af = inet6_validate_link_af,
7503
.set_link_af = inet6_set_link_af,
7504
};
7505
7506
static const struct rtnl_msg_handler addrconf_rtnl_msg_handlers[] __initconst_or_module = {
7507
{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETLINK,
7508
.dumpit = inet6_dump_ifinfo, .flags = RTNL_FLAG_DUMP_UNLOCKED},
7509
{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_NEWADDR,
7510
.doit = inet6_rtm_newaddr, .flags = RTNL_FLAG_DOIT_PERNET},
7511
{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_DELADDR,
7512
.doit = inet6_rtm_deladdr, .flags = RTNL_FLAG_DOIT_PERNET},
7513
{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETADDR,
7514
.doit = inet6_rtm_getaddr, .dumpit = inet6_dump_ifaddr,
7515
.flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
7516
{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETMULTICAST,
7517
.dumpit = inet6_dump_ifmcaddr,
7518
.flags = RTNL_FLAG_DUMP_UNLOCKED},
7519
{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETANYCAST,
7520
.dumpit = inet6_dump_ifacaddr,
7521
.flags = RTNL_FLAG_DUMP_UNLOCKED},
7522
{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETNETCONF,
7523
.doit = inet6_netconf_get_devconf, .dumpit = inet6_netconf_dump_devconf,
7524
.flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED},
7525
};
7526
7527
/*
7528
* Init / cleanup code
7529
*/
7530
7531
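/* Module init: set up the address label table, pernet ops, addrconf
 * workqueue, the blackhole device's inet6_dev, the netdevice notifier
 * and the rtnetlink handlers, unwinding in reverse order on failure.
 */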
int __init addrconf_init(void)
7532
{
7533
struct inet6_dev *idev;
7534
int err;
7535
7536
err = ipv6_addr_label_init();
7537
if (err < 0) {
7538
pr_crit("%s: cannot initialize default policy table: %d\n",
7539
__func__, err);
7540
goto out;
7541
}
7542
7543
err = register_pernet_subsys(&addrconf_ops);
7544
if (err < 0)
7545
goto out_addrlabel;
7546
7547
/* All works using addrconf_wq need to lock rtnl. */
7548
addrconf_wq = create_singlethread_workqueue("ipv6_addrconf");
7549
if (!addrconf_wq) {
7550
err = -ENOMEM;
7551
goto out_nowq;
7552
}
7553
7554
rtnl_net_lock(&init_net);
7555
idev = ipv6_add_dev(blackhole_netdev);
7556
rtnl_net_unlock(&init_net);
7557
if (IS_ERR(idev)) {
7558
err = PTR_ERR(idev);
7559
goto errlo;
7560
}
7561
7562
ip6_route_init_special_entries();
7563
7564
register_netdevice_notifier(&ipv6_dev_notf);
7565
7566
addrconf_verify(&init_net);
7567
7568
err = rtnl_af_register(&inet6_ops);
7569
if (err)
7570
goto erraf;
7571
7572
err = rtnl_register_many(addrconf_rtnl_msg_handlers);
7573
if (err)
7574
goto errout;
7575
7576
err = ipv6_addr_label_rtnl_register();
7577
if (err < 0)
7578
goto errout;
7579
7580
return 0;
7581
errout:
7582
rtnl_unregister_all(PF_INET6);
7583
rtnl_af_unregister(&inet6_ops);
7584
erraf:
7585
unregister_netdevice_notifier(&ipv6_dev_notf);
7586
errlo:
7587
destroy_workqueue(addrconf_wq);
7588
out_nowq:
7589
unregister_pernet_subsys(&addrconf_ops);
7590
out_addrlabel:
7591
ipv6_addr_label_cleanup();
7592
out:
7593
return err;
7594
}
7595
7596
void addrconf_cleanup(void)
{
	struct net_device *dev;

	unregister_netdevice_notifier(&ipv6_dev_notf);
	unregister_pernet_subsys(&addrconf_ops);
	ipv6_addr_label_cleanup();

	rtnl_af_unregister(&inet6_ops);

	rtnl_net_lock(&init_net);

	/* clean dev list */
	for_each_netdev(&init_net, dev) {
		if (!__in6_dev_get_rtnl_net(dev))
			continue;
		addrconf_ifdown(dev, true);
	}
	addrconf_ifdown(init_net.loopback_dev, true);

	rtnl_net_unlock(&init_net);

	destroy_workqueue(addrconf_wq);
}