Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/net/ipv6/mcast.c
26285 views
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
* Multicast support for IPv6
4
* Linux INET6 implementation
5
*
6
* Authors:
7
* Pedro Roque <[email protected]>
8
*
9
* Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
10
*/
11
12
/* Changes:
13
*
14
* yoshfuji : fix format of router-alert option
15
* YOSHIFUJI Hideaki @USAGI:
16
* Fixed source address for MLD message based on
17
* <draft-ietf-magma-mld-source-05.txt>.
18
* YOSHIFUJI Hideaki @USAGI:
19
* - Ignore Queries for invalid addresses.
20
* - MLD for link-local addresses.
21
* David L Stevens <[email protected]>:
22
* - MLDv2 support
23
*/
24
25
#include <linux/module.h>
26
#include <linux/errno.h>
27
#include <linux/types.h>
28
#include <linux/string.h>
29
#include <linux/socket.h>
30
#include <linux/sockios.h>
31
#include <linux/jiffies.h>
32
#include <linux/net.h>
33
#include <linux/in.h>
34
#include <linux/in6.h>
35
#include <linux/netdevice.h>
36
#include <linux/if_addr.h>
37
#include <linux/if_arp.h>
38
#include <linux/route.h>
39
#include <linux/rtnetlink.h>
40
#include <linux/init.h>
41
#include <linux/proc_fs.h>
42
#include <linux/seq_file.h>
43
#include <linux/slab.h>
44
#include <linux/pkt_sched.h>
45
#include <net/mld.h>
46
#include <linux/workqueue.h>
47
48
#include <linux/netfilter.h>
49
#include <linux/netfilter_ipv6.h>
50
51
#include <net/net_namespace.h>
52
#include <net/netlink.h>
53
#include <net/sock.h>
54
#include <net/snmp.h>
55
56
#include <net/ipv6.h>
57
#include <net/protocol.h>
58
#include <net/if_inet6.h>
59
#include <net/ndisc.h>
60
#include <net/addrconf.h>
61
#include <net/ip6_route.h>
62
#include <net/inet_common.h>
63
64
#include <net/ip6_checksum.h>
65
66
/* Ensure that we have struct in6_addr aligned on 32bit word. */
67
static int __mld2_query_bugs[] __attribute__((__unused__)) = {
68
BUILD_BUG_ON_ZERO(offsetof(struct mld2_query, mld2q_srcs) % 4),
69
BUILD_BUG_ON_ZERO(offsetof(struct mld2_report, mld2r_grec) % 4),
70
BUILD_BUG_ON_ZERO(offsetof(struct mld2_grec, grec_mca) % 4)
71
};
72
73
static struct workqueue_struct *mld_wq;
74
static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
75
76
static void igmp6_join_group(struct ifmcaddr6 *ma);
77
static void igmp6_leave_group(struct ifmcaddr6 *ma);
78
static void mld_mca_work(struct work_struct *work);
79
80
static void mld_ifc_event(struct inet6_dev *idev);
81
static bool mld_in_v1_mode(const struct inet6_dev *idev);
82
static int sf_setstate(struct ifmcaddr6 *pmc);
83
static void sf_markstate(struct ifmcaddr6 *pmc);
84
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
85
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
86
int sfmode, int sfcount, const struct in6_addr *psfsrc,
87
int delta);
88
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
89
int sfmode, int sfcount, const struct in6_addr *psfsrc,
90
int delta);
91
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
92
struct inet6_dev *idev);
93
static int __ipv6_dev_mc_inc(struct net_device *dev,
94
const struct in6_addr *addr, unsigned int mode);
95
96
#define MLD_QRV_DEFAULT 2
97
/* RFC3810, 9.2. Query Interval */
98
#define MLD_QI_DEFAULT (125 * HZ)
99
/* RFC3810, 9.3. Query Response Interval */
100
#define MLD_QRI_DEFAULT (10 * HZ)
101
102
/* RFC3810, 8.1 Query Version Distinctions */
103
#define MLD_V1_QUERY_LEN 24
104
#define MLD_V2_QUERY_LEN_MIN 28
105
106
#define IPV6_MLD_MAX_MSF 64
107
108
int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
109
int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT;
110
111
#define mc_assert_locked(idev) \
112
lockdep_assert_held(&(idev)->mc_lock)
113
114
#define mc_dereference(e, idev) \
115
rcu_dereference_protected(e, lockdep_is_held(&(idev)->mc_lock))
116
117
#define sock_dereference(e, sk) \
118
rcu_dereference_protected(e, lockdep_sock_is_held(sk))
119
120
#define for_each_pmc_socklock(np, sk, pmc) \
121
for (pmc = sock_dereference((np)->ipv6_mc_list, sk); \
122
pmc; \
123
pmc = sock_dereference(pmc->next, sk))
124
125
#define for_each_pmc_rcu(np, pmc) \
126
for (pmc = rcu_dereference((np)->ipv6_mc_list); \
127
pmc; \
128
pmc = rcu_dereference(pmc->next))
129
130
#define for_each_psf_mclock(mc, psf) \
131
for (psf = mc_dereference((mc)->mca_sources, mc->idev); \
132
psf; \
133
psf = mc_dereference(psf->sf_next, mc->idev))
134
135
#define for_each_psf_rcu(mc, psf) \
136
for (psf = rcu_dereference((mc)->mca_sources); \
137
psf; \
138
psf = rcu_dereference(psf->sf_next))
139
140
#define for_each_psf_tomb(mc, psf) \
141
for (psf = mc_dereference((mc)->mca_tomb, mc->idev); \
142
psf; \
143
psf = mc_dereference(psf->sf_next, mc->idev))
144
145
#define for_each_mc_mclock(idev, mc) \
146
for (mc = mc_dereference((idev)->mc_list, idev); \
147
mc; \
148
mc = mc_dereference(mc->next, idev))
149
150
#define for_each_mc_rcu(idev, mc) \
151
for (mc = rcu_dereference((idev)->mc_list); \
152
mc; \
153
mc = rcu_dereference(mc->next))
154
155
#define for_each_mc_tomb(idev, mc) \
156
for (mc = mc_dereference((idev)->mc_tomb, idev); \
157
mc; \
158
mc = mc_dereference(mc->next, idev))
159
160
static int unsolicited_report_interval(struct inet6_dev *idev)
161
{
162
int iv;
163
164
if (mld_in_v1_mode(idev))
165
iv = READ_ONCE(idev->cnf.mldv1_unsolicited_report_interval);
166
else
167
iv = READ_ONCE(idev->cnf.mldv2_unsolicited_report_interval);
168
169
return iv > 0 ? iv : 1;
170
}
171
172
/*
173
* socket join on multicast group
174
*/
175
static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
176
const struct in6_addr *addr, unsigned int mode)
177
{
178
struct ipv6_pinfo *np = inet6_sk(sk);
179
struct ipv6_mc_socklist *mc_lst;
180
struct net *net = sock_net(sk);
181
struct net_device *dev = NULL;
182
int err;
183
184
if (!ipv6_addr_is_multicast(addr))
185
return -EINVAL;
186
187
for_each_pmc_socklock(np, sk, mc_lst) {
188
if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
189
ipv6_addr_equal(&mc_lst->addr, addr))
190
return -EADDRINUSE;
191
}
192
193
mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
194
195
if (!mc_lst)
196
return -ENOMEM;
197
198
mc_lst->next = NULL;
199
mc_lst->addr = *addr;
200
201
if (ifindex == 0) {
202
struct rt6_info *rt;
203
204
rcu_read_lock();
205
rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
206
if (rt) {
207
dev = dst_dev(&rt->dst);
208
dev_hold(dev);
209
ip6_rt_put(rt);
210
}
211
rcu_read_unlock();
212
} else {
213
dev = dev_get_by_index(net, ifindex);
214
}
215
216
if (!dev) {
217
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
218
return -ENODEV;
219
}
220
221
mc_lst->ifindex = dev->ifindex;
222
mc_lst->sfmode = mode;
223
RCU_INIT_POINTER(mc_lst->sflist, NULL);
224
225
/* now add/increase the group membership on the device */
226
err = __ipv6_dev_mc_inc(dev, addr, mode);
227
228
dev_put(dev);
229
230
if (err) {
231
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
232
return err;
233
}
234
235
mc_lst->next = np->ipv6_mc_list;
236
rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
237
238
return 0;
239
}
240
241
int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
242
{
243
return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
244
}
245
EXPORT_SYMBOL(ipv6_sock_mc_join);
246
247
int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
248
const struct in6_addr *addr, unsigned int mode)
249
{
250
return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
251
}
252
253
/*
254
* socket leave on multicast group
255
*/
256
static void __ipv6_sock_mc_drop(struct sock *sk, struct ipv6_mc_socklist *mc_lst)
257
{
258
struct net *net = sock_net(sk);
259
struct net_device *dev;
260
261
dev = dev_get_by_index(net, mc_lst->ifindex);
262
if (dev) {
263
struct inet6_dev *idev = in6_dev_get(dev);
264
265
ip6_mc_leave_src(sk, mc_lst, idev);
266
267
if (idev) {
268
__ipv6_dev_mc_dec(idev, &mc_lst->addr);
269
in6_dev_put(idev);
270
}
271
272
dev_put(dev);
273
} else {
274
ip6_mc_leave_src(sk, mc_lst, NULL);
275
}
276
277
atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
278
kfree_rcu(mc_lst, rcu);
279
}
280
281
int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
282
{
283
struct ipv6_pinfo *np = inet6_sk(sk);
284
struct ipv6_mc_socklist __rcu **lnk;
285
struct ipv6_mc_socklist *mc_lst;
286
287
if (!ipv6_addr_is_multicast(addr))
288
return -EINVAL;
289
290
for (lnk = &np->ipv6_mc_list;
291
(mc_lst = sock_dereference(*lnk, sk)) != NULL;
292
lnk = &mc_lst->next) {
293
if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
294
ipv6_addr_equal(&mc_lst->addr, addr)) {
295
*lnk = mc_lst->next;
296
__ipv6_sock_mc_drop(sk, mc_lst);
297
return 0;
298
}
299
}
300
301
return -EADDRNOTAVAIL;
302
}
303
EXPORT_SYMBOL(ipv6_sock_mc_drop);
304
305
static struct inet6_dev *ip6_mc_find_dev(struct net *net,
306
const struct in6_addr *group,
307
int ifindex)
308
{
309
struct net_device *dev = NULL;
310
struct inet6_dev *idev;
311
312
if (ifindex == 0) {
313
struct rt6_info *rt;
314
315
rcu_read_lock();
316
rt = rt6_lookup(net, group, NULL, 0, NULL, 0);
317
if (rt) {
318
dev = dst_dev(&rt->dst);
319
dev_hold(dev);
320
ip6_rt_put(rt);
321
}
322
rcu_read_unlock();
323
} else {
324
dev = dev_get_by_index(net, ifindex);
325
}
326
if (!dev)
327
return NULL;
328
329
idev = in6_dev_get(dev);
330
dev_put(dev);
331
332
return idev;
333
}
334
335
void __ipv6_sock_mc_close(struct sock *sk)
336
{
337
struct ipv6_pinfo *np = inet6_sk(sk);
338
struct ipv6_mc_socklist *mc_lst;
339
340
while ((mc_lst = sock_dereference(np->ipv6_mc_list, sk)) != NULL) {
341
np->ipv6_mc_list = mc_lst->next;
342
__ipv6_sock_mc_drop(sk, mc_lst);
343
}
344
}
345
346
void ipv6_sock_mc_close(struct sock *sk)
347
{
348
struct ipv6_pinfo *np = inet6_sk(sk);
349
350
if (!rcu_access_pointer(np->ipv6_mc_list))
351
return;
352
353
lock_sock(sk);
354
__ipv6_sock_mc_close(sk);
355
release_sock(sk);
356
}
357
358
int ip6_mc_source(int add, int omode, struct sock *sk,
359
struct group_source_req *pgsr)
360
{
361
struct ipv6_pinfo *inet6 = inet6_sk(sk);
362
struct in6_addr *source, *group;
363
struct net *net = sock_net(sk);
364
struct ipv6_mc_socklist *pmc;
365
struct ip6_sf_socklist *psl;
366
struct inet6_dev *idev;
367
int leavegroup = 0;
368
int i, j, rv;
369
int err;
370
371
source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
372
group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;
373
374
if (!ipv6_addr_is_multicast(group))
375
return -EINVAL;
376
377
idev = ip6_mc_find_dev(net, group, pgsr->gsr_interface);
378
if (!idev)
379
return -ENODEV;
380
381
mutex_lock(&idev->mc_lock);
382
383
if (idev->dead) {
384
err = -ENODEV;
385
goto done;
386
}
387
388
err = -EADDRNOTAVAIL;
389
390
for_each_pmc_socklock(inet6, sk, pmc) {
391
if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
392
continue;
393
if (ipv6_addr_equal(&pmc->addr, group))
394
break;
395
}
396
if (!pmc) { /* must have a prior join */
397
err = -EINVAL;
398
goto done;
399
}
400
/* if a source filter was set, must be the same mode as before */
401
if (rcu_access_pointer(pmc->sflist)) {
402
if (pmc->sfmode != omode) {
403
err = -EINVAL;
404
goto done;
405
}
406
} else if (pmc->sfmode != omode) {
407
/* allow mode switches for empty-set filters */
408
ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
409
ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
410
pmc->sfmode = omode;
411
}
412
413
psl = sock_dereference(pmc->sflist, sk);
414
if (!add) {
415
if (!psl)
416
goto done; /* err = -EADDRNOTAVAIL */
417
rv = !0;
418
for (i = 0; i < psl->sl_count; i++) {
419
rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
420
if (rv == 0)
421
break;
422
}
423
if (rv) /* source not found */
424
goto done; /* err = -EADDRNOTAVAIL */
425
426
/* special case - (INCLUDE, empty) == LEAVE_GROUP */
427
if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
428
leavegroup = 1;
429
goto done;
430
}
431
432
/* update the interface filter */
433
ip6_mc_del_src(idev, group, omode, 1, source, 1);
434
435
for (j = i+1; j < psl->sl_count; j++)
436
psl->sl_addr[j-1] = psl->sl_addr[j];
437
psl->sl_count--;
438
err = 0;
439
goto done;
440
}
441
/* else, add a new source to the filter */
442
443
if (psl && psl->sl_count >= sysctl_mld_max_msf) {
444
err = -ENOBUFS;
445
goto done;
446
}
447
if (!psl || psl->sl_count == psl->sl_max) {
448
struct ip6_sf_socklist *newpsl;
449
int count = IP6_SFBLOCK;
450
451
if (psl)
452
count += psl->sl_max;
453
newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr, count),
454
GFP_KERNEL);
455
if (!newpsl) {
456
err = -ENOBUFS;
457
goto done;
458
}
459
newpsl->sl_max = count;
460
newpsl->sl_count = count - IP6_SFBLOCK;
461
if (psl) {
462
for (i = 0; i < psl->sl_count; i++)
463
newpsl->sl_addr[i] = psl->sl_addr[i];
464
atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
465
&sk->sk_omem_alloc);
466
}
467
rcu_assign_pointer(pmc->sflist, newpsl);
468
kfree_rcu(psl, rcu);
469
psl = newpsl;
470
}
471
rv = 1; /* > 0 for insert logic below if sl_count is 0 */
472
for (i = 0; i < psl->sl_count; i++) {
473
rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
474
if (rv == 0) /* There is an error in the address. */
475
goto done;
476
}
477
for (j = psl->sl_count-1; j >= i; j--)
478
psl->sl_addr[j+1] = psl->sl_addr[j];
479
psl->sl_addr[i] = *source;
480
psl->sl_count++;
481
err = 0;
482
/* update the interface list */
483
ip6_mc_add_src(idev, group, omode, 1, source, 1);
484
done:
485
mutex_unlock(&idev->mc_lock);
486
in6_dev_put(idev);
487
if (leavegroup)
488
err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
489
return err;
490
}
491
492
int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
493
struct sockaddr_storage *list)
494
{
495
struct ipv6_pinfo *inet6 = inet6_sk(sk);
496
struct ip6_sf_socklist *newpsl, *psl;
497
struct net *net = sock_net(sk);
498
const struct in6_addr *group;
499
struct ipv6_mc_socklist *pmc;
500
struct inet6_dev *idev;
501
int leavegroup = 0;
502
int i, err;
503
504
group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
505
506
if (!ipv6_addr_is_multicast(group))
507
return -EINVAL;
508
if (gsf->gf_fmode != MCAST_INCLUDE &&
509
gsf->gf_fmode != MCAST_EXCLUDE)
510
return -EINVAL;
511
512
idev = ip6_mc_find_dev(net, group, gsf->gf_interface);
513
if (!idev)
514
return -ENODEV;
515
516
mutex_lock(&idev->mc_lock);
517
518
if (idev->dead) {
519
err = -ENODEV;
520
goto done;
521
}
522
523
err = 0;
524
525
if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
526
leavegroup = 1;
527
goto done;
528
}
529
530
for_each_pmc_socklock(inet6, sk, pmc) {
531
if (pmc->ifindex != gsf->gf_interface)
532
continue;
533
if (ipv6_addr_equal(&pmc->addr, group))
534
break;
535
}
536
if (!pmc) { /* must have a prior join */
537
err = -EINVAL;
538
goto done;
539
}
540
if (gsf->gf_numsrc) {
541
newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr,
542
gsf->gf_numsrc),
543
GFP_KERNEL);
544
if (!newpsl) {
545
err = -ENOBUFS;
546
goto done;
547
}
548
newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
549
for (i = 0; i < newpsl->sl_count; ++i, ++list) {
550
struct sockaddr_in6 *psin6;
551
552
psin6 = (struct sockaddr_in6 *)list;
553
newpsl->sl_addr[i] = psin6->sin6_addr;
554
}
555
556
err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
557
newpsl->sl_count, newpsl->sl_addr, 0);
558
if (err) {
559
sock_kfree_s(sk, newpsl, struct_size(newpsl, sl_addr,
560
newpsl->sl_max));
561
goto done;
562
}
563
} else {
564
newpsl = NULL;
565
ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
566
}
567
568
psl = sock_dereference(pmc->sflist, sk);
569
if (psl) {
570
ip6_mc_del_src(idev, group, pmc->sfmode,
571
psl->sl_count, psl->sl_addr, 0);
572
atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
573
&sk->sk_omem_alloc);
574
} else {
575
ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
576
}
577
578
rcu_assign_pointer(pmc->sflist, newpsl);
579
kfree_rcu(psl, rcu);
580
pmc->sfmode = gsf->gf_fmode;
581
err = 0;
582
done:
583
mutex_unlock(&idev->mc_lock);
584
in6_dev_put(idev);
585
if (leavegroup)
586
err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
587
return err;
588
}
589
590
int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
591
sockptr_t optval, size_t ss_offset)
592
{
593
struct ipv6_pinfo *inet6 = inet6_sk(sk);
594
const struct in6_addr *group;
595
struct ipv6_mc_socklist *pmc;
596
struct ip6_sf_socklist *psl;
597
unsigned int count;
598
int i, copycount;
599
600
group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;
601
602
if (!ipv6_addr_is_multicast(group))
603
return -EINVAL;
604
605
for_each_pmc_socklock(inet6, sk, pmc) {
606
if (pmc->ifindex != gsf->gf_interface)
607
continue;
608
if (ipv6_addr_equal(group, &pmc->addr))
609
break;
610
}
611
if (!pmc) /* must have a prior join */
612
return -EADDRNOTAVAIL;
613
614
gsf->gf_fmode = pmc->sfmode;
615
psl = sock_dereference(pmc->sflist, sk);
616
count = psl ? psl->sl_count : 0;
617
618
copycount = min(count, gsf->gf_numsrc);
619
gsf->gf_numsrc = count;
620
for (i = 0; i < copycount; i++) {
621
struct sockaddr_in6 *psin6;
622
struct sockaddr_storage ss;
623
624
psin6 = (struct sockaddr_in6 *)&ss;
625
memset(&ss, 0, sizeof(ss));
626
psin6->sin6_family = AF_INET6;
627
psin6->sin6_addr = psl->sl_addr[i];
628
if (copy_to_sockptr_offset(optval, ss_offset, &ss, sizeof(ss)))
629
return -EFAULT;
630
ss_offset += sizeof(ss);
631
}
632
return 0;
633
}
634
635
bool inet6_mc_check(const struct sock *sk, const struct in6_addr *mc_addr,
636
const struct in6_addr *src_addr)
637
{
638
const struct ipv6_pinfo *np = inet6_sk(sk);
639
const struct ipv6_mc_socklist *mc;
640
const struct ip6_sf_socklist *psl;
641
bool rv = true;
642
643
rcu_read_lock();
644
for_each_pmc_rcu(np, mc) {
645
if (ipv6_addr_equal(&mc->addr, mc_addr))
646
break;
647
}
648
if (!mc) {
649
rcu_read_unlock();
650
return inet6_test_bit(MC6_ALL, sk);
651
}
652
psl = rcu_dereference(mc->sflist);
653
if (!psl) {
654
rv = mc->sfmode == MCAST_EXCLUDE;
655
} else {
656
int i;
657
658
for (i = 0; i < psl->sl_count; i++) {
659
if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
660
break;
661
}
662
if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
663
rv = false;
664
if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
665
rv = false;
666
}
667
rcu_read_unlock();
668
669
return rv;
670
}
671
672
static void igmp6_group_added(struct ifmcaddr6 *mc)
673
{
674
struct net_device *dev = mc->idev->dev;
675
char buf[MAX_ADDR_LEN];
676
677
mc_assert_locked(mc->idev);
678
679
if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
680
IPV6_ADDR_SCOPE_LINKLOCAL)
681
return;
682
683
if (!(mc->mca_flags&MAF_LOADED)) {
684
mc->mca_flags |= MAF_LOADED;
685
if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
686
dev_mc_add(dev, buf);
687
}
688
689
if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
690
return;
691
692
if (mld_in_v1_mode(mc->idev)) {
693
igmp6_join_group(mc);
694
return;
695
}
696
/* else v2 */
697
698
/* Based on RFC3810 6.1, for newly added INCLUDE SSM, we
699
* should not send filter-mode change record as the mode
700
* should be from IN() to IN(A).
701
*/
702
if (mc->mca_sfmode == MCAST_EXCLUDE)
703
mc->mca_crcount = mc->idev->mc_qrv;
704
705
mld_ifc_event(mc->idev);
706
}
707
708
static void igmp6_group_dropped(struct ifmcaddr6 *mc)
709
{
710
struct net_device *dev = mc->idev->dev;
711
char buf[MAX_ADDR_LEN];
712
713
mc_assert_locked(mc->idev);
714
715
if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
716
IPV6_ADDR_SCOPE_LINKLOCAL)
717
return;
718
719
if (mc->mca_flags&MAF_LOADED) {
720
mc->mca_flags &= ~MAF_LOADED;
721
if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
722
dev_mc_del(dev, buf);
723
}
724
725
if (mc->mca_flags & MAF_NOREPORT)
726
return;
727
728
if (!mc->idev->dead)
729
igmp6_leave_group(mc);
730
731
if (cancel_delayed_work(&mc->mca_work))
732
refcount_dec(&mc->mca_refcnt);
733
}
734
735
/* deleted ifmcaddr6 manipulation */
736
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
737
{
738
struct ifmcaddr6 *pmc;
739
740
mc_assert_locked(idev);
741
742
/* this is an "ifmcaddr6" for convenience; only the fields below
743
* are actually used. In particular, the refcnt and users are not
744
* used for management of the delete list. Using the same structure
745
* for deleted items allows change reports to use common code with
746
* non-deleted or query-response MCA's.
747
*/
748
pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
749
if (!pmc)
750
return;
751
752
pmc->idev = im->idev;
753
in6_dev_hold(idev);
754
pmc->mca_addr = im->mca_addr;
755
pmc->mca_crcount = idev->mc_qrv;
756
pmc->mca_sfmode = im->mca_sfmode;
757
if (pmc->mca_sfmode == MCAST_INCLUDE) {
758
struct ip6_sf_list *psf;
759
760
rcu_assign_pointer(pmc->mca_tomb,
761
mc_dereference(im->mca_tomb, idev));
762
rcu_assign_pointer(pmc->mca_sources,
763
mc_dereference(im->mca_sources, idev));
764
RCU_INIT_POINTER(im->mca_tomb, NULL);
765
RCU_INIT_POINTER(im->mca_sources, NULL);
766
767
for_each_psf_mclock(pmc, psf)
768
psf->sf_crcount = pmc->mca_crcount;
769
}
770
771
rcu_assign_pointer(pmc->next, idev->mc_tomb);
772
rcu_assign_pointer(idev->mc_tomb, pmc);
773
}
774
775
static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
776
{
777
struct ip6_sf_list *psf, *sources, *tomb;
778
struct in6_addr *pmca = &im->mca_addr;
779
struct ifmcaddr6 *pmc, *pmc_prev;
780
781
mc_assert_locked(idev);
782
783
pmc_prev = NULL;
784
for_each_mc_tomb(idev, pmc) {
785
if (ipv6_addr_equal(&pmc->mca_addr, pmca))
786
break;
787
pmc_prev = pmc;
788
}
789
if (!pmc)
790
return;
791
if (pmc_prev)
792
rcu_assign_pointer(pmc_prev->next, pmc->next);
793
else
794
rcu_assign_pointer(idev->mc_tomb, pmc->next);
795
796
im->idev = pmc->idev;
797
if (im->mca_sfmode == MCAST_INCLUDE) {
798
tomb = rcu_replace_pointer(im->mca_tomb,
799
mc_dereference(pmc->mca_tomb, pmc->idev),
800
lockdep_is_held(&im->idev->mc_lock));
801
rcu_assign_pointer(pmc->mca_tomb, tomb);
802
803
sources = rcu_replace_pointer(im->mca_sources,
804
mc_dereference(pmc->mca_sources, pmc->idev),
805
lockdep_is_held(&im->idev->mc_lock));
806
rcu_assign_pointer(pmc->mca_sources, sources);
807
for_each_psf_mclock(im, psf)
808
psf->sf_crcount = idev->mc_qrv;
809
} else {
810
im->mca_crcount = idev->mc_qrv;
811
}
812
ip6_mc_clear_src(pmc);
813
in6_dev_put(pmc->idev);
814
kfree_rcu(pmc, rcu);
815
}
816
817
static void mld_clear_delrec(struct inet6_dev *idev)
818
{
819
struct ifmcaddr6 *pmc, *nextpmc;
820
821
mc_assert_locked(idev);
822
823
pmc = mc_dereference(idev->mc_tomb, idev);
824
RCU_INIT_POINTER(idev->mc_tomb, NULL);
825
826
for (; pmc; pmc = nextpmc) {
827
nextpmc = mc_dereference(pmc->next, idev);
828
ip6_mc_clear_src(pmc);
829
in6_dev_put(pmc->idev);
830
kfree_rcu(pmc, rcu);
831
}
832
833
/* clear dead sources, too */
834
for_each_mc_mclock(idev, pmc) {
835
struct ip6_sf_list *psf, *psf_next;
836
837
psf = mc_dereference(pmc->mca_tomb, idev);
838
RCU_INIT_POINTER(pmc->mca_tomb, NULL);
839
for (; psf; psf = psf_next) {
840
psf_next = mc_dereference(psf->sf_next, idev);
841
kfree_rcu(psf, rcu);
842
}
843
}
844
}
845
846
static void mld_clear_query(struct inet6_dev *idev)
847
{
848
spin_lock_bh(&idev->mc_query_lock);
849
__skb_queue_purge(&idev->mc_query_queue);
850
spin_unlock_bh(&idev->mc_query_lock);
851
}
852
853
static void mld_clear_report(struct inet6_dev *idev)
854
{
855
spin_lock_bh(&idev->mc_report_lock);
856
__skb_queue_purge(&idev->mc_report_queue);
857
spin_unlock_bh(&idev->mc_report_lock);
858
}
859
860
static void ma_put(struct ifmcaddr6 *mc)
861
{
862
if (refcount_dec_and_test(&mc->mca_refcnt)) {
863
in6_dev_put(mc->idev);
864
kfree_rcu(mc, rcu);
865
}
866
}
867
868
static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
869
const struct in6_addr *addr,
870
unsigned int mode)
871
{
872
struct ifmcaddr6 *mc;
873
874
mc_assert_locked(idev);
875
876
mc = kzalloc(sizeof(*mc), GFP_KERNEL);
877
if (!mc)
878
return NULL;
879
880
INIT_DELAYED_WORK(&mc->mca_work, mld_mca_work);
881
882
mc->mca_addr = *addr;
883
mc->idev = idev; /* reference taken by caller */
884
mc->mca_users = 1;
885
/* mca_stamp should be updated upon changes */
886
mc->mca_cstamp = mc->mca_tstamp = jiffies;
887
refcount_set(&mc->mca_refcnt, 1);
888
889
mc->mca_sfmode = mode;
890
mc->mca_sfcount[mode] = 1;
891
892
if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
893
IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
894
mc->mca_flags |= MAF_NOREPORT;
895
896
return mc;
897
}
898
899
static void inet6_ifmcaddr_notify(struct net_device *dev,
900
const struct ifmcaddr6 *ifmca, int event)
901
{
902
struct inet6_fill_args fillargs = {
903
.portid = 0,
904
.seq = 0,
905
.event = event,
906
.flags = 0,
907
.netnsid = -1,
908
.force_rt_scope_universe = true,
909
};
910
struct net *net = dev_net(dev);
911
struct sk_buff *skb;
912
int err = -ENOMEM;
913
914
skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) +
915
nla_total_size(sizeof(struct in6_addr)) +
916
nla_total_size(sizeof(struct ifa_cacheinfo)),
917
GFP_KERNEL);
918
if (!skb)
919
goto error;
920
921
err = inet6_fill_ifmcaddr(skb, ifmca, &fillargs);
922
if (err < 0) {
923
WARN_ON_ONCE(err == -EMSGSIZE);
924
nlmsg_free(skb);
925
goto error;
926
}
927
928
rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MCADDR, NULL, GFP_KERNEL);
929
return;
930
error:
931
rtnl_set_sk_err(net, RTNLGRP_IPV6_MCADDR, err);
932
}
933
934
/*
935
* device multicast group inc (add if not found)
936
*/
937
static int __ipv6_dev_mc_inc(struct net_device *dev,
938
const struct in6_addr *addr, unsigned int mode)
939
{
940
struct inet6_dev *idev;
941
struct ifmcaddr6 *mc;
942
943
/* we need to take a reference on idev */
944
idev = in6_dev_get(dev);
945
if (!idev)
946
return -EINVAL;
947
948
mutex_lock(&idev->mc_lock);
949
950
if (READ_ONCE(idev->dead)) {
951
mutex_unlock(&idev->mc_lock);
952
in6_dev_put(idev);
953
return -ENODEV;
954
}
955
956
for_each_mc_mclock(idev, mc) {
957
if (ipv6_addr_equal(&mc->mca_addr, addr)) {
958
mc->mca_users++;
959
ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
960
mutex_unlock(&idev->mc_lock);
961
in6_dev_put(idev);
962
return 0;
963
}
964
}
965
966
mc = mca_alloc(idev, addr, mode);
967
if (!mc) {
968
mutex_unlock(&idev->mc_lock);
969
in6_dev_put(idev);
970
return -ENOMEM;
971
}
972
973
rcu_assign_pointer(mc->next, idev->mc_list);
974
rcu_assign_pointer(idev->mc_list, mc);
975
976
mld_del_delrec(idev, mc);
977
igmp6_group_added(mc);
978
inet6_ifmcaddr_notify(dev, mc, RTM_NEWMULTICAST);
979
mutex_unlock(&idev->mc_lock);
980
981
return 0;
982
}
983
984
int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
985
{
986
return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
987
}
988
EXPORT_SYMBOL(ipv6_dev_mc_inc);
989
990
/*
991
* device multicast group del
992
*/
993
int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
994
{
995
struct ifmcaddr6 *ma, __rcu **map;
996
997
mutex_lock(&idev->mc_lock);
998
999
for (map = &idev->mc_list;
1000
(ma = mc_dereference(*map, idev));
1001
map = &ma->next) {
1002
if (ipv6_addr_equal(&ma->mca_addr, addr)) {
1003
if (--ma->mca_users == 0) {
1004
*map = ma->next;
1005
1006
igmp6_group_dropped(ma);
1007
inet6_ifmcaddr_notify(idev->dev, ma,
1008
RTM_DELMULTICAST);
1009
ip6_mc_clear_src(ma);
1010
mutex_unlock(&idev->mc_lock);
1011
1012
ma_put(ma);
1013
return 0;
1014
}
1015
mutex_unlock(&idev->mc_lock);
1016
return 0;
1017
}
1018
}
1019
1020
mutex_unlock(&idev->mc_lock);
1021
return -ENOENT;
1022
}
1023
1024
int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
1025
{
1026
struct inet6_dev *idev;
1027
int err;
1028
1029
idev = in6_dev_get(dev);
1030
if (!idev)
1031
return -ENODEV;
1032
1033
err = __ipv6_dev_mc_dec(idev, addr);
1034
in6_dev_put(idev);
1035
1036
return err;
1037
}
1038
EXPORT_SYMBOL(ipv6_dev_mc_dec);
1039
1040
/*
1041
* check if the interface/address pair is valid
1042
*/
1043
bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
1044
const struct in6_addr *src_addr)
1045
{
1046
struct inet6_dev *idev;
1047
struct ifmcaddr6 *mc;
1048
bool rv = false;
1049
1050
rcu_read_lock();
1051
idev = __in6_dev_get(dev);
1052
if (!idev)
1053
goto unlock;
1054
for_each_mc_rcu(idev, mc) {
1055
if (ipv6_addr_equal(&mc->mca_addr, group))
1056
break;
1057
}
1058
if (!mc)
1059
goto unlock;
1060
if (src_addr && !ipv6_addr_any(src_addr)) {
1061
struct ip6_sf_list *psf;
1062
1063
for_each_psf_rcu(mc, psf) {
1064
if (ipv6_addr_equal(&psf->sf_addr, src_addr))
1065
break;
1066
}
1067
if (psf)
1068
rv = READ_ONCE(psf->sf_count[MCAST_INCLUDE]) ||
1069
READ_ONCE(psf->sf_count[MCAST_EXCLUDE]) !=
1070
READ_ONCE(mc->mca_sfcount[MCAST_EXCLUDE]);
1071
else
1072
rv = READ_ONCE(mc->mca_sfcount[MCAST_EXCLUDE]) != 0;
1073
} else {
1074
rv = true; /* don't filter unspecified source */
1075
}
1076
unlock:
1077
rcu_read_unlock();
1078
return rv;
1079
}
1080
1081
static void mld_gq_start_work(struct inet6_dev *idev)
1082
{
1083
unsigned long tv = get_random_u32_below(idev->mc_maxdelay);
1084
1085
mc_assert_locked(idev);
1086
1087
idev->mc_gq_running = 1;
1088
if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2))
1089
in6_dev_hold(idev);
1090
}
1091
1092
static void mld_gq_stop_work(struct inet6_dev *idev)
1093
{
1094
mc_assert_locked(idev);
1095
1096
idev->mc_gq_running = 0;
1097
if (cancel_delayed_work(&idev->mc_gq_work))
1098
__in6_dev_put(idev);
1099
}
1100
1101
static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay)
1102
{
1103
unsigned long tv = get_random_u32_below(delay);
1104
1105
mc_assert_locked(idev);
1106
1107
if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2))
1108
in6_dev_hold(idev);
1109
}
1110
1111
static void mld_ifc_stop_work(struct inet6_dev *idev)
1112
{
1113
mc_assert_locked(idev);
1114
1115
idev->mc_ifc_count = 0;
1116
if (cancel_delayed_work(&idev->mc_ifc_work))
1117
__in6_dev_put(idev);
1118
}
1119
1120
static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay)
1121
{
1122
unsigned long tv = get_random_u32_below(delay);
1123
1124
mc_assert_locked(idev);
1125
1126
if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2))
1127
in6_dev_hold(idev);
1128
}
1129
1130
static void mld_dad_stop_work(struct inet6_dev *idev)
1131
{
1132
if (cancel_delayed_work(&idev->mc_dad_work))
1133
__in6_dev_put(idev);
1134
}
1135
1136
static void mld_query_stop_work(struct inet6_dev *idev)
1137
{
1138
spin_lock_bh(&idev->mc_query_lock);
1139
if (cancel_delayed_work(&idev->mc_query_work))
1140
__in6_dev_put(idev);
1141
spin_unlock_bh(&idev->mc_query_lock);
1142
}
1143
1144
static void mld_report_stop_work(struct inet6_dev *idev)
1145
{
1146
if (cancel_delayed_work_sync(&idev->mc_report_work))
1147
__in6_dev_put(idev);
1148
}
1149
1150
/* IGMP handling (alias multicast ICMPv6 messages) */
1151
static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
1152
{
1153
unsigned long delay = resptime;
1154
1155
mc_assert_locked(ma->idev);
1156
1157
/* Do not start work for these addresses */
1158
if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
1159
IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
1160
return;
1161
1162
if (cancel_delayed_work(&ma->mca_work)) {
1163
refcount_dec(&ma->mca_refcnt);
1164
delay = ma->mca_work.timer.expires - jiffies;
1165
}
1166
1167
if (delay >= resptime)
1168
delay = get_random_u32_below(resptime);
1169
1170
if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
1171
refcount_inc(&ma->mca_refcnt);
1172
ma->mca_flags |= MAF_TIMER_RUNNING;
1173
}
1174
1175
/* mark EXCLUDE-mode sources */
1176
static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1177
const struct in6_addr *srcs)
1178
{
1179
struct ip6_sf_list *psf;
1180
int i, scount;
1181
1182
mc_assert_locked(pmc->idev);
1183
1184
scount = 0;
1185
for_each_psf_mclock(pmc, psf) {
1186
if (scount == nsrcs)
1187
break;
1188
for (i = 0; i < nsrcs; i++) {
1189
/* skip inactive filters */
1190
if (psf->sf_count[MCAST_INCLUDE] ||
1191
pmc->mca_sfcount[MCAST_EXCLUDE] !=
1192
psf->sf_count[MCAST_EXCLUDE])
1193
break;
1194
if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1195
scount++;
1196
break;
1197
}
1198
}
1199
}
1200
pmc->mca_flags &= ~MAF_GSQUERY;
1201
if (scount == nsrcs) /* all sources excluded */
1202
return false;
1203
return true;
1204
}
1205
1206
static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
1207
const struct in6_addr *srcs)
1208
{
1209
struct ip6_sf_list *psf;
1210
int i, scount;
1211
1212
mc_assert_locked(pmc->idev);
1213
1214
if (pmc->mca_sfmode == MCAST_EXCLUDE)
1215
return mld_xmarksources(pmc, nsrcs, srcs);
1216
1217
/* mark INCLUDE-mode sources */
1218
1219
scount = 0;
1220
for_each_psf_mclock(pmc, psf) {
1221
if (scount == nsrcs)
1222
break;
1223
for (i = 0; i < nsrcs; i++) {
1224
if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1225
psf->sf_gsresp = 1;
1226
scount++;
1227
break;
1228
}
1229
}
1230
}
1231
if (!scount) {
1232
pmc->mca_flags &= ~MAF_GSQUERY;
1233
return false;
1234
}
1235
pmc->mca_flags |= MAF_GSQUERY;
1236
return true;
1237
}
1238
1239
static int mld_force_mld_version(const struct inet6_dev *idev)
1240
{
1241
const struct net *net = dev_net(idev->dev);
1242
int all_force;
1243
1244
all_force = READ_ONCE(net->ipv6.devconf_all->force_mld_version);
1245
/* Normally, both are 0 here. If enforcement to a particular is
1246
* being used, individual device enforcement will have a lower
1247
* precedence over 'all' device (.../conf/all/force_mld_version).
1248
*/
1249
return all_force ?: READ_ONCE(idev->cnf.force_mld_version);
1250
}
1251
1252
static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
1253
{
1254
return mld_force_mld_version(idev) == 2;
1255
}
1256
1257
static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
1258
{
1259
return mld_force_mld_version(idev) == 1;
1260
}
1261
1262
static bool mld_in_v1_mode(const struct inet6_dev *idev)
1263
{
1264
if (mld_in_v2_mode_only(idev))
1265
return false;
1266
if (mld_in_v1_mode_only(idev))
1267
return true;
1268
if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
1269
return true;
1270
1271
return false;
1272
}
1273
1274
static void mld_set_v1_mode(struct inet6_dev *idev)
1275
{
1276
/* RFC3810, relevant sections:
1277
* - 9.1. Robustness Variable
1278
* - 9.2. Query Interval
1279
* - 9.3. Query Response Interval
1280
* - 9.12. Older Version Querier Present Timeout
1281
*/
1282
unsigned long switchback;
1283
1284
switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;
1285
1286
idev->mc_v1_seen = jiffies + switchback;
1287
}
1288
1289
static void mld_update_qrv(struct inet6_dev *idev,
1290
const struct mld2_query *mlh2)
1291
{
1292
/* RFC3810, relevant sections:
1293
* - 5.1.8. QRV (Querier's Robustness Variable)
1294
* - 9.1. Robustness Variable
1295
*/
1296
1297
/* The value of the Robustness Variable MUST NOT be zero,
1298
* and SHOULD NOT be one. Catch this here if we ever run
1299
* into such a case in future.
1300
*/
1301
const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv);
1302
WARN_ON(idev->mc_qrv == 0);
1303
1304
if (mlh2->mld2q_qrv > 0)
1305
idev->mc_qrv = mlh2->mld2q_qrv;
1306
1307
if (unlikely(idev->mc_qrv < min_qrv)) {
1308
net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
1309
idev->mc_qrv, min_qrv);
1310
idev->mc_qrv = min_qrv;
1311
}
1312
}
1313
1314
static void mld_update_qi(struct inet6_dev *idev,
1315
const struct mld2_query *mlh2)
1316
{
1317
/* RFC3810, relevant sections:
1318
* - 5.1.9. QQIC (Querier's Query Interval Code)
1319
* - 9.2. Query Interval
1320
* - 9.12. Older Version Querier Present Timeout
1321
* (the [Query Interval] in the last Query received)
1322
*/
1323
unsigned long mc_qqi;
1324
1325
if (mlh2->mld2q_qqic < 128) {
1326
mc_qqi = mlh2->mld2q_qqic;
1327
} else {
1328
unsigned long mc_man, mc_exp;
1329
1330
mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
1331
mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);
1332
1333
mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
1334
}
1335
1336
idev->mc_qi = mc_qqi * HZ;
1337
}
1338
1339
static void mld_update_qri(struct inet6_dev *idev,
1340
const struct mld2_query *mlh2)
1341
{
1342
/* RFC3810, relevant sections:
1343
* - 5.1.3. Maximum Response Code
1344
* - 9.3. Query Response Interval
1345
*/
1346
idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
1347
}
1348
1349
static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
1350
unsigned long *max_delay, bool v1_query)
1351
{
1352
unsigned long mldv1_md;
1353
1354
/* Ignore v1 queries */
1355
if (mld_in_v2_mode_only(idev))
1356
return -EINVAL;
1357
1358
mldv1_md = ntohs(mld->mld_maxdelay);
1359
1360
/* When in MLDv1 fallback and a MLDv2 router start-up being
1361
* unaware of current MLDv1 operation, the MRC == MRD mapping
1362
* only works when the exponential algorithm is not being
1363
* used (as MLDv1 is unaware of such things).
1364
*
1365
* According to the RFC author, the MLDv2 implementations
1366
* he's aware of all use a MRC < 32768 on start up queries.
1367
*
1368
* Thus, should we *ever* encounter something else larger
1369
* than that, just assume the maximum possible within our
1370
* reach.
1371
*/
1372
if (!v1_query)
1373
mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT);
1374
1375
*max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);
1376
1377
/* MLDv1 router present: we need to go into v1 mode *only*
1378
* when an MLDv1 query is received as per section 9.12. of
1379
* RFC3810! And we know from RFC2710 section 3.7 that MLDv1
1380
* queries MUST be of exactly 24 octets.
1381
*/
1382
if (v1_query)
1383
mld_set_v1_mode(idev);
1384
1385
/* cancel MLDv2 report work */
1386
mld_gq_stop_work(idev);
1387
/* cancel the interface change work */
1388
mld_ifc_stop_work(idev);
1389
/* clear deleted report items */
1390
mld_clear_delrec(idev);
1391
1392
return 0;
1393
}
1394
1395
static void mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
1396
unsigned long *max_delay)
1397
{
1398
*max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);
1399
1400
mld_update_qrv(idev, mld);
1401
mld_update_qi(idev, mld);
1402
mld_update_qri(idev, mld);
1403
1404
idev->mc_maxdelay = *max_delay;
1405
1406
return;
1407
}
1408
1409
/* called with rcu_read_lock() */
1410
void igmp6_event_query(struct sk_buff *skb)
1411
{
1412
struct inet6_dev *idev = __in6_dev_get(skb->dev);
1413
1414
if (!idev || idev->dead)
1415
goto out;
1416
1417
spin_lock_bh(&idev->mc_query_lock);
1418
if (skb_queue_len(&idev->mc_query_queue) < MLD_MAX_SKBS) {
1419
__skb_queue_tail(&idev->mc_query_queue, skb);
1420
if (!mod_delayed_work(mld_wq, &idev->mc_query_work, 0))
1421
in6_dev_hold(idev);
1422
skb = NULL;
1423
}
1424
spin_unlock_bh(&idev->mc_query_lock);
1425
out:
1426
kfree_skb(skb);
1427
}
1428
1429
static void __mld_query_work(struct sk_buff *skb)
1430
{
1431
struct mld2_query *mlh2 = NULL;
1432
const struct in6_addr *group;
1433
unsigned long max_delay;
1434
struct inet6_dev *idev;
1435
struct ifmcaddr6 *ma;
1436
struct mld_msg *mld;
1437
int group_type;
1438
int mark = 0;
1439
int len, err;
1440
1441
if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
1442
goto kfree_skb;
1443
1444
/* compute payload length excluding extension headers */
1445
len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
1446
len -= skb_network_header_len(skb);
1447
1448
/* RFC3810 6.2
1449
* Upon reception of an MLD message that contains a Query, the node
1450
* checks if the source address of the message is a valid link-local
1451
* address, if the Hop Limit is set to 1, and if the Router Alert
1452
* option is present in the Hop-By-Hop Options header of the IPv6
1453
* packet. If any of these checks fails, the packet is dropped.
1454
*/
1455
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
1456
ipv6_hdr(skb)->hop_limit != 1 ||
1457
!(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
1458
IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
1459
goto kfree_skb;
1460
1461
idev = in6_dev_get(skb->dev);
1462
if (!idev)
1463
goto kfree_skb;
1464
1465
mld = (struct mld_msg *)icmp6_hdr(skb);
1466
group = &mld->mld_mca;
1467
group_type = ipv6_addr_type(group);
1468
1469
if (group_type != IPV6_ADDR_ANY &&
1470
!(group_type&IPV6_ADDR_MULTICAST))
1471
goto out;
1472
1473
if (len < MLD_V1_QUERY_LEN) {
1474
goto out;
1475
} else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
1476
err = mld_process_v1(idev, mld, &max_delay,
1477
len == MLD_V1_QUERY_LEN);
1478
if (err < 0)
1479
goto out;
1480
} else if (len >= MLD_V2_QUERY_LEN_MIN) {
1481
int srcs_offset = sizeof(struct mld2_query) -
1482
sizeof(struct icmp6hdr);
1483
1484
if (!pskb_may_pull(skb, srcs_offset))
1485
goto out;
1486
1487
mlh2 = (struct mld2_query *)skb_transport_header(skb);
1488
1489
mld_process_v2(idev, mlh2, &max_delay);
1490
1491
if (group_type == IPV6_ADDR_ANY) { /* general query */
1492
if (mlh2->mld2q_nsrcs)
1493
goto out; /* no sources allowed */
1494
1495
mld_gq_start_work(idev);
1496
goto out;
1497
}
1498
/* mark sources to include, if group & source-specific */
1499
if (mlh2->mld2q_nsrcs != 0) {
1500
if (!pskb_may_pull(skb, srcs_offset +
1501
ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
1502
goto out;
1503
1504
mlh2 = (struct mld2_query *)skb_transport_header(skb);
1505
mark = 1;
1506
}
1507
} else {
1508
goto out;
1509
}
1510
1511
if (group_type == IPV6_ADDR_ANY) {
1512
for_each_mc_mclock(idev, ma) {
1513
igmp6_group_queried(ma, max_delay);
1514
}
1515
} else {
1516
for_each_mc_mclock(idev, ma) {
1517
if (!ipv6_addr_equal(group, &ma->mca_addr))
1518
continue;
1519
if (ma->mca_flags & MAF_TIMER_RUNNING) {
1520
/* gsquery <- gsquery && mark */
1521
if (!mark)
1522
ma->mca_flags &= ~MAF_GSQUERY;
1523
} else {
1524
/* gsquery <- mark */
1525
if (mark)
1526
ma->mca_flags |= MAF_GSQUERY;
1527
else
1528
ma->mca_flags &= ~MAF_GSQUERY;
1529
}
1530
if (!(ma->mca_flags & MAF_GSQUERY) ||
1531
mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
1532
igmp6_group_queried(ma, max_delay);
1533
break;
1534
}
1535
}
1536
1537
out:
1538
in6_dev_put(idev);
1539
kfree_skb:
1540
consume_skb(skb);
1541
}
1542
1543
static void mld_query_work(struct work_struct *work)
1544
{
1545
struct inet6_dev *idev = container_of(to_delayed_work(work),
1546
struct inet6_dev,
1547
mc_query_work);
1548
struct sk_buff_head q;
1549
struct sk_buff *skb;
1550
bool rework = false;
1551
int cnt = 0;
1552
1553
skb_queue_head_init(&q);
1554
1555
spin_lock_bh(&idev->mc_query_lock);
1556
while ((skb = __skb_dequeue(&idev->mc_query_queue))) {
1557
__skb_queue_tail(&q, skb);
1558
1559
if (++cnt >= MLD_MAX_QUEUE) {
1560
rework = true;
1561
break;
1562
}
1563
}
1564
spin_unlock_bh(&idev->mc_query_lock);
1565
1566
mutex_lock(&idev->mc_lock);
1567
while ((skb = __skb_dequeue(&q)))
1568
__mld_query_work(skb);
1569
mutex_unlock(&idev->mc_lock);
1570
1571
if (rework && queue_delayed_work(mld_wq, &idev->mc_query_work, 0))
1572
return;
1573
1574
in6_dev_put(idev);
1575
}
1576
1577
/* called with rcu_read_lock() */
1578
void igmp6_event_report(struct sk_buff *skb)
1579
{
1580
struct inet6_dev *idev = __in6_dev_get(skb->dev);
1581
1582
if (!idev || idev->dead)
1583
goto out;
1584
1585
spin_lock_bh(&idev->mc_report_lock);
1586
if (skb_queue_len(&idev->mc_report_queue) < MLD_MAX_SKBS) {
1587
__skb_queue_tail(&idev->mc_report_queue, skb);
1588
if (!mod_delayed_work(mld_wq, &idev->mc_report_work, 0))
1589
in6_dev_hold(idev);
1590
skb = NULL;
1591
}
1592
spin_unlock_bh(&idev->mc_report_lock);
1593
out:
1594
kfree_skb(skb);
1595
}
1596
1597
static void __mld_report_work(struct sk_buff *skb)
1598
{
1599
struct inet6_dev *idev;
1600
struct ifmcaddr6 *ma;
1601
struct mld_msg *mld;
1602
int addr_type;
1603
1604
/* Our own report looped back. Ignore it. */
1605
if (skb->pkt_type == PACKET_LOOPBACK)
1606
goto kfree_skb;
1607
1608
/* send our report if the MC router may not have heard this report */
1609
if (skb->pkt_type != PACKET_MULTICAST &&
1610
skb->pkt_type != PACKET_BROADCAST)
1611
goto kfree_skb;
1612
1613
if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
1614
goto kfree_skb;
1615
1616
mld = (struct mld_msg *)icmp6_hdr(skb);
1617
1618
/* Drop reports with not link local source */
1619
addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
1620
if (addr_type != IPV6_ADDR_ANY &&
1621
!(addr_type&IPV6_ADDR_LINKLOCAL))
1622
goto kfree_skb;
1623
1624
idev = in6_dev_get(skb->dev);
1625
if (!idev)
1626
goto kfree_skb;
1627
1628
/*
1629
* Cancel the work for this group
1630
*/
1631
1632
for_each_mc_mclock(idev, ma) {
1633
if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
1634
if (cancel_delayed_work(&ma->mca_work))
1635
refcount_dec(&ma->mca_refcnt);
1636
ma->mca_flags &= ~(MAF_LAST_REPORTER |
1637
MAF_TIMER_RUNNING);
1638
break;
1639
}
1640
}
1641
1642
in6_dev_put(idev);
1643
kfree_skb:
1644
consume_skb(skb);
1645
}
1646
1647
static void mld_report_work(struct work_struct *work)
1648
{
1649
struct inet6_dev *idev = container_of(to_delayed_work(work),
1650
struct inet6_dev,
1651
mc_report_work);
1652
struct sk_buff_head q;
1653
struct sk_buff *skb;
1654
bool rework = false;
1655
int cnt = 0;
1656
1657
skb_queue_head_init(&q);
1658
spin_lock_bh(&idev->mc_report_lock);
1659
while ((skb = __skb_dequeue(&idev->mc_report_queue))) {
1660
__skb_queue_tail(&q, skb);
1661
1662
if (++cnt >= MLD_MAX_QUEUE) {
1663
rework = true;
1664
break;
1665
}
1666
}
1667
spin_unlock_bh(&idev->mc_report_lock);
1668
1669
mutex_lock(&idev->mc_lock);
1670
while ((skb = __skb_dequeue(&q)))
1671
__mld_report_work(skb);
1672
mutex_unlock(&idev->mc_lock);
1673
1674
if (rework && queue_delayed_work(mld_wq, &idev->mc_report_work, 0))
1675
return;
1676
1677
in6_dev_put(idev);
1678
}
1679
1680
static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
1681
int gdeleted, int sdeleted)
1682
{
1683
switch (type) {
1684
case MLD2_MODE_IS_INCLUDE:
1685
case MLD2_MODE_IS_EXCLUDE:
1686
if (gdeleted || sdeleted)
1687
return false;
1688
if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
1689
if (pmc->mca_sfmode == MCAST_INCLUDE)
1690
return true;
1691
/* don't include if this source is excluded
1692
* in all filters
1693
*/
1694
if (psf->sf_count[MCAST_INCLUDE])
1695
return type == MLD2_MODE_IS_INCLUDE;
1696
return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1697
psf->sf_count[MCAST_EXCLUDE];
1698
}
1699
return false;
1700
case MLD2_CHANGE_TO_INCLUDE:
1701
if (gdeleted || sdeleted)
1702
return false;
1703
return psf->sf_count[MCAST_INCLUDE] != 0;
1704
case MLD2_CHANGE_TO_EXCLUDE:
1705
if (gdeleted || sdeleted)
1706
return false;
1707
if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
1708
psf->sf_count[MCAST_INCLUDE])
1709
return false;
1710
return pmc->mca_sfcount[MCAST_EXCLUDE] ==
1711
psf->sf_count[MCAST_EXCLUDE];
1712
case MLD2_ALLOW_NEW_SOURCES:
1713
if (gdeleted || !psf->sf_crcount)
1714
return false;
1715
return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
1716
case MLD2_BLOCK_OLD_SOURCES:
1717
if (pmc->mca_sfmode == MCAST_INCLUDE)
1718
return gdeleted || (psf->sf_crcount && sdeleted);
1719
return psf->sf_crcount && !gdeleted && !sdeleted;
1720
}
1721
return false;
1722
}
1723
1724
static int
1725
mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
1726
{
1727
struct ip6_sf_list *psf;
1728
int scount = 0;
1729
1730
for_each_psf_mclock(pmc, psf) {
1731
if (!is_in(pmc, psf, type, gdeleted, sdeleted))
1732
continue;
1733
scount++;
1734
}
1735
return scount;
1736
}
1737
1738
static void ip6_mc_hdr(const struct sock *sk, struct sk_buff *skb,
1739
struct net_device *dev, const struct in6_addr *saddr,
1740
const struct in6_addr *daddr, int proto, int len)
1741
{
1742
struct ipv6hdr *hdr;
1743
1744
skb->protocol = htons(ETH_P_IPV6);
1745
skb->dev = dev;
1746
1747
skb_reset_network_header(skb);
1748
skb_put(skb, sizeof(struct ipv6hdr));
1749
hdr = ipv6_hdr(skb);
1750
1751
ip6_flow_hdr(hdr, 0, 0);
1752
1753
hdr->payload_len = htons(len);
1754
hdr->nexthdr = proto;
1755
hdr->hop_limit = READ_ONCE(inet6_sk(sk)->hop_limit);
1756
1757
hdr->saddr = *saddr;
1758
hdr->daddr = *daddr;
1759
}
1760
1761
static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
1762
{
1763
u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
1764
2, 0, 0, IPV6_TLV_PADN, 0 };
1765
struct net_device *dev = idev->dev;
1766
int hlen = LL_RESERVED_SPACE(dev);
1767
int tlen = dev->needed_tailroom;
1768
const struct in6_addr *saddr;
1769
struct in6_addr addr_buf;
1770
struct mld2_report *pmr;
1771
struct sk_buff *skb;
1772
unsigned int size;
1773
struct sock *sk;
1774
struct net *net;
1775
1776
/* we assume size > sizeof(ra) here
1777
* Also try to not allocate high-order pages for big MTU
1778
*/
1779
size = min_t(int, mtu, PAGE_SIZE / 2) + hlen + tlen;
1780
skb = alloc_skb(size, GFP_KERNEL);
1781
if (!skb)
1782
return NULL;
1783
1784
skb->priority = TC_PRIO_CONTROL;
1785
skb_reserve(skb, hlen);
1786
skb_tailroom_reserve(skb, mtu, tlen);
1787
1788
rcu_read_lock();
1789
1790
net = dev_net_rcu(dev);
1791
sk = net->ipv6.igmp_sk;
1792
skb_set_owner_w(skb, sk);
1793
1794
if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
1795
/* <draft-ietf-magma-mld-source-05.txt>:
1796
* use unspecified address as the source address
1797
* when a valid link-local address is not available.
1798
*/
1799
saddr = &in6addr_any;
1800
} else
1801
saddr = &addr_buf;
1802
1803
ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
1804
1805
rcu_read_unlock();
1806
1807
skb_put_data(skb, ra, sizeof(ra));
1808
1809
skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
1810
skb_put(skb, sizeof(*pmr));
1811
pmr = (struct mld2_report *)skb_transport_header(skb);
1812
pmr->mld2r_type = ICMPV6_MLD2_REPORT;
1813
pmr->mld2r_resv1 = 0;
1814
pmr->mld2r_cksum = 0;
1815
pmr->mld2r_resv2 = 0;
1816
pmr->mld2r_ngrec = 0;
1817
return skb;
1818
}
1819
1820
static void mld_sendpack(struct sk_buff *skb)
1821
{
1822
struct ipv6hdr *pip6 = ipv6_hdr(skb);
1823
struct mld2_report *pmr =
1824
(struct mld2_report *)skb_transport_header(skb);
1825
int payload_len, mldlen;
1826
struct inet6_dev *idev;
1827
struct net *net = dev_net(skb->dev);
1828
int err;
1829
struct flowi6 fl6;
1830
struct dst_entry *dst;
1831
1832
rcu_read_lock();
1833
idev = __in6_dev_get(skb->dev);
1834
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
1835
1836
payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
1837
sizeof(*pip6);
1838
mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
1839
pip6->payload_len = htons(payload_len);
1840
1841
pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
1842
IPPROTO_ICMPV6,
1843
csum_partial(skb_transport_header(skb),
1844
mldlen, 0));
1845
1846
icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
1847
&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
1848
skb->dev->ifindex);
1849
dst = icmp6_dst_alloc(skb->dev, &fl6);
1850
1851
err = 0;
1852
if (IS_ERR(dst)) {
1853
err = PTR_ERR(dst);
1854
dst = NULL;
1855
}
1856
skb_dst_set(skb, dst);
1857
if (err)
1858
goto err_out;
1859
1860
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
1861
net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
1862
dst_output);
1863
out:
1864
if (!err) {
1865
ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
1866
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1867
} else {
1868
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1869
}
1870
1871
rcu_read_unlock();
1872
return;
1873
1874
err_out:
1875
kfree_skb(skb);
1876
goto out;
1877
}
1878
1879
static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
1880
{
1881
return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel);
1882
}
1883
1884
static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1885
int type, struct mld2_grec **ppgr, unsigned int mtu)
1886
{
1887
struct mld2_report *pmr;
1888
struct mld2_grec *pgr;
1889
1890
if (!skb) {
1891
skb = mld_newpack(pmc->idev, mtu);
1892
if (!skb)
1893
return NULL;
1894
}
1895
pgr = skb_put(skb, sizeof(struct mld2_grec));
1896
pgr->grec_type = type;
1897
pgr->grec_auxwords = 0;
1898
pgr->grec_nsrcs = 0;
1899
pgr->grec_mca = pmc->mca_addr; /* structure copy */
1900
pmr = (struct mld2_report *)skb_transport_header(skb);
1901
pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
1902
*ppgr = pgr;
1903
return skb;
1904
}
1905
1906
#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0)
1907
1908
static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1909
int type, int gdeleted, int sdeleted,
1910
int crsend)
1911
{
1912
struct ip6_sf_list *psf, *psf_prev, *psf_next;
1913
int scount, stotal, first, isquery, truncate;
1914
struct ip6_sf_list __rcu **psf_list;
1915
struct inet6_dev *idev = pmc->idev;
1916
struct net_device *dev = idev->dev;
1917
struct mld2_grec *pgr = NULL;
1918
struct mld2_report *pmr;
1919
unsigned int mtu;
1920
1921
mc_assert_locked(idev);
1922
1923
if (pmc->mca_flags & MAF_NOREPORT)
1924
return skb;
1925
1926
mtu = READ_ONCE(dev->mtu);
1927
if (mtu < IPV6_MIN_MTU)
1928
return skb;
1929
1930
isquery = type == MLD2_MODE_IS_INCLUDE ||
1931
type == MLD2_MODE_IS_EXCLUDE;
1932
truncate = type == MLD2_MODE_IS_EXCLUDE ||
1933
type == MLD2_CHANGE_TO_EXCLUDE;
1934
1935
stotal = scount = 0;
1936
1937
psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;
1938
1939
if (!rcu_access_pointer(*psf_list))
1940
goto empty_source;
1941
1942
pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;
1943
1944
/* EX and TO_EX get a fresh packet, if needed */
1945
if (truncate) {
1946
if (pmr && pmr->mld2r_ngrec &&
1947
AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
1948
if (skb)
1949
mld_sendpack(skb);
1950
skb = mld_newpack(idev, mtu);
1951
}
1952
}
1953
first = 1;
1954
psf_prev = NULL;
1955
for (psf = mc_dereference(*psf_list, idev);
1956
psf;
1957
psf = psf_next) {
1958
struct in6_addr *psrc;
1959
1960
psf_next = mc_dereference(psf->sf_next, idev);
1961
1962
if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
1963
psf_prev = psf;
1964
continue;
1965
}
1966
1967
/* Based on RFC3810 6.1. Should not send source-list change
1968
* records when there is a filter mode change.
1969
*/
1970
if (((gdeleted && pmc->mca_sfmode == MCAST_EXCLUDE) ||
1971
(!gdeleted && pmc->mca_crcount)) &&
1972
(type == MLD2_ALLOW_NEW_SOURCES ||
1973
type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount)
1974
goto decrease_sf_crcount;
1975
1976
/* clear marks on query responses */
1977
if (isquery)
1978
psf->sf_gsresp = 0;
1979
1980
if (AVAILABLE(skb) < sizeof(*psrc) +
1981
first*sizeof(struct mld2_grec)) {
1982
if (truncate && !first)
1983
break; /* truncate these */
1984
if (pgr)
1985
pgr->grec_nsrcs = htons(scount);
1986
if (skb)
1987
mld_sendpack(skb);
1988
skb = mld_newpack(idev, mtu);
1989
first = 1;
1990
scount = 0;
1991
}
1992
if (first) {
1993
skb = add_grhead(skb, pmc, type, &pgr, mtu);
1994
first = 0;
1995
}
1996
if (!skb)
1997
return NULL;
1998
psrc = skb_put(skb, sizeof(*psrc));
1999
*psrc = psf->sf_addr;
2000
scount++; stotal++;
2001
if ((type == MLD2_ALLOW_NEW_SOURCES ||
2002
type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
2003
decrease_sf_crcount:
2004
psf->sf_crcount--;
2005
if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
2006
if (psf_prev)
2007
rcu_assign_pointer(psf_prev->sf_next,
2008
mc_dereference(psf->sf_next, idev));
2009
else
2010
rcu_assign_pointer(*psf_list,
2011
mc_dereference(psf->sf_next, idev));
2012
kfree_rcu(psf, rcu);
2013
continue;
2014
}
2015
}
2016
psf_prev = psf;
2017
}
2018
2019
empty_source:
2020
if (!stotal) {
2021
if (type == MLD2_ALLOW_NEW_SOURCES ||
2022
type == MLD2_BLOCK_OLD_SOURCES)
2023
return skb;
2024
if (pmc->mca_crcount || isquery || crsend) {
2025
/* make sure we have room for group header */
2026
if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
2027
mld_sendpack(skb);
2028
skb = NULL; /* add_grhead will get a new one */
2029
}
2030
skb = add_grhead(skb, pmc, type, &pgr, mtu);
2031
}
2032
}
2033
if (pgr)
2034
pgr->grec_nsrcs = htons(scount);
2035
2036
if (isquery)
2037
pmc->mca_flags &= ~MAF_GSQUERY; /* clear query state */
2038
return skb;
2039
}
2040
2041
static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
2042
{
2043
struct sk_buff *skb = NULL;
2044
int type;
2045
2046
mc_assert_locked(idev);
2047
2048
if (!pmc) {
2049
for_each_mc_mclock(idev, pmc) {
2050
if (pmc->mca_flags & MAF_NOREPORT)
2051
continue;
2052
if (pmc->mca_sfcount[MCAST_EXCLUDE])
2053
type = MLD2_MODE_IS_EXCLUDE;
2054
else
2055
type = MLD2_MODE_IS_INCLUDE;
2056
skb = add_grec(skb, pmc, type, 0, 0, 0);
2057
}
2058
} else {
2059
if (pmc->mca_sfcount[MCAST_EXCLUDE])
2060
type = MLD2_MODE_IS_EXCLUDE;
2061
else
2062
type = MLD2_MODE_IS_INCLUDE;
2063
skb = add_grec(skb, pmc, type, 0, 0, 0);
2064
}
2065
if (skb)
2066
mld_sendpack(skb);
2067
}
2068
2069
/* remove zero-count source records from a source filter list */
2070
static void mld_clear_zeros(struct ip6_sf_list __rcu **ppsf, struct inet6_dev *idev)
2071
{
2072
struct ip6_sf_list *psf_prev, *psf_next, *psf;
2073
2074
psf_prev = NULL;
2075
for (psf = mc_dereference(*ppsf, idev);
2076
psf;
2077
psf = psf_next) {
2078
psf_next = mc_dereference(psf->sf_next, idev);
2079
if (psf->sf_crcount == 0) {
2080
if (psf_prev)
2081
rcu_assign_pointer(psf_prev->sf_next,
2082
mc_dereference(psf->sf_next, idev));
2083
else
2084
rcu_assign_pointer(*ppsf,
2085
mc_dereference(psf->sf_next, idev));
2086
kfree_rcu(psf, rcu);
2087
} else {
2088
psf_prev = psf;
2089
}
2090
}
2091
}
2092
2093
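/* Build and send the pending MLDv2 state-change report: change records for
 * the tombstoned (deleted) addresses first, then source-list and filter-mode
 * change records for the addresses still active on the interface.  Tomb
 * entries whose retransmit counts have run out are freed along the way.
 */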
static void mld_send_cr(struct inet6_dev *idev)
2094
{
2095
struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
2096
struct sk_buff *skb = NULL;
2097
int type, dtype;
2098
2099
/* deleted MCAs, still on the tomb list */
2100
pmc_prev = NULL;
2101
for (pmc = mc_dereference(idev->mc_tomb, idev);
2102
pmc;
2103
pmc = pmc_next) {
2104
pmc_next = mc_dereference(pmc->next, idev);
2105
if (pmc->mca_sfmode == MCAST_INCLUDE) {
2106
type = MLD2_BLOCK_OLD_SOURCES;
2107
dtype = MLD2_BLOCK_OLD_SOURCES;
2108
skb = add_grec(skb, pmc, type, 1, 0, 0);
2109
skb = add_grec(skb, pmc, dtype, 1, 1, 0);
2110
}
2111
if (pmc->mca_crcount) {
2112
if (pmc->mca_sfmode == MCAST_EXCLUDE) {
2113
type = MLD2_CHANGE_TO_INCLUDE;
2114
skb = add_grec(skb, pmc, type, 1, 0, 0);
2115
}
2116
pmc->mca_crcount--;
2117
if (pmc->mca_crcount == 0) {
2118
mld_clear_zeros(&pmc->mca_tomb, idev);
2119
mld_clear_zeros(&pmc->mca_sources, idev);
2120
}
2121
}
2122
if (pmc->mca_crcount == 0 &&
2123
!rcu_access_pointer(pmc->mca_tomb) &&
2124
!rcu_access_pointer(pmc->mca_sources)) {
2125
if (pmc_prev)
2126
rcu_assign_pointer(pmc_prev->next, pmc_next);
2127
else
2128
rcu_assign_pointer(idev->mc_tomb, pmc_next);
2129
in6_dev_put(pmc->idev);
2130
kfree_rcu(pmc, rcu);
2131
} else
2132
pmc_prev = pmc;
2133
}
2134
2135
/* change records for the remaining (active) multicast addresses */
2136
for_each_mc_mclock(idev, pmc) {
2137
if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2138
type = MLD2_BLOCK_OLD_SOURCES;
2139
dtype = MLD2_ALLOW_NEW_SOURCES;
2140
} else {
2141
type = MLD2_ALLOW_NEW_SOURCES;
2142
dtype = MLD2_BLOCK_OLD_SOURCES;
2143
}
2144
skb = add_grec(skb, pmc, type, 0, 0, 0);
2145
skb = add_grec(skb, pmc, dtype, 0, 1, 0); /* deleted sources */
2146
2147
/* filter mode changes */
2148
if (pmc->mca_crcount) {
2149
if (pmc->mca_sfmode == MCAST_EXCLUDE)
2150
type = MLD2_CHANGE_TO_EXCLUDE;
2151
else
2152
type = MLD2_CHANGE_TO_INCLUDE;
2153
skb = add_grec(skb, pmc, type, 0, 0, 0);
2154
pmc->mca_crcount--;
2155
}
2156
}
2157
if (!skb)
2158
return;
2159
(void) mld_sendpack(skb);
2160
}
2161
2162
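/* Build and transmit a single MLDv1 message (Report or Done) for addr.
 * A Done (ICMPV6_MGM_REDUCTION) is sent to the link-local all-routers
 * address; a Report is sent to the group address itself.  The source
 * address is the interface's link-local address, or :: if none is usable.
 */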
static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
2163
{
2164
const struct in6_addr *snd_addr, *saddr;
2165
int err, len, payload_len, full_len;
2166
struct in6_addr addr_buf;
2167
struct inet6_dev *idev;
2168
struct sk_buff *skb;
2169
struct mld_msg *hdr;
2170
int hlen = LL_RESERVED_SPACE(dev);
2171
int tlen = dev->needed_tailroom;
2172
u8 ra[8] = { IPPROTO_ICMPV6, 0,
2173
IPV6_TLV_ROUTERALERT, 2, 0, 0,
2174
IPV6_TLV_PADN, 0 };
2175
struct dst_entry *dst;
2176
struct flowi6 fl6;
2177
struct net *net;
2178
struct sock *sk;
2179
2180
if (type == ICMPV6_MGM_REDUCTION)
2181
snd_addr = &in6addr_linklocal_allrouters;
2182
else
2183
snd_addr = addr;
2184
2185
len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
2186
payload_len = len + sizeof(ra);
2187
full_len = sizeof(struct ipv6hdr) + payload_len;
2188
2189
skb = alloc_skb(hlen + tlen + full_len, GFP_KERNEL);
2190
2191
rcu_read_lock();
2192
2193
net = dev_net_rcu(dev);
2194
idev = __in6_dev_get(dev);
2195
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
2196
if (!skb) {
2197
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
2198
rcu_read_unlock();
2199
return;
2200
}
2201
sk = net->ipv6.igmp_sk;
2202
skb_set_owner_w(skb, sk);
2203
2204
skb->priority = TC_PRIO_CONTROL;
2205
skb_reserve(skb, hlen);
2206
2207
if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
2208
/* <draft-ietf-magma-mld-source-05.txt>:
2209
* use unspecified address as the source address
2210
* when a valid link-local address is not available.
2211
*/
2212
saddr = &in6addr_any;
2213
} else
2214
saddr = &addr_buf;
2215
2216
ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);
2217
2218
skb_put_data(skb, ra, sizeof(ra));
2219
2220
hdr = skb_put_zero(skb, sizeof(struct mld_msg));
2221
hdr->mld_type = type;
2222
hdr->mld_mca = *addr;
2223
2224
hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
2225
IPPROTO_ICMPV6,
2226
csum_partial(hdr, len, 0));
2227
2228
icmpv6_flow_init(sk, &fl6, type,
2229
&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
2230
skb->dev->ifindex);
2231
dst = icmp6_dst_alloc(skb->dev, &fl6);
2232
if (IS_ERR(dst)) {
2233
err = PTR_ERR(dst);
2234
goto err_out;
2235
}
2236
2237
skb_dst_set(skb, dst);
2238
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
2239
net, sk, skb, NULL, skb->dev,
2240
dst_output);
2241
out:
2242
if (!err) {
2243
ICMP6MSGOUT_INC_STATS(net, idev, type);
2244
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
2245
} else
2246
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
2247
2248
rcu_read_unlock();
2249
return;
2250
2251
err_out:
2252
kfree_skb(skb);
2253
goto out;
2254
}
2255
2256
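/* Send the first unsolicited MLDv2 state-change report (CHANGE_TO_EXCLUDE
 * or ALLOW_NEW_SOURCES records) for every group on the interface.  Does
 * nothing while the interface is operating in MLDv1 compatibility mode.
 */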
static void mld_send_initial_cr(struct inet6_dev *idev)
2257
{
2258
struct ifmcaddr6 *pmc;
2259
struct sk_buff *skb;
2260
int type;
2261
2262
mc_assert_locked(idev);
2263
2264
if (mld_in_v1_mode(idev))
2265
return;
2266
2267
skb = NULL;
2268
for_each_mc_mclock(idev, pmc) {
2269
if (pmc->mca_sfcount[MCAST_EXCLUDE])
2270
type = MLD2_CHANGE_TO_EXCLUDE;
2271
else
2272
type = MLD2_ALLOW_NEW_SOURCES;
2273
skb = add_grec(skb, pmc, type, 0, 0, 1);
2274
}
2275
if (skb)
2276
mld_sendpack(skb);
2277
}
2278
2279
void ipv6_mc_dad_complete(struct inet6_dev *idev)
2280
{
2281
mutex_lock(&idev->mc_lock);
2282
idev->mc_dad_count = idev->mc_qrv;
2283
if (idev->mc_dad_count) {
2284
mld_send_initial_cr(idev);
2285
idev->mc_dad_count--;
2286
if (idev->mc_dad_count)
2287
mld_dad_start_work(idev,
2288
unsolicited_report_interval(idev));
2289
}
2290
mutex_unlock(&idev->mc_lock);
2291
}
2292
2293
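/* Deferred work scheduled after DAD completes: retransmit the initial
 * state-change report until mc_dad_count (seeded from the robustness
 * variable) reaches zero.
 */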
static void mld_dad_work(struct work_struct *work)
2294
{
2295
struct inet6_dev *idev = container_of(to_delayed_work(work),
2296
struct inet6_dev,
2297
mc_dad_work);
2298
mutex_lock(&idev->mc_lock);
2299
mld_send_initial_cr(idev);
2300
if (idev->mc_dad_count) {
2301
idev->mc_dad_count--;
2302
if (idev->mc_dad_count)
2303
mld_dad_start_work(idev,
2304
unsolicited_report_interval(idev));
2305
}
2306
mutex_unlock(&idev->mc_lock);
2307
in6_dev_put(idev);
2308
}
2309
2310
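/* Drop one sfmode reference to source psfsrc in pmc's filter list.
 * Returns 1 if the source was moved to the tomb list so that change
 * records can still be sent for it, 0 otherwise, or -ESRCH if no matching
 * reference was found.
 */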
static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
2311
const struct in6_addr *psfsrc)
2312
{
2313
struct ip6_sf_list *psf, *psf_prev;
2314
int rv = 0;
2315
2316
mc_assert_locked(pmc->idev);
2317
2318
psf_prev = NULL;
2319
for_each_psf_mclock(pmc, psf) {
2320
if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2321
break;
2322
psf_prev = psf;
2323
}
2324
if (!psf || psf->sf_count[sfmode] == 0) {
2325
/* source filter not found, or count wrong => bug */
2326
return -ESRCH;
2327
}
2328
WRITE_ONCE(psf->sf_count[sfmode], psf->sf_count[sfmode] - 1);
2329
if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
2330
struct inet6_dev *idev = pmc->idev;
2331
2332
/* no more filters for this source */
2333
if (psf_prev)
2334
rcu_assign_pointer(psf_prev->sf_next,
2335
mc_dereference(psf->sf_next, idev));
2336
else
2337
rcu_assign_pointer(pmc->mca_sources,
2338
mc_dereference(psf->sf_next, idev));
2339
2340
if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
2341
!mld_in_v1_mode(idev)) {
2342
psf->sf_crcount = idev->mc_qrv;
2343
rcu_assign_pointer(psf->sf_next,
2344
mc_dereference(pmc->mca_tomb, idev));
2345
rcu_assign_pointer(pmc->mca_tomb, psf);
2346
rv = 1;
2347
} else {
2348
kfree_rcu(psf, rcu);
2349
}
2350
}
2351
return rv;
2352
}
2353
2354
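/* Remove sfcount sources in psfsrc[] from the pmca group on the interface
 * and, if the interface filter state changed as a result, schedule the
 * corresponding state-change report.
 */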
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2355
int sfmode, int sfcount, const struct in6_addr *psfsrc,
2356
int delta)
2357
{
2358
struct ifmcaddr6 *pmc;
2359
int changerec = 0;
2360
int i, err;
2361
2362
if (!idev)
2363
return -ENODEV;
2364
2365
mc_assert_locked(idev);
2366
2367
for_each_mc_mclock(idev, pmc) {
2368
if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2369
break;
2370
}
2371
if (!pmc)
2372
return -ESRCH;
2373
2374
sf_markstate(pmc);
2375
if (!delta) {
2376
if (!pmc->mca_sfcount[sfmode])
2377
return -EINVAL;
2378
2379
pmc->mca_sfcount[sfmode]--;
2380
}
2381
err = 0;
2382
for (i = 0; i < sfcount; i++) {
2383
int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
2384
2385
changerec |= rv > 0;
2386
if (!err && rv < 0)
2387
err = rv;
2388
}
2389
if (pmc->mca_sfmode == MCAST_EXCLUDE &&
2390
pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
2391
pmc->mca_sfcount[MCAST_INCLUDE]) {
2392
struct ip6_sf_list *psf;
2393
2394
/* filter mode change */
2395
pmc->mca_sfmode = MCAST_INCLUDE;
2396
pmc->mca_crcount = idev->mc_qrv;
2397
idev->mc_ifc_count = pmc->mca_crcount;
2398
for_each_psf_mclock(pmc, psf)
2399
psf->sf_crcount = 0;
2400
mld_ifc_event(pmc->idev);
2401
} else if (sf_setstate(pmc) || changerec) {
2402
mld_ifc_event(pmc->idev);
2403
}
2404
2405
return err;
2406
}
2407
2408
/* Add multicast single-source filter to the interface list */
2409
static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
2410
const struct in6_addr *psfsrc)
2411
{
2412
struct ip6_sf_list *psf, *psf_prev;
2413
2414
mc_assert_locked(pmc->idev);
2415
2416
psf_prev = NULL;
2417
for_each_psf_mclock(pmc, psf) {
2418
if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
2419
break;
2420
psf_prev = psf;
2421
}
2422
if (!psf) {
2423
psf = kzalloc(sizeof(*psf), GFP_KERNEL);
2424
if (!psf)
2425
return -ENOBUFS;
2426
2427
psf->sf_addr = *psfsrc;
2428
if (psf_prev) {
2429
rcu_assign_pointer(psf_prev->sf_next, psf);
2430
} else {
2431
rcu_assign_pointer(pmc->mca_sources, psf);
2432
}
2433
}
2434
WRITE_ONCE(psf->sf_count[sfmode], psf->sf_count[sfmode] + 1);
2435
return 0;
2436
}
2437
2438
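/* Record, for each source of pmc, whether it is currently passed by the
 * interface filter (sf_oldin), so that sf_setstate() can detect
 * transitions afterwards.
 */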
static void sf_markstate(struct ifmcaddr6 *pmc)
2439
{
2440
int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2441
struct ip6_sf_list *psf;
2442
2443
mc_assert_locked(pmc->idev);
2444
2445
for_each_psf_mclock(pmc, psf) {
2446
if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2447
psf->sf_oldin = mca_xcount ==
2448
psf->sf_count[MCAST_EXCLUDE] &&
2449
!psf->sf_count[MCAST_INCLUDE];
2450
} else {
2451
psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
2452
}
2453
}
2454
}
2455
2456
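/* Compare each source's new filter state with the one recorded by
 * sf_markstate(); arm retransmit counters (and tomb entries for sources
 * that became inactive) and return the number of sources that changed.
 */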
static int sf_setstate(struct ifmcaddr6 *pmc)
2457
{
2458
int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
2459
struct ip6_sf_list *psf, *dpsf;
2460
int qrv = pmc->idev->mc_qrv;
2461
int new_in, rv;
2462
2463
mc_assert_locked(pmc->idev);
2464
2465
rv = 0;
2466
for_each_psf_mclock(pmc, psf) {
2467
if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
2468
new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
2469
!psf->sf_count[MCAST_INCLUDE];
2470
} else
2471
new_in = psf->sf_count[MCAST_INCLUDE] != 0;
2472
if (new_in) {
2473
if (!psf->sf_oldin) {
2474
struct ip6_sf_list *prev = NULL;
2475
2476
for_each_psf_tomb(pmc, dpsf) {
2477
if (ipv6_addr_equal(&dpsf->sf_addr,
2478
&psf->sf_addr))
2479
break;
2480
prev = dpsf;
2481
}
2482
if (dpsf) {
2483
if (prev)
2484
rcu_assign_pointer(prev->sf_next,
2485
mc_dereference(dpsf->sf_next,
2486
pmc->idev));
2487
else
2488
rcu_assign_pointer(pmc->mca_tomb,
2489
mc_dereference(dpsf->sf_next,
2490
pmc->idev));
2491
kfree_rcu(dpsf, rcu);
2492
}
2493
psf->sf_crcount = qrv;
2494
rv++;
2495
}
2496
} else if (psf->sf_oldin) {
2497
psf->sf_crcount = 0;
2498
/*
2499
* add or update "delete" records if an active filter
2500
* is now inactive
2501
*/
2502
2503
for_each_psf_tomb(pmc, dpsf)
2504
if (ipv6_addr_equal(&dpsf->sf_addr,
2505
&psf->sf_addr))
2506
break;
2507
if (!dpsf) {
2508
dpsf = kmalloc(sizeof(*dpsf), GFP_KERNEL);
2509
if (!dpsf)
2510
continue;
2511
*dpsf = *psf;
2512
rcu_assign_pointer(dpsf->sf_next,
2513
mc_dereference(pmc->mca_tomb, pmc->idev));
2514
rcu_assign_pointer(pmc->mca_tomb, dpsf);
2515
}
2516
dpsf->sf_crcount = qrv;
2517
rv++;
2518
}
2519
}
2520
return rv;
2521
}
2522
2523
/* Add multicast source filter list to the interface list */
2524
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2525
int sfmode, int sfcount, const struct in6_addr *psfsrc,
2526
int delta)
2527
{
2528
struct ifmcaddr6 *pmc;
2529
int isexclude;
2530
int i, err;
2531
2532
if (!idev)
2533
return -ENODEV;
2534
2535
mc_assert_locked(idev);
2536
2537
for_each_mc_mclock(idev, pmc) {
2538
if (ipv6_addr_equal(pmca, &pmc->mca_addr))
2539
break;
2540
}
2541
if (!pmc)
2542
return -ESRCH;
2543
2544
sf_markstate(pmc);
2545
isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
2546
if (!delta)
2547
WRITE_ONCE(pmc->mca_sfcount[sfmode],
2548
pmc->mca_sfcount[sfmode] + 1);
2549
err = 0;
2550
for (i = 0; i < sfcount; i++) {
2551
err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
2552
if (err)
2553
break;
2554
}
2555
if (err) {
2556
int j;
2557
2558
if (!delta)
2559
WRITE_ONCE(pmc->mca_sfcount[sfmode],
2560
pmc->mca_sfcount[sfmode] - 1);
2561
for (j = 0; j < i; j++)
2562
ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
2563
} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
2564
struct ip6_sf_list *psf;
2565
2566
/* filter mode change */
2567
if (pmc->mca_sfcount[MCAST_EXCLUDE])
2568
pmc->mca_sfmode = MCAST_EXCLUDE;
2569
else if (pmc->mca_sfcount[MCAST_INCLUDE])
2570
pmc->mca_sfmode = MCAST_INCLUDE;
2571
/* else no filters; keep old mode for reports */
2572
2573
pmc->mca_crcount = idev->mc_qrv;
2574
idev->mc_ifc_count = pmc->mca_crcount;
2575
for_each_psf_mclock(pmc, psf)
2576
psf->sf_crcount = 0;
2577
mld_ifc_event(idev);
2578
} else if (sf_setstate(pmc)) {
2579
mld_ifc_event(idev);
2580
}
2581
return err;
2582
}
2583
2584
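/* Free every source entry of pmc, both active and tombstoned, and return
 * the group to its default state: exclude mode with an empty source list,
 * i.e. traffic from any source is accepted.
 */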
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
2585
{
2586
struct ip6_sf_list *psf, *nextpsf;
2587
2588
mc_assert_locked(pmc->idev);
2589
2590
for (psf = mc_dereference(pmc->mca_tomb, pmc->idev);
2591
psf;
2592
psf = nextpsf) {
2593
nextpsf = mc_dereference(psf->sf_next, pmc->idev);
2594
kfree_rcu(psf, rcu);
2595
}
2596
RCU_INIT_POINTER(pmc->mca_tomb, NULL);
2597
for (psf = mc_dereference(pmc->mca_sources, pmc->idev);
2598
psf;
2599
psf = nextpsf) {
2600
nextpsf = mc_dereference(psf->sf_next, pmc->idev);
2601
kfree_rcu(psf, rcu);
2602
}
2603
RCU_INIT_POINTER(pmc->mca_sources, NULL);
2604
pmc->mca_sfmode = MCAST_EXCLUDE;
2605
pmc->mca_sfcount[MCAST_INCLUDE] = 0;
2606
/* Paired with the READ_ONCE() from ipv6_chk_mcast_addr() */
2607
WRITE_ONCE(pmc->mca_sfcount[MCAST_EXCLUDE], 1);
2608
}
2609
2610
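/* Send an unsolicited MLDv1 report for the group and arm the per-address
 * work with a random delay within the unsolicited report interval, so the
 * report can be retransmitted when the work fires.
 */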
static void igmp6_join_group(struct ifmcaddr6 *ma)
2611
{
2612
unsigned long delay;
2613
2614
mc_assert_locked(ma->idev);
2615
2616
if (ma->mca_flags & MAF_NOREPORT)
2617
return;
2618
2619
igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2620
2621
delay = get_random_u32_below(unsolicited_report_interval(ma->idev));
2622
2623
if (cancel_delayed_work(&ma->mca_work)) {
2624
refcount_dec(&ma->mca_refcnt);
2625
delay = ma->mca_work.timer.expires - jiffies;
2626
}
2627
2628
if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
2629
refcount_inc(&ma->mca_refcnt);
2630
ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
2631
}
2632
2633
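/* Detach the socket's source filter list from iml, release its memory
 * accounting, and drop the corresponding per-source references from the
 * interface state on idev.
 */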
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
2634
struct inet6_dev *idev)
2635
{
2636
struct ip6_sf_socklist *psl;
2637
int err;
2638
2639
psl = sock_dereference(iml->sflist, sk);
2640
2641
if (idev)
2642
mutex_lock(&idev->mc_lock);
2643
2644
if (!psl) {
2645
/* any-source empty exclude case */
2646
err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
2647
} else {
2648
err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
2649
psl->sl_count, psl->sl_addr, 0);
2650
RCU_INIT_POINTER(iml->sflist, NULL);
2651
atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
2652
&sk->sk_omem_alloc);
2653
kfree_rcu(psl, rcu);
2654
}
2655
2656
if (idev)
2657
mutex_unlock(&idev->mc_lock);
2658
2659
return err;
2660
}
2661
2662
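/* Leaving a group: in MLDv1 mode send a Done message if we were the last
 * host to report for it; in MLDv2 mode tombstone the address and schedule
 * the interface state-change report.
 */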
static void igmp6_leave_group(struct ifmcaddr6 *ma)
2663
{
2664
mc_assert_locked(ma->idev);
2665
2666
if (mld_in_v1_mode(ma->idev)) {
2667
if (ma->mca_flags & MAF_LAST_REPORTER) {
2668
igmp6_send(&ma->mca_addr, ma->idev->dev,
2669
ICMPV6_MGM_REDUCTION);
2670
}
2671
} else {
2672
mld_add_delrec(ma->idev, ma);
2673
mld_ifc_event(ma->idev);
2674
}
2675
}
2676
2677
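/* Deferred work: answer a general query by sending a full current-state
 * report, then clear the "general query pending" flag.
 */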
static void mld_gq_work(struct work_struct *work)
2678
{
2679
struct inet6_dev *idev = container_of(to_delayed_work(work),
2680
struct inet6_dev,
2681
mc_gq_work);
2682
2683
mutex_lock(&idev->mc_lock);
2684
mld_send_report(idev, NULL);
2685
idev->mc_gq_running = 0;
2686
mutex_unlock(&idev->mc_lock);
2687
2688
in6_dev_put(idev);
2689
}
2690
2691
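/* Deferred work: transmit the pending state-change reports and reschedule
 * itself until mc_ifc_count retransmissions have been sent.
 */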
static void mld_ifc_work(struct work_struct *work)
2692
{
2693
struct inet6_dev *idev = container_of(to_delayed_work(work),
2694
struct inet6_dev,
2695
mc_ifc_work);
2696
2697
mutex_lock(&idev->mc_lock);
2698
mld_send_cr(idev);
2699
2700
if (idev->mc_ifc_count) {
2701
idev->mc_ifc_count--;
2702
if (idev->mc_ifc_count)
2703
mld_ifc_start_work(idev,
2704
unsolicited_report_interval(idev));
2705
}
2706
mutex_unlock(&idev->mc_lock);
2707
in6_dev_put(idev);
2708
}
2709
2710
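/* Arm mc_ifc_count and kick off the state-change report work immediately;
 * a no-op while the interface is in MLDv1 compatibility mode.
 */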
static void mld_ifc_event(struct inet6_dev *idev)
2711
{
2712
mc_assert_locked(idev);
2713
2714
if (mld_in_v1_mode(idev))
2715
return;
2716
2717
idev->mc_ifc_count = idev->mc_qrv;
2718
mld_ifc_start_work(idev, 1);
2719
}
2720
2721
static void mld_mca_work(struct work_struct *work)
2722
{
2723
struct ifmcaddr6 *ma = container_of(to_delayed_work(work),
2724
struct ifmcaddr6, mca_work);
2725
2726
mutex_lock(&ma->idev->mc_lock);
2727
if (mld_in_v1_mode(ma->idev))
2728
igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
2729
else
2730
mld_send_report(ma->idev, ma);
2731
ma->mca_flags |= MAF_LAST_REPORTER;
2732
ma->mca_flags &= ~MAF_TIMER_RUNNING;
2733
mutex_unlock(&ma->idev->mc_lock);
2734
2735
ma_put(ma);
2736
}
2737
2738
/* Device changing type */
2739
2740
void ipv6_mc_unmap(struct inet6_dev *idev)
2741
{
2742
struct ifmcaddr6 *i;
2743
2744
/* Withdraw the multicast list while the device changes type. */
2745
2746
mutex_lock(&idev->mc_lock);
2747
for_each_mc_mclock(idev, i)
2748
igmp6_group_dropped(i);
2749
mutex_unlock(&idev->mc_lock);
2750
}
2751
2752
void ipv6_mc_remap(struct inet6_dev *idev)
2753
{
2754
ipv6_mc_up(idev);
2755
}
2756
2757
/* Device going down */
2758
void ipv6_mc_down(struct inet6_dev *idev)
2759
{
2760
struct ifmcaddr6 *i;
2761
2762
mutex_lock(&idev->mc_lock);
2763
/* Withdraw multicast list */
2764
for_each_mc_mclock(idev, i)
2765
igmp6_group_dropped(i);
2766
mutex_unlock(&idev->mc_lock);
2767
2768
/* Stop the works only after the groups have been dropped, or
2769
* mld_ifc_event() will start them again.
2770
*/
2771
mld_query_stop_work(idev);
2772
mld_report_stop_work(idev);
2773
2774
mutex_lock(&idev->mc_lock);
2775
mld_ifc_stop_work(idev);
2776
mld_gq_stop_work(idev);
2777
mutex_unlock(&idev->mc_lock);
2778
2779
mld_dad_stop_work(idev);
2780
}
2781
2782
static void ipv6_mc_reset(struct inet6_dev *idev)
2783
{
2784
idev->mc_qrv = sysctl_mld_qrv;
2785
idev->mc_qi = MLD_QI_DEFAULT;
2786
idev->mc_qri = MLD_QRI_DEFAULT;
2787
idev->mc_v1_seen = 0;
2788
idev->mc_maxdelay = unsolicited_report_interval(idev);
2789
}
2790
2791
/* Device going up */
2792
2793
void ipv6_mc_up(struct inet6_dev *idev)
2794
{
2795
struct ifmcaddr6 *i;
2796
2797
/* Install multicast list, except for all-nodes (already installed) */
2798
2799
ipv6_mc_reset(idev);
2800
mutex_lock(&idev->mc_lock);
2801
for_each_mc_mclock(idev, i) {
2802
mld_del_delrec(idev, i);
2803
igmp6_group_added(i);
2804
}
2805
mutex_unlock(&idev->mc_lock);
2806
}
2807
2808
/* IPv6 device initialization. */
2809
2810
void ipv6_mc_init_dev(struct inet6_dev *idev)
2811
{
2812
idev->mc_gq_running = 0;
2813
INIT_DELAYED_WORK(&idev->mc_gq_work, mld_gq_work);
2814
RCU_INIT_POINTER(idev->mc_tomb, NULL);
2815
idev->mc_ifc_count = 0;
2816
INIT_DELAYED_WORK(&idev->mc_ifc_work, mld_ifc_work);
2817
INIT_DELAYED_WORK(&idev->mc_dad_work, mld_dad_work);
2818
INIT_DELAYED_WORK(&idev->mc_query_work, mld_query_work);
2819
INIT_DELAYED_WORK(&idev->mc_report_work, mld_report_work);
2820
skb_queue_head_init(&idev->mc_query_queue);
2821
skb_queue_head_init(&idev->mc_report_queue);
2822
spin_lock_init(&idev->mc_query_lock);
2823
spin_lock_init(&idev->mc_report_lock);
2824
mutex_init(&idev->mc_lock);
2825
ipv6_mc_reset(idev);
2826
}
2827
2828
/*
2829
* Device is about to be destroyed: clean up.
2830
*/
2831
2832
void ipv6_mc_destroy_dev(struct inet6_dev *idev)
2833
{
2834
struct ifmcaddr6 *i;
2835
2836
/* Deactivate works */
2837
ipv6_mc_down(idev);
2838
mutex_lock(&idev->mc_lock);
2839
mld_clear_delrec(idev);
2840
mutex_unlock(&idev->mc_lock);
2841
mld_clear_query(idev);
2842
mld_clear_report(idev);
2843
2844
/* Delete all-nodes address. */
2845
/* We cannot call ipv6_dev_mc_dec() directly; our caller in
2846
* addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
2847
* fail.
2848
*/
2849
__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);
2850
2851
if (idev->cnf.forwarding)
2852
__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);
2853
2854
mutex_lock(&idev->mc_lock);
2855
while ((i = mc_dereference(idev->mc_list, idev))) {
2856
rcu_assign_pointer(idev->mc_list, mc_dereference(i->next, idev));
2857
2858
ip6_mc_clear_src(i);
2859
ma_put(i);
2860
}
2861
mutex_unlock(&idev->mc_lock);
2862
}
2863
2864
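/* NETDEV_RESEND_IGMP: re-announce every group on the interface, either
 * with individual MLDv1 reports or with a single MLDv2 current-state
 * report.
 */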
static void ipv6_mc_rejoin_groups(struct inet6_dev *idev)
2865
{
2866
struct ifmcaddr6 *pmc;
2867
2868
mutex_lock(&idev->mc_lock);
2869
if (mld_in_v1_mode(idev)) {
2870
for_each_mc_mclock(idev, pmc)
2871
igmp6_join_group(pmc);
2872
} else {
2873
mld_send_report(idev, NULL);
2874
}
2875
mutex_unlock(&idev->mc_lock);
2876
}
2877
2878
static int ipv6_mc_netdev_event(struct notifier_block *this,
2879
unsigned long event,
2880
void *ptr)
2881
{
2882
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2883
struct inet6_dev *idev = __in6_dev_get(dev);
2884
2885
switch (event) {
2886
case NETDEV_RESEND_IGMP:
2887
if (idev)
2888
ipv6_mc_rejoin_groups(idev);
2889
break;
2890
default:
2891
break;
2892
}
2893
2894
return NOTIFY_DONE;
2895
}
2896
2897
static struct notifier_block igmp6_netdev_notifier = {
2898
.notifier_call = ipv6_mc_netdev_event,
2899
};
2900
2901
#ifdef CONFIG_PROC_FS
2902
struct igmp6_mc_iter_state {
2903
struct seq_net_private p;
2904
struct net_device *dev;
2905
struct inet6_dev *idev;
2906
};
2907
2908
#define igmp6_mc_seq_private(seq) ((struct igmp6_mc_iter_state *)(seq)->private)
2909
2910
static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
2911
{
2912
struct ifmcaddr6 *im = NULL;
2913
struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2914
struct net *net = seq_file_net(seq);
2915
2916
state->idev = NULL;
2917
for_each_netdev_rcu(net, state->dev) {
2918
struct inet6_dev *idev;
2919
idev = __in6_dev_get(state->dev);
2920
if (!idev)
2921
continue;
2922
2923
im = rcu_dereference(idev->mc_list);
2924
if (im) {
2925
state->idev = idev;
2926
break;
2927
}
2928
}
2929
return im;
2930
}
2931
2932
static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
2933
{
2934
struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2935
2936
im = rcu_dereference(im->next);
2937
while (!im) {
2938
state->dev = next_net_device_rcu(state->dev);
2939
if (!state->dev) {
2940
state->idev = NULL;
2941
break;
2942
}
2943
state->idev = __in6_dev_get(state->dev);
2944
if (!state->idev)
2945
continue;
2946
im = rcu_dereference(state->idev->mc_list);
2947
}
2948
return im;
2949
}
2950
2951
static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
2952
{
2953
struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
2954
if (im)
2955
while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
2956
--pos;
2957
return pos ? NULL : im;
2958
}
2959
2960
static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
2961
__acquires(RCU)
2962
{
2963
rcu_read_lock();
2964
return igmp6_mc_get_idx(seq, *pos);
2965
}
2966
2967
static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2968
{
2969
struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
2970
2971
++*pos;
2972
return im;
2973
}
2974
2975
static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
2976
__releases(RCU)
2977
{
2978
struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2979
2980
if (likely(state->idev))
2981
state->idev = NULL;
2982
state->dev = NULL;
2983
rcu_read_unlock();
2984
}
2985
2986
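/* /proc/net/igmp6: one line per group - interface index, device name,
 * multicast address, user count, flags, and the remaining report timer
 * (in clock ticks) when one is running.
 */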
static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
2987
{
2988
struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
2989
struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
2990
2991
seq_printf(seq,
2992
"%-4d %-15s %pi6 %5d %08X %ld\n",
2993
state->dev->ifindex, state->dev->name,
2994
&im->mca_addr,
2995
im->mca_users, im->mca_flags,
2996
(im->mca_flags & MAF_TIMER_RUNNING) ?
2997
jiffies_to_clock_t(im->mca_work.timer.expires - jiffies) : 0);
2998
return 0;
2999
}
3000
3001
static const struct seq_operations igmp6_mc_seq_ops = {
3002
.start = igmp6_mc_seq_start,
3003
.next = igmp6_mc_seq_next,
3004
.stop = igmp6_mc_seq_stop,
3005
.show = igmp6_mc_seq_show,
3006
};
3007
3008
struct igmp6_mcf_iter_state {
3009
struct seq_net_private p;
3010
struct net_device *dev;
3011
struct inet6_dev *idev;
3012
struct ifmcaddr6 *im;
3013
};
3014
3015
#define igmp6_mcf_seq_private(seq) ((struct igmp6_mcf_iter_state *)(seq)->private)
3016
3017
static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
3018
{
3019
struct ip6_sf_list *psf = NULL;
3020
struct ifmcaddr6 *im = NULL;
3021
struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
3022
struct net *net = seq_file_net(seq);
3023
3024
state->idev = NULL;
3025
state->im = NULL;
3026
for_each_netdev_rcu(net, state->dev) {
3027
struct inet6_dev *idev;
3028
idev = __in6_dev_get(state->dev);
3029
if (unlikely(idev == NULL))
3030
continue;
3031
3032
im = rcu_dereference(idev->mc_list);
3033
if (likely(im)) {
3034
psf = rcu_dereference(im->mca_sources);
3035
if (likely(psf)) {
3036
state->im = im;
3037
state->idev = idev;
3038
break;
3039
}
3040
}
3041
}
3042
return psf;
3043
}
3044
3045
static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
3046
{
3047
struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
3048
3049
psf = rcu_dereference(psf->sf_next);
3050
while (!psf) {
3051
state->im = rcu_dereference(state->im->next);
3052
while (!state->im) {
3053
state->dev = next_net_device_rcu(state->dev);
3054
if (!state->dev) {
3055
state->idev = NULL;
3056
goto out;
3057
}
3058
state->idev = __in6_dev_get(state->dev);
3059
if (!state->idev)
3060
continue;
3061
state->im = rcu_dereference(state->idev->mc_list);
3062
}
3063
psf = rcu_dereference(state->im->mca_sources);
3064
}
3065
out:
3066
return psf;
3067
}
3068
3069
static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
3070
{
3071
struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);
3072
if (psf)
3073
while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
3074
--pos;
3075
return pos ? NULL : psf;
3076
}
3077
3078
static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
3079
__acquires(RCU)
3080
{
3081
rcu_read_lock();
3082
return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
3083
}
3084
3085
static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3086
{
3087
struct ip6_sf_list *psf;
3088
if (v == SEQ_START_TOKEN)
3089
psf = igmp6_mcf_get_first(seq);
3090
else
3091
psf = igmp6_mcf_get_next(seq, v);
3092
++*pos;
3093
return psf;
3094
}
3095
3096
static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
3097
__releases(RCU)
3098
{
3099
struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
3100
3101
if (likely(state->im))
3102
state->im = NULL;
3103
if (likely(state->idev))
3104
state->idev = NULL;
3105
3106
state->dev = NULL;
3107
rcu_read_unlock();
3108
}
3109
3110
static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
3111
{
3112
struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
3113
struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
3114
3115
if (v == SEQ_START_TOKEN) {
3116
seq_puts(seq, "Idx Device Multicast Address Source Address INC EXC\n");
3117
} else {
3118
seq_printf(seq,
3119
"%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
3120
state->dev->ifindex, state->dev->name,
3121
&state->im->mca_addr,
3122
&psf->sf_addr,
3123
READ_ONCE(psf->sf_count[MCAST_INCLUDE]),
3124
READ_ONCE(psf->sf_count[MCAST_EXCLUDE]));
3125
}
3126
return 0;
3127
}
3128
3129
static const struct seq_operations igmp6_mcf_seq_ops = {
3130
.start = igmp6_mcf_seq_start,
3131
.next = igmp6_mcf_seq_next,
3132
.stop = igmp6_mcf_seq_stop,
3133
.show = igmp6_mcf_seq_show,
3134
};
3135
3136
static int __net_init igmp6_proc_init(struct net *net)
3137
{
3138
int err;
3139
3140
err = -ENOMEM;
3141
if (!proc_create_net("igmp6", 0444, net->proc_net, &igmp6_mc_seq_ops,
3142
sizeof(struct igmp6_mc_iter_state)))
3143
goto out;
3144
if (!proc_create_net("mcfilter6", 0444, net->proc_net,
3145
&igmp6_mcf_seq_ops,
3146
sizeof(struct igmp6_mcf_iter_state)))
3147
goto out_proc_net_igmp6;
3148
3149
err = 0;
3150
out:
3151
return err;
3152
3153
out_proc_net_igmp6:
3154
remove_proc_entry("igmp6", net->proc_net);
3155
goto out;
3156
}
3157
3158
static void __net_exit igmp6_proc_exit(struct net *net)
3159
{
3160
remove_proc_entry("mcfilter6", net->proc_net);
3161
remove_proc_entry("igmp6", net->proc_net);
3162
}
3163
#else
3164
static inline int igmp6_proc_init(struct net *net)
3165
{
3166
return 0;
3167
}
3168
static inline void igmp6_proc_exit(struct net *net)
3169
{
3170
}
3171
#endif
3172
3173
static int __net_init igmp6_net_init(struct net *net)
3174
{
3175
int err;
3176
3177
err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
3178
SOCK_RAW, IPPROTO_ICMPV6, net);
3179
if (err < 0) {
3180
pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
3181
err);
3182
goto out;
3183
}
3184
3185
inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
3186
net->ipv6.igmp_sk->sk_allocation = GFP_KERNEL;
3187
3188
err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6,
3189
SOCK_RAW, IPPROTO_ICMPV6, net);
3190
if (err < 0) {
3191
pr_err("Failed to initialize the IGMP6 autojoin socket (err %d)\n",
3192
err);
3193
goto out_sock_create;
3194
}
3195
3196
err = igmp6_proc_init(net);
3197
if (err)
3198
goto out_sock_create_autojoin;
3199
3200
return 0;
3201
3202
out_sock_create_autojoin:
3203
inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
3204
out_sock_create:
3205
inet_ctl_sock_destroy(net->ipv6.igmp_sk);
3206
out:
3207
return err;
3208
}
3209
3210
static void __net_exit igmp6_net_exit(struct net *net)
3211
{
3212
inet_ctl_sock_destroy(net->ipv6.igmp_sk);
3213
inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
3214
igmp6_proc_exit(net);
3215
}
3216
3217
static struct pernet_operations igmp6_net_ops = {
3218
.init = igmp6_net_init,
3219
.exit = igmp6_net_exit,
3220
};
3221
3222
int __init igmp6_init(void)
3223
{
3224
int err;
3225
3226
err = register_pernet_subsys(&igmp6_net_ops);
3227
if (err)
3228
return err;
3229
3230
mld_wq = create_workqueue("mld");
3231
if (!mld_wq) {
3232
unregister_pernet_subsys(&igmp6_net_ops);
3233
return -ENOMEM;
3234
}
3235
3236
return err;
3237
}
3238
3239
int __init igmp6_late_init(void)
3240
{
3241
return register_netdevice_notifier(&igmp6_netdev_notifier);
3242
}
3243
3244
void igmp6_cleanup(void)
3245
{
3246
unregister_pernet_subsys(&igmp6_net_ops);
3247
destroy_workqueue(mld_wq);
3248
}
3249
3250
void igmp6_late_cleanup(void)
3251
{
3252
unregister_netdevice_notifier(&igmp6_netdev_notifier);
3253
}
3254
3255