GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/ipv4/igmp.c

/*
 *	Linux NET3:	Internet Group Management Protocol  [IGMP]
 *
 *	This code implements the IGMP protocol as defined in RFC1112. There has
 *	been a further revision of this protocol since which is now supported.
 *
 *	If you have trouble with this module be careful what gcc you have used,
 *	the older version didn't come out right using gcc 2.5.8, the newer one
 *	seems to fall out with gcc 2.6.2.
 *
 *	Authors:
 *		Alan Cox <[email protected]>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *
 *		Alan Cox	:	Added lots of __inline__ to optimise
 *					the memory usage of all the tiny little
 *					functions.
 *		Alan Cox	:	Dumped the header building experiment.
 *		Alan Cox	:	Minor tweaks ready for multicast routing
 *					and extended IGMP protocol.
 *		Alan Cox	:	Removed a load of inline directives. Gcc 2.5.8
 *					writes utterly bogus code otherwise (sigh)
 *					fixed IGMP loopback to behave in the manner
 *					desired by mrouted, fixed the fact it has been
 *					broken since 1.3.6 and cleaned up a few minor
 *					points.
 *
 *		Chih-Jen Chang	:	Tried to revise IGMP to Version 2
 *		Tsu-Sheng Tsao		E-mail: [email protected] and [email protected]
 *					The enhancements are mainly based on Steve Deering's
 *					ipmulti-3.5 source code.
 *		Chih-Jen Chang	:	Added the igmp_get_mrouter_info and
 *		Tsu-Sheng Tsao		igmp_set_mrouter_info to keep track of
 *					the mrouted version on that device.
 *		Chih-Jen Chang	:	Added the max_resp_time parameter to
 *		Tsu-Sheng Tsao		igmp_heard_query(). Using this parameter
 *					to identify the multicast router version
 *					and do what the IGMP version 2 specified.
 *		Chih-Jen Chang	:	Added a timer to revert to IGMP V2 router
 *		Tsu-Sheng Tsao		if the specified time expired.
 *		Alan Cox	:	Stop IGMP from 0.0.0.0 being accepted.
 *		Alan Cox	:	Use GFP_ATOMIC in the right places.
 *		Christian Daudt :	igmp timer wasn't set for local group
 *					memberships but was being deleted,
 *					which caused a "del_timer() called
 *					from %p with timer not initialized\n"
 *					message (960131).
 *		Christian Daudt :	removed del_timer from
 *					igmp_timer_expire function (960205).
 *		Christian Daudt :	igmp_heard_report now only calls
 *					igmp_timer_expire if tm->running is
 *					true (960216).
 *		Malcolm Beattie :	ttl comparison wrong in igmp_rcv made
 *					igmp_heard_query never trigger. Expiry
 *					miscalculation fixed in igmp_heard_query
 *					and random() made to return unsigned to
 *					prevent negative expiry times.
 *		Alexey Kuznetsov:	Wrong group leaving behaviour, backport
 *					fix from pending 2.1.x patches.
 *		Alan Cox:		Forget to enable FDDI support earlier.
 *		Alexey Kuznetsov:	Fixed leaving groups on device down.
 *		Alexey Kuznetsov:	Accordance to igmp-v2-06 draft.
 *		David L Stevens:	IGMPv3 support, with help from
 *					Vinay Kulkarni
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/times.h>

#include <net/net_namespace.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <linux/netfilter_ipv4.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#endif

#define IP_MAX_MEMBERSHIPS	20
#define IP_MAX_MSF		10

#ifdef CONFIG_IP_MULTICAST
/* Parameter names and values are taken from igmp-v2-06 draft */

#define IGMP_V1_Router_Present_Timeout		(400*HZ)
#define IGMP_V2_Router_Present_Timeout		(400*HZ)
#define IGMP_Unsolicited_Report_Interval	(10*HZ)
#define IGMP_Query_Response_Interval		(10*HZ)
#define IGMP_Unsolicited_Report_Count		2


#define IGMP_Initial_Report_Delay		(1)

/* IGMP_Initial_Report_Delay is not from IGMP specs!
 * IGMP specs require to report membership immediately after
 * joining a group, but we delay the first report by a
 * small interval. It seems more natural and still does not
 * contradict the specs, provided this delay is small enough.
 */

#define IGMP_V1_SEEN(in_dev) \
	(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
	 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \
	 ((in_dev)->mr_v1_seen && \
	  time_before(jiffies, (in_dev)->mr_v1_seen)))
#define IGMP_V2_SEEN(in_dev) \
	(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
	 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \
	 ((in_dev)->mr_v2_seen && \
	  time_before(jiffies, (in_dev)->mr_v2_seen)))

static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr);
static void igmpv3_clear_delrec(struct in_device *in_dev);
static int sf_setstate(struct ip_mc_list *pmc);
static void sf_markstate(struct ip_mc_list *pmc);
#endif
static void ip_mc_clear_src(struct ip_mc_list *pmc);
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta);

static void ip_ma_put(struct ip_mc_list *im)
{
	if (atomic_dec_and_test(&im->refcnt)) {
		in_dev_put(im->interface);
		kfree_rcu(im, rcu);
	}
}

#define for_each_pmc_rcu(in_dev, pmc)				\
	for (pmc = rcu_dereference(in_dev->mc_list);		\
	     pmc != NULL;					\
	     pmc = rcu_dereference(pmc->next_rcu))

#define for_each_pmc_rtnl(in_dev, pmc)				\
	for (pmc = rtnl_dereference(in_dev->mc_list);		\
	     pmc != NULL;					\
	     pmc = rtnl_dereference(pmc->next_rcu))

#ifdef CONFIG_IP_MULTICAST

/*
 *	Timer management
 */

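/* Note: a pending group timer holds a reference on the membership.
 * igmp_start_timer() takes the reference when it arms an idle timer,
 * igmp_stop_timer() drops it again when it deletes a pending one, and
 * igmp_timer_expire() drops it when the timer fires.
 */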
static void igmp_stop_timer(struct ip_mc_list *im)
{
	spin_lock_bh(&im->lock);
	if (del_timer(&im->timer))
		atomic_dec(&im->refcnt);
	im->tm_running = 0;
	im->reporter = 0;
	im->unsolicit_count = 0;
	spin_unlock_bh(&im->lock);
}

/* Must be called with im->lock held */
static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
{
	int tv = net_random() % max_delay;

	im->tm_running = 1;
	if (!mod_timer(&im->timer, jiffies+tv+2))
		atomic_inc(&im->refcnt);
}

static void igmp_gq_start_timer(struct in_device *in_dev)
{
	int tv = net_random() % in_dev->mr_maxdelay;

	in_dev->mr_gq_running = 1;
	if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2))
		in_dev_hold(in_dev);
}

static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
{
	int tv = net_random() % delay;

	if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2))
		in_dev_hold(in_dev);
}

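/* Re-arm the report timer for a query with the given maximum delay,
 * keeping the current expiry when a sooner one is already pending.
 */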
static void igmp_mod_timer(struct ip_mc_list *im, int max_delay)
{
	spin_lock_bh(&im->lock);
	im->unsolicit_count = 0;
	if (del_timer(&im->timer)) {
		if ((long)(im->timer.expires-jiffies) < max_delay) {
			add_timer(&im->timer);
			im->tm_running = 1;
			spin_unlock_bh(&im->lock);
			return;
		}
		atomic_dec(&im->refcnt);
	}
	igmp_start_timer(im, max_delay);
	spin_unlock_bh(&im->lock);
}


/*
 *	Send an IGMP report.
 */

#define IGMP_SIZE (sizeof(struct igmphdr)+sizeof(struct iphdr)+4)


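/* Decide whether source psf belongs in a record of the given IGMPv3
 * type for group pmc. gdeleted/sdeleted say whether the group or the
 * source comes from a tomb ("delete") list rather than the live state.
 */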
static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type,
	int gdeleted, int sdeleted)
{
	switch (type) {
	case IGMPV3_MODE_IS_INCLUDE:
	case IGMPV3_MODE_IS_EXCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		if (!(pmc->gsquery && !psf->sf_gsresp)) {
			if (pmc->sfmode == MCAST_INCLUDE)
				return 1;
			/* don't include if this source is excluded
			 * in all filters
			 */
			if (psf->sf_count[MCAST_INCLUDE])
				return type == IGMPV3_MODE_IS_INCLUDE;
			return pmc->sfcount[MCAST_EXCLUDE] ==
				psf->sf_count[MCAST_EXCLUDE];
		}
		return 0;
	case IGMPV3_CHANGE_TO_INCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		return psf->sf_count[MCAST_INCLUDE] != 0;
	case IGMPV3_CHANGE_TO_EXCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		if (pmc->sfcount[MCAST_EXCLUDE] == 0 ||
		    psf->sf_count[MCAST_INCLUDE])
			return 0;
		return pmc->sfcount[MCAST_EXCLUDE] ==
			psf->sf_count[MCAST_EXCLUDE];
	case IGMPV3_ALLOW_NEW_SOURCES:
		if (gdeleted || !psf->sf_crcount)
			return 0;
		return (pmc->sfmode == MCAST_INCLUDE) ^ sdeleted;
	case IGMPV3_BLOCK_OLD_SOURCES:
		if (pmc->sfmode == MCAST_INCLUDE)
			return gdeleted || (psf->sf_crcount && sdeleted);
		return psf->sf_crcount && !gdeleted && !sdeleted;
	}
	return 0;
}

static int
igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
{
	struct ip_sf_list *psf;
	int scount = 0;

	for (psf=pmc->sources; psf; psf=psf->sf_next) {
		if (!is_in(pmc, psf, type, gdeleted, sdeleted))
			continue;
		scount++;
	}
	return scount;
}

#define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb))

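/* Allocate an skb for an IGMPv3 report and prime it: link-layer
 * headroom, an IP header with TTL 1, TOS 0xc0 and the 4-byte Router
 * Alert option, and an empty report header. On allocation failure the
 * requested size is halved, down to a floor of 256 bytes, before
 * giving up.
 */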
static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
{
	struct sk_buff *skb;
	struct rtable *rt;
	struct iphdr *pip;
	struct igmpv3_report *pig;
	struct net *net = dev_net(dev);
	struct flowi4 fl4;

	while (1) {
		skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev),
				GFP_ATOMIC | __GFP_NOWARN);
		if (skb)
			break;
		size >>= 1;
		if (size < 256)
			return NULL;
	}
	igmp_skb_size(skb) = size;

	rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
				   0, 0,
				   IPPROTO_IGMP, 0, dev->ifindex);
	if (IS_ERR(rt)) {
		kfree_skb(skb);
		return NULL;
	}

	skb_dst_set(skb, &rt->dst);
	skb->dev = dev;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));

	skb_reset_network_header(skb);
	pip = ip_hdr(skb);
	skb_put(skb, sizeof(struct iphdr) + 4);

	pip->version = 4;
	pip->ihl = (sizeof(struct iphdr)+4)>>2;
	pip->tos = 0xc0;
	pip->frag_off = htons(IP_DF);
	pip->ttl = 1;
	pip->daddr = fl4.daddr;
	pip->saddr = fl4.saddr;
	pip->protocol = IPPROTO_IGMP;
	pip->tot_len = 0;	/* filled in later */
	ip_select_ident(pip, &rt->dst, NULL);
	((u8*)&pip[1])[0] = IPOPT_RA;
	((u8*)&pip[1])[1] = 4;
	((u8*)&pip[1])[2] = 0;
	((u8*)&pip[1])[3] = 0;

	skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4;
	skb_put(skb, sizeof(*pig));
	pig = igmpv3_report_hdr(skb);
	pig->type = IGMPV3_HOST_MEMBERSHIP_REPORT;
	pig->resv1 = 0;
	pig->csum = 0;
	pig->resv2 = 0;
	pig->ngrec = 0;
	return skb;
}

static int igmpv3_sendpack(struct sk_buff *skb)
{
	struct igmphdr *pig = igmp_hdr(skb);
	const int igmplen = skb->tail - skb->transport_header;

	pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen);

	return ip_local_out(skb);
}

static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
{
	return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc, type, gdel, sdel);
}

static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
	int type, struct igmpv3_grec **ppgr)
{
	struct net_device *dev = pmc->interface->dev;
	struct igmpv3_report *pih;
	struct igmpv3_grec *pgr;

	if (!skb)
		skb = igmpv3_newpack(dev, dev->mtu);
	if (!skb)
		return NULL;
	pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec));
	pgr->grec_type = type;
	pgr->grec_auxwords = 0;
	pgr->grec_nsrcs = 0;
	pgr->grec_mca = pmc->multiaddr;
	pih = igmpv3_report_hdr(skb);
	pih->ngrec = htons(ntohs(pih->ngrec)+1);
	*ppgr = pgr;
	return skb;
}

#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \
	skb_tailroom(skb)) : 0)

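/* Append one group record of the given type for pmc, listing the
 * sources selected by is_in(). Full packets are sent and replaced on
 * the fly; EXCLUDE-style records may be truncated, the other types
 * spill over into a fresh record in the next packet.
 */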
static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
	int type, int gdeleted, int sdeleted)
{
	struct net_device *dev = pmc->interface->dev;
	struct igmpv3_report *pih;
	struct igmpv3_grec *pgr = NULL;
	struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
	int scount, stotal, first, isquery, truncate;

	if (pmc->multiaddr == IGMP_ALL_HOSTS)
		return skb;

	isquery = type == IGMPV3_MODE_IS_INCLUDE ||
		  type == IGMPV3_MODE_IS_EXCLUDE;
	truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
		   type == IGMPV3_CHANGE_TO_EXCLUDE;

	stotal = scount = 0;

	psf_list = sdeleted ? &pmc->tomb : &pmc->sources;

	if (!*psf_list)
		goto empty_source;

	pih = skb ? igmpv3_report_hdr(skb) : NULL;

	/* EX and TO_EX get a fresh packet, if needed */
	if (truncate) {
		if (pih && pih->ngrec &&
		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
			if (skb)
				igmpv3_sendpack(skb);
			skb = igmpv3_newpack(dev, dev->mtu);
		}
	}
	first = 1;
	psf_prev = NULL;
	for (psf=*psf_list; psf; psf=psf_next) {
		__be32 *psrc;

		psf_next = psf->sf_next;

		if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
			psf_prev = psf;
			continue;
		}

		/* clear marks on query responses */
		if (isquery)
			psf->sf_gsresp = 0;

		if (AVAILABLE(skb) < sizeof(__be32) +
		    first*sizeof(struct igmpv3_grec)) {
			if (truncate && !first)
				break;	 /* truncate these */
			if (pgr)
				pgr->grec_nsrcs = htons(scount);
			if (skb)
				igmpv3_sendpack(skb);
			skb = igmpv3_newpack(dev, dev->mtu);
			first = 1;
			scount = 0;
		}
		if (first) {
			skb = add_grhead(skb, pmc, type, &pgr);
			first = 0;
		}
		if (!skb)
			return NULL;
		psrc = (__be32 *)skb_put(skb, sizeof(__be32));
		*psrc = psf->sf_inaddr;
		scount++; stotal++;
		if ((type == IGMPV3_ALLOW_NEW_SOURCES ||
		     type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
			psf->sf_crcount--;
			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
				if (psf_prev)
					psf_prev->sf_next = psf->sf_next;
				else
					*psf_list = psf->sf_next;
				kfree(psf);
				continue;
			}
		}
		psf_prev = psf;
	}

empty_source:
	if (!stotal) {
		if (type == IGMPV3_ALLOW_NEW_SOURCES ||
		    type == IGMPV3_BLOCK_OLD_SOURCES)
			return skb;
		if (pmc->crcount || isquery) {
			/* make sure we have room for group header */
			if (skb && AVAILABLE(skb) < sizeof(struct igmpv3_grec)) {
				igmpv3_sendpack(skb);
				skb = NULL; /* add_grhead will get a new one */
			}
			skb = add_grhead(skb, pmc, type, &pgr);
		}
	}
	if (pgr)
		pgr->grec_nsrcs = htons(scount);

	if (isquery)
		pmc->gsquery = 0;	/* clear query state on report */
	return skb;
}

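/* Build and send a current-state report for one group (pmc), or for
 * every group on the interface when pmc is NULL, as in the response
 * to a general query.
 */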
static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
{
	struct sk_buff *skb = NULL;
	int type;

	if (!pmc) {
		rcu_read_lock();
		for_each_pmc_rcu(in_dev, pmc) {
			if (pmc->multiaddr == IGMP_ALL_HOSTS)
				continue;
			spin_lock_bh(&pmc->lock);
			if (pmc->sfcount[MCAST_EXCLUDE])
				type = IGMPV3_MODE_IS_EXCLUDE;
			else
				type = IGMPV3_MODE_IS_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			spin_unlock_bh(&pmc->lock);
		}
		rcu_read_unlock();
	} else {
		spin_lock_bh(&pmc->lock);
		if (pmc->sfcount[MCAST_EXCLUDE])
			type = IGMPV3_MODE_IS_EXCLUDE;
		else
			type = IGMPV3_MODE_IS_INCLUDE;
		skb = add_grec(skb, pmc, type, 0, 0);
		spin_unlock_bh(&pmc->lock);
	}
	if (!skb)
		return 0;
	return igmpv3_sendpack(skb);
}

/*
 * remove zero-count source records from a source filter list
 */
static void igmpv3_clear_zeros(struct ip_sf_list **ppsf)
{
	struct ip_sf_list *psf_prev, *psf_next, *psf;

	psf_prev = NULL;
	for (psf=*ppsf; psf; psf = psf_next) {
		psf_next = psf->sf_next;
		if (psf->sf_crcount == 0) {
			if (psf_prev)
				psf_prev->sf_next = psf->sf_next;
			else
				*ppsf = psf->sf_next;
			kfree(psf);
		} else
			psf_prev = psf;
	}
}

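/* Send the pending IGMPv3 state-change report for an interface:
 * records for recently deleted groups come from the tomb list, then
 * source-list changes (ALLOW/BLOCK) and filter-mode changes
 * (TO_IN/TO_EX) are emitted for live groups. The per-record crcount
 * counters drive the remaining retransmissions.
 */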
static void igmpv3_send_cr(struct in_device *in_dev)
{
	struct ip_mc_list *pmc, *pmc_prev, *pmc_next;
	struct sk_buff *skb = NULL;
	int type, dtype;

	rcu_read_lock();
	spin_lock_bh(&in_dev->mc_tomb_lock);

	/* deleted MCA's */
	pmc_prev = NULL;
	for (pmc=in_dev->mc_tomb; pmc; pmc=pmc_next) {
		pmc_next = pmc->next;
		if (pmc->sfmode == MCAST_INCLUDE) {
			type = IGMPV3_BLOCK_OLD_SOURCES;
			dtype = IGMPV3_BLOCK_OLD_SOURCES;
			skb = add_grec(skb, pmc, type, 1, 0);
			skb = add_grec(skb, pmc, dtype, 1, 1);
		}
		if (pmc->crcount) {
			if (pmc->sfmode == MCAST_EXCLUDE) {
				type = IGMPV3_CHANGE_TO_INCLUDE;
				skb = add_grec(skb, pmc, type, 1, 0);
			}
			pmc->crcount--;
			if (pmc->crcount == 0) {
				igmpv3_clear_zeros(&pmc->tomb);
				igmpv3_clear_zeros(&pmc->sources);
			}
		}
		if (pmc->crcount == 0 && !pmc->tomb && !pmc->sources) {
			if (pmc_prev)
				pmc_prev->next = pmc_next;
			else
				in_dev->mc_tomb = pmc_next;
			in_dev_put(pmc->interface);
			kfree(pmc);
		} else
			pmc_prev = pmc;
	}
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	/* change recs */
	for_each_pmc_rcu(in_dev, pmc) {
		spin_lock_bh(&pmc->lock);
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			type = IGMPV3_BLOCK_OLD_SOURCES;
			dtype = IGMPV3_ALLOW_NEW_SOURCES;
		} else {
			type = IGMPV3_ALLOW_NEW_SOURCES;
			dtype = IGMPV3_BLOCK_OLD_SOURCES;
		}
		skb = add_grec(skb, pmc, type, 0, 0);
		skb = add_grec(skb, pmc, dtype, 0, 1);	/* deleted sources */

		/* filter mode changes */
		if (pmc->crcount) {
			if (pmc->sfmode == MCAST_EXCLUDE)
				type = IGMPV3_CHANGE_TO_EXCLUDE;
			else
				type = IGMPV3_CHANGE_TO_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			pmc->crcount--;
		}
		spin_unlock_bh(&pmc->lock);
	}
	rcu_read_unlock();

	if (!skb)
		return;
	(void) igmpv3_sendpack(skb);
}

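/* Send a v1/v2 report or a leave message for one group. IGMPv3 report
 * types are diverted to igmpv3_send_report(); leaves are addressed to
 * the all-routers group (224.0.0.2), reports to the group itself.
 */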
static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
	int type)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct igmphdr *ih;
	struct rtable *rt;
	struct net_device *dev = in_dev->dev;
	struct net *net = dev_net(dev);
	__be32 group = pmc ? pmc->multiaddr : 0;
	struct flowi4 fl4;
	__be32 dst;

	if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
		return igmpv3_send_report(in_dev, pmc);
	else if (type == IGMP_HOST_LEAVE_MESSAGE)
		dst = IGMP_ALL_ROUTER;
	else
		dst = group;

	rt = ip_route_output_ports(net, &fl4, NULL, dst, 0,
				   0, 0,
				   IPPROTO_IGMP, 0, dev->ifindex);
	if (IS_ERR(rt))
		return -1;

	skb = alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL) {
		ip_rt_put(rt);
		return -1;
	}

	skb_dst_set(skb, &rt->dst);

	skb_reserve(skb, LL_RESERVED_SPACE(dev));

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	skb_put(skb, sizeof(struct iphdr) + 4);

	iph->version = 4;
	iph->ihl = (sizeof(struct iphdr)+4)>>2;
	iph->tos = 0xc0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->daddr = dst;
	iph->saddr = fl4.saddr;
	iph->protocol = IPPROTO_IGMP;
	ip_select_ident(iph, &rt->dst, NULL);
	((u8*)&iph[1])[0] = IPOPT_RA;
	((u8*)&iph[1])[1] = 4;
	((u8*)&iph[1])[2] = 0;
	((u8*)&iph[1])[3] = 0;

	ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
	ih->type = type;
	ih->code = 0;
	ih->csum = 0;
	ih->group = group;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));

	return ip_local_out(skb);
}

static void igmp_gq_timer_expire(unsigned long data)
{
	struct in_device *in_dev = (struct in_device *)data;

	in_dev->mr_gq_running = 0;
	igmpv3_send_report(in_dev, NULL);
	__in_dev_put(in_dev);
}

static void igmp_ifc_timer_expire(unsigned long data)
{
	struct in_device *in_dev = (struct in_device *)data;

	igmpv3_send_cr(in_dev);
	if (in_dev->mr_ifc_count) {
		in_dev->mr_ifc_count--;
		igmp_ifc_start_timer(in_dev, IGMP_Unsolicited_Report_Interval);
	}
	__in_dev_put(in_dev);
}

static void igmp_ifc_event(struct in_device *in_dev)
{
	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
		return;
	in_dev->mr_ifc_count = in_dev->mr_qrv ? in_dev->mr_qrv :
		IGMP_Unsolicited_Report_Count;
	igmp_ifc_start_timer(in_dev, 1);
}


static void igmp_timer_expire(unsigned long data)
{
	struct ip_mc_list *im=(struct ip_mc_list *)data;
	struct in_device *in_dev = im->interface;

	spin_lock(&im->lock);
	im->tm_running = 0;

	if (im->unsolicit_count) {
		im->unsolicit_count--;
		igmp_start_timer(im, IGMP_Unsolicited_Report_Interval);
	}
	im->reporter = 1;
	spin_unlock(&im->lock);

	if (IGMP_V1_SEEN(in_dev))
		igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
	else if (IGMP_V2_SEEN(in_dev))
		igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
	else
		igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);

	ip_ma_put(im);
}

/* mark EXCLUDE-mode sources */
static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
	struct ip_sf_list *psf;
	int i, scount;

	scount = 0;
	for (psf=pmc->sources; psf; psf=psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i=0; i<nsrcs; i++) {
			/* skip inactive filters */
			if (pmc->sfcount[MCAST_INCLUDE] ||
			    pmc->sfcount[MCAST_EXCLUDE] !=
			    psf->sf_count[MCAST_EXCLUDE])
				continue;
			if (srcs[i] == psf->sf_inaddr) {
				scount++;
				break;
			}
		}
	}
	pmc->gsquery = 0;
	if (scount == nsrcs)	/* all sources excluded */
		return 0;
	return 1;
}

static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
	struct ip_sf_list *psf;
	int i, scount;

	if (pmc->sfmode == MCAST_EXCLUDE)
		return igmp_xmarksources(pmc, nsrcs, srcs);

	/* mark INCLUDE-mode sources */
	scount = 0;
	for (psf=pmc->sources; psf; psf=psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i=0; i<nsrcs; i++)
			if (srcs[i] == psf->sf_inaddr) {
				psf->sf_gsresp = 1;
				scount++;
				break;
			}
	}
	if (!scount) {
		pmc->gsquery = 0;
		return 0;
	}
	pmc->gsquery = 1;
	return 1;
}

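/* Another member on the link reported the group, so our own pending
 * report can be suppressed (v1/v2 report suppression).
 */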
static void igmp_heard_report(struct in_device *in_dev, __be32 group)
{
	struct ip_mc_list *im;

	/* Timers are only set for non-local groups */

	if (group == IGMP_ALL_HOSTS)
		return;

	rcu_read_lock();
	for_each_pmc_rcu(in_dev, im) {
		if (im->multiaddr == group) {
			igmp_stop_timer(im);
			break;
		}
	}
	rcu_read_unlock();
}

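/* The querier's version is deduced from the packet: an 8-byte query
 * is v1 when the code field is 0 and v2 otherwise, while 12 bytes or
 * more is a v3 query (RFC 3376).
 */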
static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
	int len)
{
	struct igmphdr *ih = igmp_hdr(skb);
	struct igmpv3_query *ih3 = igmpv3_query_hdr(skb);
	struct ip_mc_list *im;
	__be32 group = ih->group;
	int max_delay;
	int mark = 0;


	if (len == 8) {
		if (ih->code == 0) {
			/* Alas, an old v1 router is present here. */

			max_delay = IGMP_Query_Response_Interval;
			in_dev->mr_v1_seen = jiffies +
				IGMP_V1_Router_Present_Timeout;
			group = 0;
		} else {
			/* v2 router present */
			max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
			in_dev->mr_v2_seen = jiffies +
				IGMP_V2_Router_Present_Timeout;
		}
		/* cancel the interface change timer */
		in_dev->mr_ifc_count = 0;
		if (del_timer(&in_dev->mr_ifc_timer))
			__in_dev_put(in_dev);
		/* clear deleted report items */
		igmpv3_clear_delrec(in_dev);
	} else if (len < 12) {
		return;	/* ignore bogus packet; freed by caller */
	} else if (IGMP_V1_SEEN(in_dev)) {
		/* This is a v3 query with v1 queriers present */
		max_delay = IGMP_Query_Response_Interval;
		group = 0;
	} else if (IGMP_V2_SEEN(in_dev)) {
		/* this is a v3 query with v2 queriers present;
		 * Interpretation of the max_delay code is problematic here.
		 * A real v2 host would use ih_code directly, while v3 has a
		 * different encoding. We use the v3 encoding as more likely
		 * to be intended in a v3 query.
		 */
		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
	} else { /* v3 */
		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
			return;

		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs) {
			if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
					   + ntohs(ih3->nsrcs)*sizeof(__be32)))
				return;
			ih3 = igmpv3_query_hdr(skb);
		}

		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
		if (!max_delay)
			max_delay = 1;	/* can't mod w/ 0 */
		in_dev->mr_maxdelay = max_delay;
		if (ih3->qrv)
			in_dev->mr_qrv = ih3->qrv;
		if (!group) { /* general query */
			if (ih3->nsrcs)
				return;	/* no sources allowed */
			igmp_gq_start_timer(in_dev);
			return;
		}
		/* mark sources to include, if group & source-specific */
		mark = ih3->nsrcs != 0;
	}

	/*
	 * - Start the timers in all of our membership records
	 *   that the query applies to for the interface on
	 *   which the query arrived excl. those that belong
	 *   to a "local" group (224.0.0.X)
	 * - For timers already running check if they need to
	 *   be reset.
	 * - Use the igmp->igmp_code field as the maximum
	 *   delay possible
	 */
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, im) {
		int changed;

		if (group && group != im->multiaddr)
			continue;
		if (im->multiaddr == IGMP_ALL_HOSTS)
			continue;
		spin_lock_bh(&im->lock);
		if (im->tm_running)
			im->gsquery = im->gsquery && mark;
		else
			im->gsquery = mark;
		changed = !im->gsquery ||
			  igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
		spin_unlock_bh(&im->lock);
		if (changed)
			igmp_mod_timer(im, max_delay);
	}
	rcu_read_unlock();
}

/* called in rcu_read_lock() section */
int igmp_rcv(struct sk_buff *skb)
{
	/* This basically follows the spec line by line -- see RFC1112 */
	struct igmphdr *ih;
	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
	int len = skb->len;

	if (in_dev == NULL)
		goto drop;

	if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
		goto drop;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_fold(skb->csum))
			break;
		/* fall through */
	case CHECKSUM_NONE:
		skb->csum = 0;
		if (__skb_checksum_complete(skb))
			goto drop;
	}

	ih = igmp_hdr(skb);
	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_QUERY:
		igmp_heard_query(in_dev, skb, len);
		break;
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* Is it our report looped back? */
		if (rt_is_output_route(skb_rtable(skb)))
			break;
		/* don't rely on MC router hearing unicast reports */
		if (skb->pkt_type == PACKET_MULTICAST ||
		    skb->pkt_type == PACKET_BROADCAST)
			igmp_heard_report(in_dev, ih->group);
		break;
	case IGMP_PIM:
#ifdef CONFIG_IP_PIMSM_V1
		return pim_rcv_v1(skb);
#endif
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
	case IGMP_DVMRP:
	case IGMP_TRACE:
	case IGMP_HOST_LEAVE_MESSAGE:
	case IGMP_MTRACE:
	case IGMP_MTRACE_RESP:
		break;
	default:
		break;
	}

drop:
	kfree_skb(skb);
	return 0;
}

#endif


/*
 *	Add a filter to a device
 */

static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
{
	char buf[MAX_ADDR_LEN];
	struct net_device *dev = in_dev->dev;

	/* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG.
	   We will get multicast token leakage, when IFF_MULTICAST
	   is changed. This check should be done in dev->set_multicast_list
	   routine. Something sort of:
	   if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
	   --ANK
	   */
	if (arp_mc_map(addr, buf, dev, 0) == 0)
		dev_mc_add(dev, buf);
}

/*
 *	Remove a filter from a device
 */

static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
{
	char buf[MAX_ADDR_LEN];
	struct net_device *dev = in_dev->dev;

	if (arp_mc_map(addr, buf, dev, 0) == 0)
		dev_mc_del(dev, buf);
}

#ifdef CONFIG_IP_MULTICAST
/*
 * deleted ip_mc_list manipulation
 */
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
{
	struct ip_mc_list *pmc;

	/* this is an "ip_mc_list" for convenience; only the fields below
	 * are actually used. In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
	pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
	if (!pmc)
		return;
	spin_lock_bh(&im->lock);
	pmc->interface = im->interface;
	in_dev_hold(in_dev);
	pmc->multiaddr = im->multiaddr;
	pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
		IGMP_Unsolicited_Report_Count;
	pmc->sfmode = im->sfmode;
	if (pmc->sfmode == MCAST_INCLUDE) {
		struct ip_sf_list *psf;

		pmc->tomb = im->tomb;
		pmc->sources = im->sources;
		im->tomb = im->sources = NULL;
		for (psf=pmc->sources; psf; psf=psf->sf_next)
			psf->sf_crcount = pmc->crcount;
	}
	spin_unlock_bh(&im->lock);

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc->next = in_dev->mc_tomb;
	in_dev->mc_tomb = pmc;
	spin_unlock_bh(&in_dev->mc_tomb_lock);
}

static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
{
	struct ip_mc_list *pmc, *pmc_prev;
	struct ip_sf_list *psf, *psf_next;

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc_prev = NULL;
	for (pmc=in_dev->mc_tomb; pmc; pmc=pmc->next) {
		if (pmc->multiaddr == multiaddr)
			break;
		pmc_prev = pmc;
	}
	if (pmc) {
		if (pmc_prev)
			pmc_prev->next = pmc->next;
		else
			in_dev->mc_tomb = pmc->next;
	}
	spin_unlock_bh(&in_dev->mc_tomb_lock);
	if (pmc) {
		for (psf=pmc->tomb; psf; psf=psf_next) {
			psf_next = psf->sf_next;
			kfree(psf);
		}
		in_dev_put(pmc->interface);
		kfree(pmc);
	}
}

static void igmpv3_clear_delrec(struct in_device *in_dev)
{
	struct ip_mc_list *pmc, *nextpmc;

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc = in_dev->mc_tomb;
	in_dev->mc_tomb = NULL;
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	for (; pmc; pmc = nextpmc) {
		nextpmc = pmc->next;
		ip_mc_clear_src(pmc);
		in_dev_put(pmc->interface);
		kfree(pmc);
	}
	/* clear dead sources, too */
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		struct ip_sf_list *psf, *psf_next;

		spin_lock_bh(&pmc->lock);
		psf = pmc->tomb;
		pmc->tomb = NULL;
		spin_unlock_bh(&pmc->lock);
		for (; psf; psf=psf_next) {
			psf_next = psf->sf_next;
			kfree(psf);
		}
	}
	rcu_read_unlock();
}
#endif

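/* Called when the last local user of the group is gone or the device
 * is going away: drop the hardware filter and, depending on the
 * compatibility mode, send a v2 leave (if we were the last reporter)
 * or queue IGMPv3 change records through the tomb list.
 */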
static void igmp_group_dropped(struct ip_mc_list *im)
{
	struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
	int reporter;
#endif

	if (im->loaded) {
		im->loaded = 0;
		ip_mc_filter_del(in_dev, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;

	reporter = im->reporter;
	igmp_stop_timer(im);

	if (!in_dev->dead) {
		if (IGMP_V1_SEEN(in_dev))
			return;
		if (IGMP_V2_SEEN(in_dev)) {
			if (reporter)
				igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
			return;
		}
		/* IGMPv3 */
		igmpv3_add_delrec(in_dev, im);

		igmp_ifc_event(in_dev);
	}
#endif
}

static void igmp_group_added(struct ip_mc_list *im)
{
	struct in_device *in_dev = im->interface;

	if (im->loaded == 0) {
		im->loaded = 1;
		ip_mc_filter_add(in_dev, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;

	if (in_dev->dead)
		return;
	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
		spin_lock_bh(&im->lock);
		igmp_start_timer(im, IGMP_Initial_Report_Delay);
		spin_unlock_bh(&im->lock);
		return;
	}
	/* else, v3 */

	im->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
		IGMP_Unsolicited_Report_Count;
	igmp_ifc_event(in_dev);
#endif
}


/*
 *	Multicast list managers
 */


/*
 *	A socket has joined a multicast group on device dev.
 */

void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
{
	struct ip_mc_list *im;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, im) {
		if (im->multiaddr == addr) {
			im->users++;
			ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
			goto out;
		}
	}

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	im->users = 1;
	im->interface = in_dev;
	in_dev_hold(in_dev);
	im->multiaddr = addr;
	/* initial mode is (EX, empty) */
	im->sfmode = MCAST_EXCLUDE;
	im->sfcount[MCAST_EXCLUDE] = 1;
	atomic_set(&im->refcnt, 1);
	spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
	setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im);
	im->unsolicit_count = IGMP_Unsolicited_Report_Count;
#endif

	im->next_rcu = in_dev->mc_list;
	in_dev->mc_count++;
	rcu_assign_pointer(in_dev->mc_list, im);

#ifdef CONFIG_IP_MULTICAST
	igmpv3_del_delrec(in_dev, im->multiaddr);
#endif
	igmp_group_added(im);
	if (!in_dev->dead)
		ip_rt_multicast_event(in_dev);
out:
	return;
}
EXPORT_SYMBOL(ip_mc_inc_group);

/*
 *	Resend IGMP JOIN report; used for bonding.
 *	Called with rcu_read_lock()
 */
void ip_mc_rejoin_groups(struct in_device *in_dev)
{
#ifdef CONFIG_IP_MULTICAST
	struct ip_mc_list *im;
	int type;

	for_each_pmc_rcu(in_dev, im) {
		if (im->multiaddr == IGMP_ALL_HOSTS)
			continue;

		/* a failover is happening and switches
		 * must be notified immediately
		 */
		if (IGMP_V1_SEEN(in_dev))
			type = IGMP_HOST_MEMBERSHIP_REPORT;
		else if (IGMP_V2_SEEN(in_dev))
			type = IGMPV2_HOST_MEMBERSHIP_REPORT;
		else
			type = IGMPV3_HOST_MEMBERSHIP_REPORT;
		igmp_send_report(in_dev, im, type);
	}
#endif
}
EXPORT_SYMBOL(ip_mc_rejoin_groups);

/*
 *	A socket has left a multicast group on device dev
 */

void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
{
	struct ip_mc_list *i;
	struct ip_mc_list __rcu **ip;

	ASSERT_RTNL();

	for (ip = &in_dev->mc_list;
	     (i = rtnl_dereference(*ip)) != NULL;
	     ip = &i->next_rcu) {
		if (i->multiaddr == addr) {
			if (--i->users == 0) {
				*ip = i->next_rcu;
				in_dev->mc_count--;
				igmp_group_dropped(i);
				ip_mc_clear_src(i);

				if (!in_dev->dead)
					ip_rt_multicast_event(in_dev);

				ip_ma_put(i);
				return;
			}
			break;
		}
	}
}
EXPORT_SYMBOL(ip_mc_dec_group);

/* Device changing type */

void ip_mc_unmap(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, pmc)
		igmp_group_dropped(pmc);
}

void ip_mc_remap(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, pmc)
		igmp_group_added(pmc);
}

/* Device going down */

void ip_mc_down(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, pmc)
		igmp_group_dropped(pmc);

#ifdef CONFIG_IP_MULTICAST
	in_dev->mr_ifc_count = 0;
	if (del_timer(&in_dev->mr_ifc_timer))
		__in_dev_put(in_dev);
	in_dev->mr_gq_running = 0;
	if (del_timer(&in_dev->mr_gq_timer))
		__in_dev_put(in_dev);
	igmpv3_clear_delrec(in_dev);
#endif

	ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
}

void ip_mc_init_dev(struct in_device *in_dev)
{
	ASSERT_RTNL();

	in_dev->mc_tomb = NULL;
#ifdef CONFIG_IP_MULTICAST
	in_dev->mr_gq_running = 0;
	setup_timer(&in_dev->mr_gq_timer, igmp_gq_timer_expire,
			(unsigned long)in_dev);
	in_dev->mr_ifc_count = 0;
	in_dev->mc_count = 0;
	setup_timer(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire,
			(unsigned long)in_dev);
	in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
#endif

	spin_lock_init(&in_dev->mc_tomb_lock);
}

/* Device going up */

void ip_mc_up(struct in_device *in_dev)
{
	struct ip_mc_list *pmc;

	ASSERT_RTNL();

	ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);

	for_each_pmc_rtnl(in_dev, pmc)
		igmp_group_added(pmc);
}

/*
 *	Device is about to be destroyed: clean up.
 */

void ip_mc_destroy_dev(struct in_device *in_dev)
{
	struct ip_mc_list *i;

	ASSERT_RTNL();

	/* Deactivate timers */
	ip_mc_down(in_dev);

	while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
		in_dev->mc_list = i->next_rcu;
		in_dev->mc_count--;

		/* We've dropped the groups in ip_mc_down already */
		ip_mc_clear_src(i);
		ip_ma_put(i);
	}
}

/* RTNL is locked */
static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
{
	struct net_device *dev = NULL;
	struct in_device *idev = NULL;

	if (imr->imr_ifindex) {
		idev = inetdev_by_index(net, imr->imr_ifindex);
		return idev;
	}
	if (imr->imr_address.s_addr) {
		dev = __ip_dev_find(net, imr->imr_address.s_addr, false);
		if (!dev)
			return NULL;
	}

	if (!dev) {
		struct rtable *rt = ip_route_output(net,
						    imr->imr_multiaddr.s_addr,
						    0, 0, 0);
		if (!IS_ERR(rt)) {
			dev = rt->dst.dev;
			ip_rt_put(rt);
		}
	}
	if (dev) {
		imr->imr_ifindex = dev->ifindex;
		idev = __in_dev_get_rtnl(dev);
	}
	return idev;
}

/*
 *	Join a socket to a group
 */
int sysctl_igmp_max_memberships __read_mostly = IP_MAX_MEMBERSHIPS;
int sysctl_igmp_max_msf __read_mostly = IP_MAX_MSF;


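/* Drop one reference on a single source filter. When neither mode
 * holds a count on the source any more it is freed, or - in IGMPv3
 * mode - moved to the tomb list so a BLOCK record can still be
 * retransmitted. Returns 1 when a state-change report is needed.
 */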
static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
	__be32 *psfsrc)
{
	struct ip_sf_list *psf, *psf_prev;
	int rv = 0;

	psf_prev = NULL;
	for (psf=pmc->sources; psf; psf=psf->sf_next) {
		if (psf->sf_inaddr == *psfsrc)
			break;
		psf_prev = psf;
	}
	if (!psf || psf->sf_count[sfmode] == 0) {
		/* source filter not found, or count wrong => bug */
		return -ESRCH;
	}
	psf->sf_count[sfmode]--;
	if (psf->sf_count[sfmode] == 0) {
		ip_rt_multicast_event(pmc->interface);
	}
	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
		struct in_device *in_dev = pmc->interface;
#endif

		/* no more filters for this source */
		if (psf_prev)
			psf_prev->sf_next = psf->sf_next;
		else
			pmc->sources = psf->sf_next;
#ifdef CONFIG_IP_MULTICAST
		if (psf->sf_oldin &&
		    !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
			psf->sf_crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
				IGMP_Unsolicited_Report_Count;
			psf->sf_next = pmc->tomb;
			pmc->tomb = psf;
			rv = 1;
		} else
#endif
			kfree(psf);
	}
	return rv;
}

#ifndef CONFIG_IP_MULTICAST
#define igmp_ifc_event(x)	do { } while (0)
#endif

static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
	int sfcount, __be32 *psfsrc, int delta)
{
	struct ip_mc_list *pmc;
	int changerec = 0;
	int i, err;

	if (!in_dev)
		return -ENODEV;
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		if (*pmca == pmc->multiaddr)
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		rcu_read_unlock();
		return -ESRCH;
	}
	spin_lock_bh(&pmc->lock);
	rcu_read_unlock();
#ifdef CONFIG_IP_MULTICAST
	sf_markstate(pmc);
#endif
	if (!delta) {
		err = -EINVAL;
		if (!pmc->sfcount[sfmode])
			goto out_unlock;
		pmc->sfcount[sfmode]--;
	}
	err = 0;
	for (i=0; i<sfcount; i++) {
		int rv = ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);

		changerec |= rv > 0;
		if (!err && rv < 0)
			err = rv;
	}
	if (pmc->sfmode == MCAST_EXCLUDE &&
	    pmc->sfcount[MCAST_EXCLUDE] == 0 &&
	    pmc->sfcount[MCAST_INCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
		struct ip_sf_list *psf;
#endif

		/* filter mode change */
		pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
		pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
			IGMP_Unsolicited_Report_Count;
		in_dev->mr_ifc_count = pmc->crcount;
		for (psf=pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		igmp_ifc_event(pmc->interface);
	} else if (sf_setstate(pmc) || changerec) {
		igmp_ifc_event(pmc->interface);
#endif
	}
out_unlock:
	spin_unlock_bh(&pmc->lock);
	return err;
}

/*
 * Add multicast single-source filter to the interface list
 */
static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
	__be32 *psfsrc, int delta)
{
	struct ip_sf_list *psf, *psf_prev;

	psf_prev = NULL;
	for (psf=pmc->sources; psf; psf=psf->sf_next) {
		if (psf->sf_inaddr == *psfsrc)
			break;
		psf_prev = psf;
	}
	if (!psf) {
		psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
		if (!psf)
			return -ENOBUFS;
		psf->sf_inaddr = *psfsrc;
		if (psf_prev) {
			psf_prev->sf_next = psf;
		} else
			pmc->sources = psf;
	}
	psf->sf_count[sfmode]++;
	if (psf->sf_count[sfmode] == 1) {
		ip_rt_multicast_event(pmc->interface);
	}
	return 0;
}

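/* sf_markstate() snapshots each source's include/exclude status in
 * sf_oldin before a filter update; sf_setstate() then compares the new
 * status against that snapshot and schedules crcount retransmissions
 * (and tomb records) for every source whose status changed.
 */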
#ifdef CONFIG_IP_MULTICAST
static void sf_markstate(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf;
	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];

	for (psf=pmc->sources; psf; psf=psf->sf_next)
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			psf->sf_oldin = mca_xcount ==
				psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
}

static int sf_setstate(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf, *dpsf;
	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
	int qrv = pmc->interface->mr_qrv;
	int new_in, rv;

	rv = 0;
	for (psf=pmc->sources; psf; psf=psf->sf_next) {
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
		if (new_in) {
			if (!psf->sf_oldin) {
				struct ip_sf_list *prev = NULL;

				for (dpsf=pmc->tomb; dpsf; dpsf=dpsf->sf_next) {
					if (dpsf->sf_inaddr == psf->sf_inaddr)
						break;
					prev = dpsf;
				}
				if (dpsf) {
					if (prev)
						prev->sf_next = dpsf->sf_next;
					else
						pmc->tomb = dpsf->sf_next;
					kfree(dpsf);
				}
				psf->sf_crcount = qrv;
				rv++;
			}
		} else if (psf->sf_oldin) {

			psf->sf_crcount = 0;
			/*
			 * add or update "delete" records if an active filter
			 * is now inactive
			 */
			for (dpsf=pmc->tomb; dpsf; dpsf=dpsf->sf_next)
				if (dpsf->sf_inaddr == psf->sf_inaddr)
					break;
			if (!dpsf) {
				dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
				if (!dpsf)
					continue;
				*dpsf = *psf;
				/* pmc->lock held by callers */
				dpsf->sf_next = pmc->tomb;
				pmc->tomb = dpsf;
			}
			dpsf->sf_crcount = qrv;
			rv++;
		}
	}
	return rv;
}
#endif

/*
 * Add multicast source filter list to the interface list
 */
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
	int sfcount, __be32 *psfsrc, int delta)
{
	struct ip_mc_list *pmc;
	int isexclude;
	int i, err;

	if (!in_dev)
		return -ENODEV;
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		if (*pmca == pmc->multiaddr)
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		rcu_read_unlock();
		return -ESRCH;
	}
	spin_lock_bh(&pmc->lock);
	rcu_read_unlock();

#ifdef CONFIG_IP_MULTICAST
	sf_markstate(pmc);
#endif
	isexclude = pmc->sfmode == MCAST_EXCLUDE;
	if (!delta)
		pmc->sfcount[sfmode]++;
	err = 0;
	for (i=0; i<sfcount; i++) {
		err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i], delta);
		if (err)
			break;
	}
	if (err) {
		int j;

		if (!delta)
			pmc->sfcount[sfmode]--;
		for (j=0; j<i; j++)
			(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
	} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
#ifdef CONFIG_IP_MULTICAST
		struct ip_sf_list *psf;
		in_dev = pmc->interface;
#endif

		/* filter mode change */
		if (pmc->sfcount[MCAST_EXCLUDE])
			pmc->sfmode = MCAST_EXCLUDE;
		else if (pmc->sfcount[MCAST_INCLUDE])
			pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
		/* else no filters; keep old mode for reports */

		pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
			IGMP_Unsolicited_Report_Count;
		in_dev->mr_ifc_count = pmc->crcount;
		for (psf=pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		igmp_ifc_event(in_dev);
	} else if (sf_setstate(pmc)) {
		igmp_ifc_event(in_dev);
#endif
	}
	spin_unlock_bh(&pmc->lock);
	return err;
}

static void ip_mc_clear_src(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf, *nextpsf;

	for (psf=pmc->tomb; psf; psf=nextpsf) {
		nextpsf = psf->sf_next;
		kfree(psf);
	}
	pmc->tomb = NULL;
	for (psf=pmc->sources; psf; psf=nextpsf) {
		nextpsf = psf->sf_next;
		kfree(psf);
	}
	pmc->sources = NULL;
	pmc->sfmode = MCAST_EXCLUDE;
	pmc->sfcount[MCAST_INCLUDE] = 0;
	pmc->sfcount[MCAST_EXCLUDE] = 1;
}


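/* ip_mc_join_group() backs the IP_ADD_MEMBERSHIP socket option: one
 * ip_mc_socklist per <group, interface> pair is linked on the socket,
 * bounded by sysctl_igmp_max_memberships, and the interface membership
 * itself is created or reference-counted via ip_mc_inc_group().
 */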
/*
 *	Join a multicast group
 */
int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
{
	int err;
	__be32 addr = imr->imr_multiaddr.s_addr;
	struct ip_mc_socklist *iml = NULL, *i;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	int ifindex;
	int count = 0;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	rtnl_lock();

	in_dev = ip_mc_find_dev(net, imr);

	if (!in_dev) {
		iml = NULL;
		err = -ENODEV;
		goto done;
	}

	err = -EADDRINUSE;
	ifindex = imr->imr_ifindex;
	for_each_pmc_rtnl(inet, i) {
		if (i->multi.imr_multiaddr.s_addr == addr &&
		    i->multi.imr_ifindex == ifindex)
			goto done;
		count++;
	}
	err = -ENOBUFS;
	if (count >= sysctl_igmp_max_memberships)
		goto done;
	iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
	if (iml == NULL)
		goto done;

	memcpy(&iml->multi, imr, sizeof(*imr));
	iml->next_rcu = inet->mc_list;
	iml->sflist = NULL;
	iml->sfmode = MCAST_EXCLUDE;
	rcu_assign_pointer(inet->mc_list, iml);
	ip_mc_inc_group(in_dev, addr);
	err = 0;
done:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(ip_mc_join_group);

static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
	struct in_device *in_dev)
{
	struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
	int err;

	if (psf == NULL) {
		/* any-source empty exclude case */
		return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
			iml->sfmode, 0, NULL, 0);
	}
	err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
			iml->sfmode, psf->sl_count, psf->sl_addr, 0);
	rcu_assign_pointer(iml->sflist, NULL);
	/* decrease mem now to avoid the memleak warning */
	atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
	kfree_rcu(psf, rcu);
	return err;
}

/*
 *	Ask a socket to leave a group.
 */

int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml;
	struct ip_mc_socklist __rcu **imlp;
	struct in_device *in_dev;
	struct net *net = sock_net(sk);
	__be32 group = imr->imr_multiaddr.s_addr;
	u32 ifindex;
	int ret = -EADDRNOTAVAIL;

	rtnl_lock();
	in_dev = ip_mc_find_dev(net, imr);
	ifindex = imr->imr_ifindex;
	for (imlp = &inet->mc_list;
	     (iml = rtnl_dereference(*imlp)) != NULL;
	     imlp = &iml->next_rcu) {
		if (iml->multi.imr_multiaddr.s_addr != group)
			continue;
		if (ifindex) {
			if (iml->multi.imr_ifindex != ifindex)
				continue;
		} else if (imr->imr_address.s_addr && imr->imr_address.s_addr !=
				iml->multi.imr_address.s_addr)
			continue;

		(void) ip_mc_leave_src(sk, iml, in_dev);

		*imlp = iml->next_rcu;

		if (in_dev)
			ip_mc_dec_group(in_dev, group);
		rtnl_unlock();
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
		kfree_rcu(iml, rcu);
		return 0;
	}
	if (!in_dev)
		ret = -ENODEV;
	rtnl_unlock();
	return ret;
}

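/* ip_mc_source() implements the per-source socket options
 * (IP_ADD/DROP_SOURCE_MEMBERSHIP, MCAST_BLOCK/UNBLOCK_SOURCE): it
 * updates the socket's source list and propagates the change to the
 * interface filter. Removing the last INCLUDE-mode source is treated
 * as a group leave.
 */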
int ip_mc_source(int add, int omode, struct sock *sk, struct
	ip_mreq_source *mreqs, int ifindex)
{
	int err;
	struct ip_mreqn imr;
	__be32 addr = mreqs->imr_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev = NULL;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;
	int i, j, rv;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	rtnl_lock();

	imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
	imr.imr_address.s_addr = mreqs->imr_interface;
	imr.imr_ifindex = ifindex;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}
	err = -EADDRNOTAVAIL;

	for_each_pmc_rtnl(inet, pmc) {
		if ((pmc->multi.imr_multiaddr.s_addr ==
		     imr.imr_multiaddr.s_addr) &&
		    (pmc->multi.imr_ifindex == imr.imr_ifindex))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	/* if a source filter was set, must be the same mode as before */
	if (pmc->sflist) {
		if (pmc->sfmode != omode) {
			err = -EINVAL;
			goto done;
		}
	} else if (pmc->sfmode != omode) {
		/* allow mode switches for empty-set filters */
		ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0);
		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0,
			NULL, 0);
		pmc->sfmode = omode;
	}

	psl = rtnl_dereference(pmc->sflist);
	if (!add) {
		if (!psl)
			goto done;	/* err = -EADDRNOTAVAIL */
		rv = !0;
		for (i=0; i<psl->sl_count; i++) {
			rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
				sizeof(__be32));
			if (rv == 0)
				break;
		}
		if (rv)		/* source not found */
			goto done;	/* err = -EADDRNOTAVAIL */

		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
			leavegroup = 1;
			goto done;
		}

		/* update the interface filter */
		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
			&mreqs->imr_sourceaddr, 1);

		for (j=i+1; j<psl->sl_count; j++)
			psl->sl_addr[j-1] = psl->sl_addr[j];
		psl->sl_count--;
		err = 0;
		goto done;
	}
	/* else, add a new source to the filter */

	if (psl && psl->sl_count >= sysctl_igmp_max_msf) {
		err = -ENOBUFS;
		goto done;
	}
	if (!psl || psl->sl_count == psl->sl_max) {
		struct ip_sf_socklist *newpsl;
		int count = IP_SFBLOCK;

		if (psl)
			count += psl->sl_max;
		newpsl = sock_kmalloc(sk, IP_SFLSIZE(count), GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = count;
		newpsl->sl_count = count - IP_SFBLOCK;
		if (psl) {
			for (i=0; i<psl->sl_count; i++)
				newpsl->sl_addr[i] = psl->sl_addr[i];
			/* decrease mem now to avoid the memleak warning */
			atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
			kfree_rcu(psl, rcu);
		}
		rcu_assign_pointer(pmc->sflist, newpsl);
		psl = newpsl;
	}
	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
	for (i=0; i<psl->sl_count; i++) {
		rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
			sizeof(__be32));
		if (rv == 0)
			break;
	}
	if (rv == 0)		/* address already there is an error */
		goto done;
	for (j=psl->sl_count-1; j>=i; j--)
		psl->sl_addr[j+1] = psl->sl_addr[j];
	psl->sl_addr[i] = mreqs->imr_sourceaddr;
	psl->sl_count++;
	err = 0;
	/* update the interface list */
	ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
		&mreqs->imr_sourceaddr, 1);
done:
	rtnl_unlock();
	if (leavegroup)
		return ip_mc_leave_group(sk, &imr);
	return err;
}

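/* ip_mc_msfilter() implements the IP_MSFILTER/MCAST_MSFILTER set
 * operation: the socket's whole filter (mode plus source list) is
 * replaced under RTNL, with (INCLUDE, empty) again meaning leave.
 */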
int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
2031
{
2032
int err = 0;
2033
struct ip_mreqn imr;
2034
__be32 addr = msf->imsf_multiaddr;
2035
struct ip_mc_socklist *pmc;
2036
struct in_device *in_dev;
2037
struct inet_sock *inet = inet_sk(sk);
2038
struct ip_sf_socklist *newpsl, *psl;
2039
struct net *net = sock_net(sk);
2040
int leavegroup = 0;
2041
2042
if (!ipv4_is_multicast(addr))
2043
return -EINVAL;
2044
if (msf->imsf_fmode != MCAST_INCLUDE &&
2045
msf->imsf_fmode != MCAST_EXCLUDE)
2046
return -EINVAL;
2047
2048
rtnl_lock();
2049
2050
imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
2051
imr.imr_address.s_addr = msf->imsf_interface;
2052
imr.imr_ifindex = ifindex;
2053
in_dev = ip_mc_find_dev(net, &imr);
2054
2055
if (!in_dev) {
2056
err = -ENODEV;
2057
goto done;
2058
}
2059
2060
/* special case - (INCLUDE, empty) == LEAVE_GROUP */
2061
if (msf->imsf_fmode == MCAST_INCLUDE && msf->imsf_numsrc == 0) {
2062
leavegroup = 1;
2063
goto done;
2064
}
2065
2066
for_each_pmc_rtnl(inet, pmc) {
2067
if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
2068
pmc->multi.imr_ifindex == imr.imr_ifindex)
2069
break;
2070
}
2071
if (!pmc) { /* must have a prior join */
2072
err = -EINVAL;
2073
goto done;
2074
}
2075
if (msf->imsf_numsrc) {
2076
newpsl = sock_kmalloc(sk, IP_SFLSIZE(msf->imsf_numsrc),
2077
GFP_KERNEL);
2078
if (!newpsl) {
2079
err = -ENOBUFS;
2080
goto done;
2081
}
2082
newpsl->sl_max = newpsl->sl_count = msf->imsf_numsrc;
2083
memcpy(newpsl->sl_addr, msf->imsf_slist,
2084
msf->imsf_numsrc * sizeof(msf->imsf_slist[0]));
2085
err = ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
2086
msf->imsf_fmode, newpsl->sl_count, newpsl->sl_addr, 0);
2087
if (err) {
2088
sock_kfree_s(sk, newpsl, IP_SFLSIZE(newpsl->sl_max));
2089
goto done;
2090
}
2091
} else {
2092
newpsl = NULL;
2093
(void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
2094
msf->imsf_fmode, 0, NULL, 0);
2095
}
2096
psl = rtnl_dereference(pmc->sflist);
2097
if (psl) {
2098
(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
2099
psl->sl_count, psl->sl_addr, 0);
2100
/* decrease mem now to avoid the memleak warning */
2101
atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
2102
kfree_rcu(psl, rcu);
2103
} else
2104
(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
2105
0, NULL, 0);
2106
rcu_assign_pointer(pmc->sflist, newpsl);
2107
pmc->sfmode = msf->imsf_fmode;
2108
err = 0;
2109
done:
2110
rtnl_unlock();
2111
if (leavegroup)
2112
err = ip_mc_leave_group(sk, &imr);
2113
return err;
2114
}
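
/*
 * Illustrative user-space sketch (assumption: struct ip_msfilter and
 * IP_MSFILTER_SIZE() as defined in <linux/in.h>, where the address
 * fields are raw __be32 values): setsockopt(IP_MSFILTER) replaces the
 * whole per-socket filter in a single call, which is the operation
 * implemented above.
 *
 *	struct ip_msfilter *msf = malloc(IP_MSFILTER_SIZE(1));
 *
 *	msf->imsf_multiaddr = inet_addr("232.1.1.1");
 *	msf->imsf_interface = htonl(INADDR_ANY);
 *	msf->imsf_fmode     = MCAST_INCLUDE;
 *	msf->imsf_numsrc    = 1;
 *	msf->imsf_slist[0]  = inet_addr("192.0.2.1");
 *	if (setsockopt(fd, IPPROTO_IP, IP_MSFILTER,
 *		       msf, IP_MSFILTER_SIZE(1)) < 0)
 *		perror("IP_MSFILTER");
 *
 * Note the special case near the top: MCAST_INCLUDE with no sources is
 * folded into ip_mc_leave_group(), per the (INCLUDE, empty) semantics.
 */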

int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
	struct ip_msfilter __user *optval, int __user *optlen)
{
	int err, len, count, copycount;
	struct ip_mreqn imr;
	__be32 addr = msf->imsf_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
	struct net *net = sock_net(sk);

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	rtnl_lock();

	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
	imr.imr_address.s_addr = msf->imsf_interface;
	imr.imr_ifindex = 0;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}
	err = -EADDRNOTAVAIL;

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
		    pmc->multi.imr_ifindex == imr.imr_ifindex)
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	msf->imsf_fmode = pmc->sfmode;
	psl = rtnl_dereference(pmc->sflist);
	rtnl_unlock();
	if (!psl) {
		len = 0;
		count = 0;
	} else {
		count = psl->sl_count;
	}
	copycount = count < msf->imsf_numsrc ? count : msf->imsf_numsrc;
	len = copycount * sizeof(psl->sl_addr[0]);
	msf->imsf_numsrc = count;
	if (put_user(IP_MSFILTER_SIZE(copycount), optlen) ||
	    copy_to_user(optval, msf, IP_MSFILTER_SIZE(0))) {
		return -EFAULT;
	}
	if (len &&
	    copy_to_user(&optval->imsf_slist[0], psl->sl_addr, len))
		return -EFAULT;
	return 0;
done:
	rtnl_unlock();
	return err;
}
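
/*
 * Illustrative two-step read-back (sketch, not part of this file):
 * only imsf_numsrc entries are copied out, but imsf_numsrc is
 * rewritten with the true source count, so callers typically probe
 * first and retry with a buffer sized via IP_MSFILTER_SIZE().
 *
 *	struct ip_msfilter probe;
 *	socklen_t len = IP_MSFILTER_SIZE(0);
 *
 *	probe.imsf_multiaddr = inet_addr("232.1.1.1");
 *	probe.imsf_interface = htonl(INADDR_ANY);
 *	probe.imsf_numsrc    = 0;
 *	getsockopt(fd, IPPROTO_IP, IP_MSFILTER, &probe, &len);
 *	... probe.imsf_numsrc now holds the count; allocate
 *	    IP_MSFILTER_SIZE(probe.imsf_numsrc) and call again ...
 */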

int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
	struct group_filter __user *optval, int __user *optlen)
{
	int err, i, count, copycount;
	struct sockaddr_in *psin;
	__be32 addr;
	struct ip_mc_socklist *pmc;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;

	psin = (struct sockaddr_in *)&gsf->gf_group;
	if (psin->sin_family != AF_INET)
		return -EINVAL;
	addr = psin->sin_addr.s_addr;
	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	rtnl_lock();

	err = -EADDRNOTAVAIL;

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == addr &&
		    pmc->multi.imr_ifindex == gsf->gf_interface)
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	gsf->gf_fmode = pmc->sfmode;
	psl = rtnl_dereference(pmc->sflist);
	rtnl_unlock();
	count = psl ? psl->sl_count : 0;
	copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
	gsf->gf_numsrc = count;
	if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
	    copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
		return -EFAULT;
	}
	for (i=0; i<copycount; i++) {
		struct sockaddr_storage ss;

		psin = (struct sockaddr_in *)&ss;
		memset(&ss, 0, sizeof(ss));
		psin->sin_family = AF_INET;
		psin->sin_addr.s_addr = psl->sl_addr[i];
		if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
			return -EFAULT;
	}
	return 0;
done:
	rtnl_unlock();
	return err;
}
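
/*
 * Illustrative sketch (assumption: the RFC 3678 definitions from
 * <linux/in.h>; the interface name is a placeholder): MCAST_MSFILTER
 * is the protocol-independent counterpart of IP_MSFILTER.  Addresses
 * travel inside struct sockaddr_storage, which is why the copy loop
 * above rewraps each __be32 source in a freshly zeroed sockaddr_in.
 *
 *	struct group_filter gf;
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&gf.gf_group;
 *	socklen_t len = GROUP_FILTER_SIZE(0);
 *
 *	memset(&gf, 0, sizeof(gf));
 *	gf.gf_interface = if_nametoindex("eth0");
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = inet_addr("232.1.1.1");
 *	gf.gf_numsrc = 0;
 *	getsockopt(fd, IPPROTO_IP, MCAST_MSFILTER, &gf, &len);
 */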

/*
 * check if a multicast source filter allows delivery for a given <src,dst,intf>
 */
int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *pmc;
	struct ip_sf_socklist *psl;
	int i;
	int ret;

	ret = 1;
	if (!ipv4_is_multicast(loc_addr))
		goto out;

	rcu_read_lock();
	for_each_pmc_rcu(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
		    pmc->multi.imr_ifindex == dif)
			break;
	}
	ret = inet->mc_all;
	if (!pmc)
		goto unlock;
	psl = rcu_dereference(pmc->sflist);
	ret = (pmc->sfmode == MCAST_EXCLUDE);
	if (!psl)
		goto unlock;

	for (i=0; i<psl->sl_count; i++) {
		if (psl->sl_addr[i] == rmt_addr)
			break;
	}
	ret = 0;
	if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
		goto unlock;
	if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
		goto unlock;
	ret = 1;
unlock:
	rcu_read_unlock();
out:
	return ret;
}
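
/*
 * To summarize the fall-through logic above: with no matching
 * membership the socket-wide mc_all flag decides; with a membership
 * but no source list the mode alone decides (EXCLUDE-of-nothing
 * accepts everything, INCLUDE-of-nothing accepts nothing); otherwise
 * delivery is allowed iff the source is listed under MCAST_INCLUDE,
 * or absent from the list under MCAST_EXCLUDE.
 */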

/*
 * A socket is closing.
 */

void ip_mc_drop_socket(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml;
	struct net *net = sock_net(sk);

	if (inet->mc_list == NULL)
		return;

	rtnl_lock();
	while ((iml = rtnl_dereference(inet->mc_list)) != NULL) {
		struct in_device *in_dev;

		inet->mc_list = iml->next_rcu;
		in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
		(void) ip_mc_leave_src(sk, iml, in_dev);
		if (in_dev != NULL)
			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
		kfree_rcu(iml, rcu);
	}
	rtnl_unlock();
}
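
/*
 * Teardown order above: each membership first releases its source
 * filter (ip_mc_leave_src), then drops the interface's group
 * reference if the device still exists, and only then frees the
 * socklist entry via kfree_rcu(), so concurrent RCU readers such as
 * ip_mc_sf_allow() never see freed memory.
 */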

/* called with rcu_read_lock() */
int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 proto)
{
	struct ip_mc_list *im;
	struct ip_sf_list *psf;
	int rv = 0;

	for_each_pmc_rcu(in_dev, im) {
		if (im->multiaddr == mc_addr)
			break;
	}
	if (im && proto == IPPROTO_IGMP) {
		rv = 1;
	} else if (im) {
		if (src_addr) {
			for (psf=im->sources; psf; psf=psf->sf_next) {
				if (psf->sf_inaddr == src_addr)
					break;
			}
			if (psf)
				rv = psf->sf_count[MCAST_INCLUDE] ||
					psf->sf_count[MCAST_EXCLUDE] !=
					im->sfcount[MCAST_EXCLUDE];
			else
				rv = im->sfcount[MCAST_EXCLUDE] != 0;
		} else
			rv = 1; /* unspecified source; tentatively allow */
	}
	return rv;
}
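
/*
 * In other words: IGMP itself is always accepted once the group is
 * joined on the interface.  For data, a listed source passes if any
 * membership INCLUDEs it, or if some EXCLUDE-mode membership does not
 * exclude it (psf->sf_count[MCAST_EXCLUDE] != im->sfcount[MCAST_EXCLUDE]);
 * an unlisted source passes only if an EXCLUDE-mode membership exists.
 */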

#if defined(CONFIG_PROC_FS)
struct igmp_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct in_device *in_dev;
};

#define igmp_mc_seq_private(seq)	((struct igmp_mc_iter_state *)(seq)->private)

static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ip_mc_list *im = NULL;
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	state->in_dev = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct in_device *in_dev;

		in_dev = __in_dev_get_rcu(state->dev);
		if (!in_dev)
			continue;
		im = rcu_dereference(in_dev->mc_list);
		if (im) {
			state->in_dev = in_dev;
			break;
		}
	}
	return im;
}

static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
{
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	im = rcu_dereference(im->next_rcu);
	while (!im) {
		state->dev = next_net_device_rcu(state->dev);
		if (!state->dev) {
			state->in_dev = NULL;
			break;
		}
		state->in_dev = __in_dev_get_rcu(state->dev);
		if (!state->in_dev)
			continue;
		im = rcu_dereference(state->in_dev->mc_list);
	}
	return im;
}

static struct ip_mc_list *igmp_mc_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip_mc_list *im = igmp_mc_get_first(seq);
	if (im)
		while (pos && (im = igmp_mc_get_next(seq, im)) != NULL)
			--pos;
	return pos ? NULL : im;
}

static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	rcu_read_lock();
	return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_mc_list *im;
	if (v == SEQ_START_TOKEN)
		im = igmp_mc_get_first(seq);
	else
		im = igmp_mc_get_next(seq, v);
	++*pos;
	return im;
}

static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	state->in_dev = NULL;
	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp_mc_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "Idx\tDevice    : Count Querier\tGroup    Users Timer\tReporter\n");
	else {
		struct ip_mc_list *im = (struct ip_mc_list *)v;
		struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
		char *querier;
#ifdef CONFIG_IP_MULTICAST
		querier = IGMP_V1_SEEN(state->in_dev) ? "V1" :
			  IGMP_V2_SEEN(state->in_dev) ? "V2" :
			  "V3";
#else
		querier = "NONE";
#endif

		if (rcu_dereference(state->in_dev->mc_list) == im) {
			seq_printf(seq, "%d\t%-10s: %5d %7s\n",
				   state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
		}

		seq_printf(seq,
			   "\t\t\t\t%08X %5d %d:%08lX\t\t%d\n",
			   im->multiaddr, im->users,
			   im->tm_running, im->tm_running ?
			   jiffies_to_clock_t(im->timer.expires-jiffies) : 0,
			   im->reporter);
	}
	return 0;
}
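
/*
 * Sketch of the resulting /proc/net/igmp layout (values illustrative).
 * The Group column is the raw __be32 printed with %08X, so it appears
 * byte-swapped on little-endian machines (224.0.0.1 -> 010000E0):
 *
 *	Idx	Device    : Count Querier	Group    Users Timer	Reporter
 *	1	lo        :     1      V3
 *				010000E0     1 0:00000000		0
 */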

static const struct seq_operations igmp_mc_seq_ops = {
	.start = igmp_mc_seq_start,
	.next  = igmp_mc_seq_next,
	.stop  = igmp_mc_seq_stop,
	.show  = igmp_mc_seq_show,
};

static int igmp_mc_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &igmp_mc_seq_ops,
			sizeof(struct igmp_mc_iter_state));
}

static const struct file_operations igmp_mc_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = igmp_mc_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

struct igmp_mcf_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct in_device *idev;
	struct ip_mc_list *im;
};

#define igmp_mcf_seq_private(seq)	((struct igmp_mcf_iter_state *)(seq)->private)

static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ip_sf_list *psf = NULL;
	struct ip_mc_list *im = NULL;
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	state->idev = NULL;
	state->im = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct in_device *idev;
		idev = __in_dev_get_rcu(state->dev);
		if (unlikely(idev == NULL))
			continue;
		im = rcu_dereference(idev->mc_list);
		if (likely(im != NULL)) {
			spin_lock_bh(&im->lock);
			psf = im->sources;
			if (likely(psf != NULL)) {
				state->im = im;
				state->idev = idev;
				break;
			}
			spin_unlock_bh(&im->lock);
		}
	}
	return psf;
}

static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_list *psf)
{
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	psf = psf->sf_next;
	while (!psf) {
		spin_unlock_bh(&state->im->lock);
		state->im = state->im->next;
		while (!state->im) {
			state->dev = next_net_device_rcu(state->dev);
			if (!state->dev) {
				state->idev = NULL;
				goto out;
			}
			state->idev = __in_dev_get_rcu(state->dev);
			if (!state->idev)
				continue;
			state->im = rcu_dereference(state->idev->mc_list);
		}
		if (!state->im)
			break;
		spin_lock_bh(&state->im->lock);
		psf = state->im->sources;
	}
out:
	return psf;
}
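
/*
 * The walk above runs three levels deep (device -> multicast group ->
 * source) under rcu_read_lock().  Note the lock handoff: a group's
 * spinlock is released before stepping past it and the next group's
 * lock is taken before its source list is touched, so exactly one
 * im->lock is held whenever a source entry is being shown.
 */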

static struct ip_sf_list *igmp_mcf_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip_sf_list *psf = igmp_mcf_get_first(seq);
	if (psf)
		while (pos && (psf = igmp_mcf_get_next(seq, psf)) != NULL)
			--pos;
	return pos ? NULL : psf;
}

static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	rcu_read_lock();
	return *pos ? igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_sf_list *psf;
	if (v == SEQ_START_TOKEN)
		psf = igmp_mcf_get_first(seq);
	else
		psf = igmp_mcf_get_next(seq, v);
	++*pos;
	return psf;
}

static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
	if (likely(state->im != NULL)) {
		spin_unlock_bh(&state->im->lock);
		state->im = NULL;
	}
	state->idev = NULL;
	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
{
	struct ip_sf_list *psf = (struct ip_sf_list *)v;
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq,
			   "%3s %6s "
			   "%10s %10s %6s %6s\n", "Idx",
			   "Device", "MCA",
			   "SRC", "INC", "EXC");
	} else {
		seq_printf(seq,
			   "%3d %6.6s 0x%08x "
			   "0x%08x %6lu %6lu\n",
			   state->dev->ifindex, state->dev->name,
			   ntohl(state->im->multiaddr),
			   ntohl(psf->sf_inaddr),
			   psf->sf_count[MCAST_INCLUDE],
			   psf->sf_count[MCAST_EXCLUDE]);
	}
	return 0;
}
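
/*
 * Sketch of the resulting /proc/net/mcfilter layout (values
 * illustrative).  Unlike /proc/net/igmp, both addresses are converted
 * with ntohl() before printing, so 0xe8010101 below is 232.1.1.1:
 *
 *	Idx Device        MCA        SRC    INC    EXC
 *	  2   eth0 0xe8010101 0xc0000201      1      0
 */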

static const struct seq_operations igmp_mcf_seq_ops = {
	.start = igmp_mcf_seq_start,
	.next  = igmp_mcf_seq_next,
	.stop  = igmp_mcf_seq_stop,
	.show  = igmp_mcf_seq_show,
};

static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &igmp_mcf_seq_ops,
			sizeof(struct igmp_mcf_iter_state));
}

static const struct file_operations igmp_mcf_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = igmp_mcf_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

static int __net_init igmp_net_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(net, "igmp", S_IRUGO, &igmp_mc_seq_fops);
	if (!pde)
		goto out_igmp;
	pde = proc_net_fops_create(net, "mcfilter", S_IRUGO, &igmp_mcf_seq_fops);
	if (!pde)
		goto out_mcfilter;
	return 0;

out_mcfilter:
	proc_net_remove(net, "igmp");
out_igmp:
	return -ENOMEM;
}

static void __net_exit igmp_net_exit(struct net *net)
{
	proc_net_remove(net, "mcfilter");
	proc_net_remove(net, "igmp");
}

static struct pernet_operations igmp_net_ops = {
	.init = igmp_net_init,
	.exit = igmp_net_exit,
};

int __init igmp_mc_proc_init(void)
{
	return register_pernet_subsys(&igmp_net_ops);
}
#endif