GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/netinet/igmp.c
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2007-2009 Bruce Simpson.
 * Copyright (c) 1988 Stephen Deering.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Stephen Deering of Stanford University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Internet Group Management Protocol (IGMP) routines.
 * [RFC1112, RFC2236, RFC3376]
 *
 * Written by Steve Deering, Stanford, May 1988.
 * Modified by Rosen Sharma, Stanford, Aug 1994.
 * Modified by Bill Fenner, Xerox PARC, Feb 1995.
 * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995.
 * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson.
 *
 * MULTICAST Revision: 3.5.1.4
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/condvar.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/igmp.h>
#include <netinet/igmp_var.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

#ifndef KTR_IGMPV3
#define	KTR_IGMPV3 KTR_INET
#endif

#define	IGMP_SLOWHZ	2	/* 2 slow timeouts per second */
#define	IGMP_FASTHZ	5	/* 5 fast timeouts per second */
#define	IGMP_RESPONSE_BURST_INTERVAL	(IGMP_FASTHZ / 2)

static struct igmp_ifsoftc *
		igi_alloc_locked(struct ifnet *);
static void	igi_delete_locked(const struct ifnet *);
static void	igmp_dispatch_queue(struct mbufq *, int, const int);
static void	igmp_fasttimo_vnet(void);
static void	igmp_final_leave(struct in_multi *, struct igmp_ifsoftc *);
static int	igmp_handle_state_change(struct in_multi *,
		    struct igmp_ifsoftc *);
static int	igmp_initial_join(struct in_multi *, struct igmp_ifsoftc *);
static int	igmp_input_v1_query(struct ifnet *, const struct ip *,
		    const struct igmp *);
static int	igmp_input_v2_query(struct ifnet *, const struct ip *,
		    const struct igmp *);
static int	igmp_input_v3_query(struct ifnet *, const struct ip *,
		    /*const*/ struct igmpv3 *);
static int	igmp_input_v3_group_query(struct in_multi *,
		    struct igmp_ifsoftc *, int, /*const*/ struct igmpv3 *);
static int	igmp_input_v1_report(struct ifnet *, /*const*/ struct ip *,
		    /*const*/ struct igmp *);
static int	igmp_input_v2_report(struct ifnet *, /*const*/ struct ip *,
		    /*const*/ struct igmp *);
static void	igmp_intr(struct mbuf *);
static int	igmp_isgroupreported(const struct in_addr);
static struct mbuf *
		igmp_ra_alloc(void);
#ifdef KTR
static char *	igmp_rec_type_to_str(const int);
#endif
static void	igmp_set_version(struct igmp_ifsoftc *, const int);
static void	igmp_slowtimo_vnet(void);
static int	igmp_v1v2_queue_report(struct in_multi *, const int);
static void	igmp_v1v2_process_group_timer(struct in_multi *, const int);
static void	igmp_v1v2_process_querier_timers(struct igmp_ifsoftc *);
static void	igmp_v2_update_group(struct in_multi *, const int);
static void	igmp_v3_cancel_link_timers(struct igmp_ifsoftc *);
static void	igmp_v3_dispatch_general_query(struct igmp_ifsoftc *);
static struct mbuf *
		igmp_v3_encap_report(struct ifnet *, struct mbuf *);
static int	igmp_v3_enqueue_group_record(struct mbufq *,
		    struct in_multi *, const int, const int, const int);
static int	igmp_v3_enqueue_filter_change(struct mbufq *,
		    struct in_multi *);
static void	igmp_v3_process_group_timers(struct in_multi_head *,
		    struct mbufq *, struct mbufq *, struct in_multi *,
		    const int);
static int	igmp_v3_merge_state_changes(struct in_multi *,
		    struct mbufq *);
static void	igmp_v3_suppress_group_record(struct in_multi *);
static int	sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS);
static int	sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS);
static int	sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS);
static int	sysctl_igmp_stat(SYSCTL_HANDLER_ARGS);

static const struct netisr_handler igmp_nh = {
	.nh_name = "igmp",
	.nh_handler = igmp_intr,
	.nh_proto = NETISR_IGMP,
	.nh_policy = NETISR_POLICY_SOURCE,
};

/*
 * System-wide globals.
 *
 * Unlocked access to these is OK, except for the global IGMP output
 * queue. The IGMP subsystem lock ends up being system-wide for the moment,
 * because all VIMAGEs have to share a global output queue, as netisrs
 * themselves are not virtualized.
 *
 * Locking:
 *  * The permitted lock order is: IN_MULTI_LIST_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
 *    Any may be taken independently; if any are held at the same
 *    time, the above lock order must be followed.
 *  * All output is delegated to the netisr.
 *  * IN_MULTI_LIST_LOCK covers in_multi.
 *  * IGMP_LOCK covers igmp_ifsoftc and any global variables in this file,
 *    including the output queue.
 *  * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
 *    per-link state iterators.
 *  * igmp_ifsoftc is valid as long as PF_INET is attached to the interface,
 *    therefore it is not refcounted.
 *    We allow unlocked reads of igmp_ifsoftc when accessed via in_multi.
 *
 * Reference counting
 *  * IGMP acquires its own reference every time an in_multi is passed to
 *    it and the group is being joined for the first time.
 *  * IGMP releases its reference(s) on in_multi in a deferred way,
 *    because the operations which process the release run as part of
 *    a loop whose control variables are directly affected by the release
 *    (that, and not recursing on the IF_ADDR_LOCK).
 *
 * VIMAGE: Each in_multi corresponds to an ifp, and each ifp corresponds
 * to a vnet in ifp->if_vnet.
 *
 * SMPng: XXX We may potentially race operations on ifma_protospec.
 * The problem is that we currently lack a clean way of taking the
 * IF_ADDR_LOCK() between the ifnet and in layers w/o recursing,
 * as anything which modifies ifma needs to be covered by that lock.
 * So check for ifma_protospec being NULL before proceeding.
 */
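/*
 * A minimal sketch of the canonical locking pattern used throughout this
 * file (see e.g. sysctl_igmp_default_version() below), following the
 * documented lock order; release before returning:
 *
 *	IN_MULTI_LIST_LOCK();
 *	IGMP_LOCK();
 *	... modify igmp_ifsoftc and/or in_multi state ...
 *	IGMP_UNLOCK();
 *	IN_MULTI_LIST_UNLOCK();
 */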
struct mtx igmp_mtx;

struct mbuf *m_raopt;		/* Router Alert option */
static MALLOC_DEFINE(M_IGMP, "igmp", "igmp state");

/*
 * VIMAGE-wide globals.
 *
 * The IGMPv3 timers themselves need to run per-image, however, for
 * historical reasons, timers run globally. This needs to be improved.
 * An ifnet can only be in one vimage at a time, and the loopback
 * ifnet, loif, is itself virtualized.
 * It would otherwise be possible to seriously hose IGMP state,
 * and create inconsistencies in upstream multicast routing, if you have
 * multiple VIMAGEs running on the same link joining different multicast
 * groups, UNLESS the "primary IP address" is different. This is because
 * IGMP for IPv4 does not force link-local addresses to be used for each
 * node, unlike MLD for IPv6.
 * Obviously the IGMPv3 per-interface state has per-vimage granularity
 * also as a result.
 *
 * FUTURE: Stop using IFP_TO_IA/INADDR_ANY, and use source address selection
 * policy to control the address used by IGMP on the link.
 */
VNET_DEFINE_STATIC(int, interface_timers_running);	/* IGMPv3 general
							 * query response */
VNET_DEFINE_STATIC(int, state_change_timers_running);	/* IGMPv3 state-change
							 * retransmit */
VNET_DEFINE_STATIC(int, current_state_timers_running);	/* IGMPv1/v2 host
							 * report; IGMPv3 g/sg
							 * query response */

#define	V_interface_timers_running	VNET(interface_timers_running)
#define	V_state_change_timers_running	VNET(state_change_timers_running)
#define	V_current_state_timers_running	VNET(current_state_timers_running)

VNET_PCPUSTAT_DEFINE(struct igmpstat, igmpstat);
VNET_PCPUSTAT_SYSINIT(igmpstat);
VNET_PCPUSTAT_SYSUNINIT(igmpstat);

VNET_DEFINE_STATIC(LIST_HEAD(, igmp_ifsoftc), igi_head) =
    LIST_HEAD_INITIALIZER(igi_head);
VNET_DEFINE_STATIC(struct timeval, igmp_gsrdelay) = {10, 0};

#define	V_igi_head			VNET(igi_head)
#define	V_igmp_gsrdelay			VNET(igmp_gsrdelay)

VNET_DEFINE_STATIC(int, igmp_recvifkludge) = 1;
VNET_DEFINE_STATIC(int, igmp_sendra) = 1;
VNET_DEFINE_STATIC(int, igmp_sendlocal) = 1;
VNET_DEFINE_STATIC(int, igmp_v1enable) = 1;
VNET_DEFINE_STATIC(int, igmp_v2enable) = 1;
VNET_DEFINE_STATIC(int, igmp_legacysupp);
VNET_DEFINE_STATIC(int, igmp_default_version) = IGMP_VERSION_3;

#define	V_igmp_recvifkludge		VNET(igmp_recvifkludge)
#define	V_igmp_sendra			VNET(igmp_sendra)
#define	V_igmp_sendlocal		VNET(igmp_sendlocal)
#define	V_igmp_v1enable			VNET(igmp_v1enable)
#define	V_igmp_v2enable			VNET(igmp_v2enable)
#define	V_igmp_legacysupp		VNET(igmp_legacysupp)
#define	V_igmp_default_version		VNET(igmp_default_version)

/*
 * Virtualized sysctls.
 */
SYSCTL_PROC(_net_inet_igmp, IGMPCTL_STATS, stats,
    CTLFLAG_VNET | CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(igmpstat), 0, sysctl_igmp_stat, "S,igmpstat",
    "IGMP statistics (struct igmpstat, netinet/igmp_var.h)");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, recvifkludge, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_recvifkludge), 0,
    "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, sendra, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_sendra), 0,
    "Send IP Router Alert option in IGMPv2/v3 messages");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, sendlocal, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_sendlocal), 0,
    "Send IGMP membership reports for 224.0.0.0/24 groups");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, v1enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_v1enable), 0,
    "Enable backwards compatibility with IGMPv1");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, v2enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_v2enable), 0,
    "Enable backwards compatibility with IGMPv2");
SYSCTL_INT(_net_inet_igmp, OID_AUTO, legacysupp, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(igmp_legacysupp), 0,
    "Allow v1/v2 reports to suppress v3 group responses");
SYSCTL_PROC(_net_inet_igmp, OID_AUTO, default_version,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(igmp_default_version), 0, sysctl_igmp_default_version, "I",
    "Default version of IGMP to run on each interface");
SYSCTL_PROC(_net_inet_igmp, OID_AUTO, gsrdelay,
    CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(igmp_gsrdelay.tv_sec), 0, sysctl_igmp_gsr, "I",
    "Rate limit for IGMPv3 Group-and-Source queries in seconds");

/*
 * Non-virtualized sysctls.
 */
static SYSCTL_NODE(_net_inet_igmp, OID_AUTO, ifinfo,
    CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_igmp_ifinfo,
    "Per-interface IGMPv3 state");

static __inline void
igmp_save_context(struct mbuf *m, struct ifnet *ifp)
{

#ifdef VIMAGE
	m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
#endif /* VIMAGE */
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.flowid = ifp->if_index;
}

static __inline void
igmp_scrub_context(struct mbuf *m)
{

	m->m_pkthdr.PH_loc.ptr = NULL;
	m->m_pkthdr.flowid = 0;
}

/*
 * Restore context from a queued IGMP output chain.
 * Return saved ifindex.
 *
 * VIMAGE: The assertion is there to make sure that we
 * actually called CURVNET_SET() with what's in the mbuf chain.
 */
static __inline uint32_t
igmp_restore_context(struct mbuf *m)
{

#ifdef notyet
#if defined(VIMAGE) && defined(INVARIANTS)
	KASSERT(curvnet == (m->m_pkthdr.PH_loc.ptr),
	    ("%s: called when curvnet was not restored", __func__));
#endif
#endif
	return (m->m_pkthdr.flowid);
}
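/*
 * Sketch of the context round trip, assuming the queueing and netisr
 * paths in this file: the output path stashes the receiving ifp (and,
 * under VIMAGE, the vnet) in the mbuf packet header before handing the
 * chain to the netisr, and igmp_intr() later recovers the ifindex:
 *
 *	igmp_save_context(m, ifp);		- before netisr dispatch
 *	...
 *	ifindex = igmp_restore_context(m);	- in igmp_intr()
 */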

/*
 * IGMP statistics.
 */
static int
sysctl_igmp_stat(SYSCTL_HANDLER_ARGS)
{
	struct igmpstat igps0;
	int error;
	char *p;

	error = sysctl_wire_old_buffer(req, sizeof(struct igmpstat));
	if (error)
		return (error);

	if (req->oldptr != NULL) {
		if (req->oldlen < sizeof(struct igmpstat))
			error = ENOMEM;
		else {
			/*
			 * Copy the counters, and explicitly set the struct's
			 * version and length fields.
			 */
			COUNTER_ARRAY_COPY(VNET(igmpstat), &igps0,
			    sizeof(struct igmpstat) / sizeof(uint64_t));
			igps0.igps_version = IGPS_VERSION_3;
			igps0.igps_len = IGPS_VERSION3_LEN;
			error = SYSCTL_OUT(req, &igps0,
			    sizeof(struct igmpstat));
		}
	} else
		req->validlen = sizeof(struct igmpstat);
	if (error)
		goto out;
	if (req->newptr != NULL) {
		if (req->newlen < sizeof(struct igmpstat))
			error = ENOMEM;
		else
			error = SYSCTL_IN(req, &igps0,
			    sizeof(igps0));
		if (error)
			goto out;
		/*
		 * igps0 must be "all zero".
		 */
		p = (char *)&igps0;
		while (p < (char *)&igps0 + sizeof(igps0) && *p == '\0')
			p++;
		if (p != (char *)&igps0 + sizeof(igps0)) {
			error = EINVAL;
			goto out;
		}
		COUNTER_ARRAY_ZERO(VNET(igmpstat),
		    sizeof(struct igmpstat) / sizeof(uint64_t));
	}
out:
	return (error);
}

/*
 * Retrieve or set default IGMP version.
 *
 * VIMAGE: Assume curvnet set by caller.
 * SMPng: NOTE: Serialized by IGMP lock.
 */
static int
sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS)
{
	struct epoch_tracker et;
	int error;
	int new;
	struct igmp_ifsoftc *igi;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error)
		return (error);

	new = V_igmp_default_version;

	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error || !req->newptr)
		return (error);

	if (new < IGMP_VERSION_1 || new > IGMP_VERSION_3)
		return (EINVAL);

	IN_MULTI_LIST_LOCK();
	IGMP_LOCK();
	NET_EPOCH_ENTER(et);

	if (V_igmp_default_version != new) {
		CTR2(KTR_IGMPV3, "change igmp_default_version from %d to %d",
		    V_igmp_default_version, new);

		V_igmp_default_version = new;

		LIST_FOREACH(igi, &V_igi_head, igi_link) {
			if (igi->igi_version > V_igmp_default_version) {
				igmp_set_version(igi, V_igmp_default_version);
			}
		}
	}

	NET_EPOCH_EXIT(et);
	IN_MULTI_LIST_UNLOCK();
	IGMP_UNLOCK();
	return (error);
}

/*
 * Retrieve or set threshold between group-source queries in seconds.
 *
 * VIMAGE: Assume curvnet set by caller.
 * SMPng: NOTE: Serialized by IGMP lock.
 */
static int
sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS)
{
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error)
		return (error);

	IGMP_LOCK();

	i = V_igmp_gsrdelay.tv_sec;

	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		goto out_locked;

	if (i < -1 || i >= 60) {
		error = EINVAL;
		goto out_locked;
	}

	CTR2(KTR_IGMPV3, "change igmp_gsrdelay from %d to %d",
	    V_igmp_gsrdelay.tv_sec, i);
	V_igmp_gsrdelay.tv_sec = i;

out_locked:
	IGMP_UNLOCK();
	return (error);
}

/*
 * Expose struct igmp_ifsoftc to userland, keyed by ifindex.
 * For use by ifmcstat(8).
 *
 * SMPng: NOTE: Does an unlocked ifindex space read.
 * VIMAGE: Assume curvnet set by caller. The node handler itself
 * is not directly virtualized.
 */
static int
sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS)
{
	struct epoch_tracker et;
	int *name;
	int error;
	u_int namelen;
	struct ifnet *ifp;
	struct igmp_ifsoftc *igi;

	name = (int *)arg1;
	namelen = arg2;

	if (req->newptr != NULL)
		return (EPERM);

	if (namelen != 1)
		return (EINVAL);

	error = sysctl_wire_old_buffer(req, sizeof(struct igmp_ifinfo));
	if (error)
		return (error);

	IN_MULTI_LIST_LOCK();
	IGMP_LOCK();

	error = ENOENT;

	NET_EPOCH_ENTER(et);
	ifp = ifnet_byindex(name[0]);
	NET_EPOCH_EXIT(et);
	if (ifp == NULL)
		goto out_locked;

	LIST_FOREACH(igi, &V_igi_head, igi_link) {
		if (ifp == igi->igi_ifp) {
			struct igmp_ifinfo info;

			info.igi_version = igi->igi_version;
			info.igi_v1_timer = igi->igi_v1_timer;
			info.igi_v2_timer = igi->igi_v2_timer;
			info.igi_v3_timer = igi->igi_v3_timer;
			info.igi_flags = igi->igi_flags;
			info.igi_rv = igi->igi_rv;
			info.igi_qi = igi->igi_qi;
			info.igi_qri = igi->igi_qri;
			info.igi_uri = igi->igi_uri;
			error = SYSCTL_OUT(req, &info, sizeof(info));
			break;
		}
	}

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_LIST_UNLOCK();
	return (error);
}

/*
 * Dispatch an entire queue of pending packet chains
 * using the netisr.
 * VIMAGE: Assumes the vnet pointer has been set.
 */
static void
igmp_dispatch_queue(struct mbufq *mq, int limit, const int loop)
{
	struct epoch_tracker et;
	struct mbuf *m;

	NET_EPOCH_ENTER(et);
	while ((m = mbufq_dequeue(mq)) != NULL) {
		CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, m, mq);
		if (loop)
			m->m_flags |= M_IGMP_LOOP;
		netisr_dispatch(NETISR_IGMP, m);
		if (--limit == 0)
			break;
	}
	NET_EPOCH_EXIT(et);
}

/*
 * Filter outgoing IGMP report state by group.
 *
 * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1).
 * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are
 * disabled for all groups in the 224.0.0.0/24 link-local scope. However,
 * this may break certain IGMP snooping switches which rely on the old
 * report behaviour.
 *
 * Return zero if the given group is one for which IGMP reports
 * should be suppressed, or non-zero if reports should be issued.
 */
static __inline int
igmp_isgroupreported(const struct in_addr addr)
{

	if (in_allhosts(addr) ||
	    ((!V_igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr)))))
		return (0);

	return (1);
}

/*
 * Construct a Router Alert option to use in outgoing packets.
 */
static struct mbuf *
igmp_ra_alloc(void)
{
	struct mbuf *m;
	struct ipoption *p;

	m = m_get(M_WAITOK, MT_DATA);
	p = mtod(m, struct ipoption *);
	p->ipopt_dst.s_addr = INADDR_ANY;
	p->ipopt_list[0] = (char)IPOPT_RA;	/* Router Alert Option */
	p->ipopt_list[1] = 0x04;		/* 4 bytes long */
	p->ipopt_list[2] = IPOPT_EOL;		/* End of IP option list */
	p->ipopt_list[3] = 0x00;		/* pad byte */
	m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1];

	return (m);
}
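/*
 * For reference, the four option bytes built above encode the RFC 2113
 * Router Alert option: IPOPT_RA is 148 (0x94) and IPOPT_EOL is 0, so the
 * on-wire encoding is
 *
 *	0x94 0x04 0x00 0x00
 *
 * i.e. type, length 4, and a zero 16-bit option value.
 */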

/*
 * Attach IGMP when PF_INET is attached to an interface.
 */
struct igmp_ifsoftc *
igmp_domifattach(struct ifnet *ifp)
{
	struct igmp_ifsoftc *igi;

	CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
	    __func__, ifp, ifp->if_xname);

	IGMP_LOCK();

	igi = igi_alloc_locked(ifp);
	if (!(ifp->if_flags & IFF_MULTICAST))
		igi->igi_flags |= IGIF_SILENT;

	IGMP_UNLOCK();

	return (igi);
}

/*
 * VIMAGE: assume curvnet set by caller.
 */
static struct igmp_ifsoftc *
igi_alloc_locked(/*const*/ struct ifnet *ifp)
{
	struct igmp_ifsoftc *igi;

	IGMP_LOCK_ASSERT();

	igi = malloc(sizeof(struct igmp_ifsoftc), M_IGMP, M_NOWAIT|M_ZERO);
	if (igi == NULL)
		goto out;

	igi->igi_ifp = ifp;
	igi->igi_version = V_igmp_default_version;
	igi->igi_flags = 0;
	igi->igi_rv = IGMP_RV_INIT;
	igi->igi_qi = IGMP_QI_INIT;
	igi->igi_qri = IGMP_QRI_INIT;
	igi->igi_uri = IGMP_URI_INIT;
	mbufq_init(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS);

	LIST_INSERT_HEAD(&V_igi_head, igi, igi_link);

	CTR2(KTR_IGMPV3, "allocate igmp_ifsoftc for ifp %p(%s)",
	    ifp, ifp->if_xname);

out:
	return (igi);
}

/*
 * Hook for ifdetach.
 *
 * NOTE: Some finalization tasks need to run before the protocol domain
 * is detached, but also before the link layer does its cleanup.
 *
 * SMPNG: igmp_ifdetach() needs to take IF_ADDR_LOCK().
 * XXX This is also bitten by unlocked ifma_protospec access.
 */
void
igmp_ifdetach(struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct igmp_ifsoftc *igi;
	struct ifmultiaddr *ifma;
	struct in_multi *inm;
	struct in_multi_head inm_free_tmp;
	CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", __func__, ifp,
	    ifp->if_xname);

	SLIST_INIT(&inm_free_tmp);
	IGMP_LOCK();

	igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
	if (igi->igi_version == IGMP_VERSION_3) {
		IF_ADDR_WLOCK(ifp);
		NET_EPOCH_ENTER(et);
		CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			inm = inm_ifmultiaddr_get_inm(ifma);
			if (inm == NULL)
				continue;
			if (inm->inm_state == IGMP_LEAVING_MEMBER)
				inm_rele_locked(&inm_free_tmp, inm);
			inm_clear_recorded(inm);
		}
		NET_EPOCH_EXIT(et);
		IF_ADDR_WUNLOCK(ifp);
		inm_release_list_deferred(&inm_free_tmp);
	}
	IGMP_UNLOCK();

}

/*
 * Hook for domifdetach.
 */
void
igmp_domifdetach(struct ifnet *ifp)
{

	CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
	    __func__, ifp, ifp->if_xname);

	IGMP_LOCK();
	igi_delete_locked(ifp);
	IGMP_UNLOCK();
}

static void
igi_delete_locked(const struct ifnet *ifp)
{
	struct igmp_ifsoftc *igi, *tigi;

	CTR3(KTR_IGMPV3, "%s: freeing igmp_ifsoftc for ifp %p(%s)",
	    __func__, ifp, ifp->if_xname);

	IGMP_LOCK_ASSERT();

	LIST_FOREACH_SAFE(igi, &V_igi_head, igi_link, tigi) {
		if (igi->igi_ifp == ifp) {
			/*
			 * Free deferred General Query responses.
			 */
			mbufq_drain(&igi->igi_gq);

			LIST_REMOVE(igi, igi_link);
			free(igi, M_IGMP);
			return;
		}
	}
}

/*
 * Process a received IGMPv1 query.
 * Return non-zero if the message should be dropped.
 *
 * VIMAGE: The curvnet pointer is derived from the input ifp.
 */
static int
igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip,
    const struct igmp *igmp)
{
	struct ifmultiaddr *ifma;
	struct igmp_ifsoftc *igi;
	struct in_multi *inm;

	NET_EPOCH_ASSERT();

	/*
	 * IGMPv1 Host Membership Queries SHOULD always be addressed to
	 * 224.0.0.1. They are always treated as General Queries.
	 * igmp_group is always ignored. Do not drop it as a userland
	 * daemon may wish to see it.
	 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
	 */
	if (!in_allhosts(ip->ip_dst) || !in_nullhost(igmp->igmp_group)) {
		IGMPSTAT_INC(igps_rcv_badqueries);
		return (0);
	}
	IGMPSTAT_INC(igps_rcv_gen_queries);

	IN_MULTI_LIST_LOCK();
	IGMP_LOCK();

	igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
	KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));

	if (igi->igi_flags & IGIF_LOOPBACK) {
		CTR2(KTR_IGMPV3, "ignore v1 query on IGIF_LOOPBACK ifp %p(%s)",
		    ifp, ifp->if_xname);
		goto out_locked;
	}

	/*
	 * Switch to IGMPv1 host compatibility mode.
	 */
	igmp_set_version(igi, IGMP_VERSION_1);

	CTR2(KTR_IGMPV3, "process v1 query on ifp %p(%s)", ifp, ifp->if_xname);

	/*
	 * Start the timers in all of our group records
	 * for the interface on which the query arrived,
	 * except those which are already running.
	 */
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		inm = inm_ifmultiaddr_get_inm(ifma);
		if (inm == NULL)
			continue;
		if (inm->inm_timer != 0)
			continue;
		switch (inm->inm_state) {
		case IGMP_NOT_MEMBER:
		case IGMP_SILENT_MEMBER:
			break;
		case IGMP_G_QUERY_PENDING_MEMBER:
		case IGMP_SG_QUERY_PENDING_MEMBER:
		case IGMP_REPORTING_MEMBER:
		case IGMP_IDLE_MEMBER:
		case IGMP_LAZY_MEMBER:
		case IGMP_SLEEPING_MEMBER:
		case IGMP_AWAKENING_MEMBER:
			inm->inm_state = IGMP_REPORTING_MEMBER;
			inm->inm_timer = IGMP_RANDOM_DELAY(
			    IGMP_V1V2_MAX_RI * IGMP_FASTHZ);
			V_current_state_timers_running = 1;
			break;
		case IGMP_LEAVING_MEMBER:
			break;
		}
	}

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_LIST_UNLOCK();

	return (0);
}

/*
 * Process a received IGMPv2 general or group-specific query.
 */
static int
igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip,
    const struct igmp *igmp)
{
	struct ifmultiaddr *ifma;
	struct igmp_ifsoftc *igi;
	struct in_multi *inm;
	int is_general_query;
	uint16_t timer;

	NET_EPOCH_ASSERT();

	is_general_query = 0;

	/*
	 * Validate address fields upfront.
	 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
	 */
	if (in_nullhost(igmp->igmp_group)) {
		/*
		 * IGMPv2 General Query.
		 * If this was not sent to the all-hosts group, ignore it.
		 */
		if (!in_allhosts(ip->ip_dst))
			return (0);
		IGMPSTAT_INC(igps_rcv_gen_queries);
		is_general_query = 1;
	} else {
		/* IGMPv2 Group-Specific Query. */
		IGMPSTAT_INC(igps_rcv_group_queries);
	}

	IN_MULTI_LIST_LOCK();
	IGMP_LOCK();

	igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
	KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));

	if (igi->igi_flags & IGIF_LOOPBACK) {
		CTR2(KTR_IGMPV3, "ignore v2 query on IGIF_LOOPBACK ifp %p(%s)",
		    ifp, ifp->if_xname);
		goto out_locked;
	}

	/*
	 * Ignore v2 query if in v1 Compatibility Mode.
	 */
	if (igi->igi_version == IGMP_VERSION_1)
		goto out_locked;

	igmp_set_version(igi, IGMP_VERSION_2);

	timer = igmp->igmp_code * IGMP_FASTHZ / IGMP_TIMER_SCALE;
	if (timer == 0)
		timer = 1;
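	/*
	 * Worked example of the conversion above: igmp_code is in tenths
	 * of a second (IGMP_TIMER_SCALE is 10), so a Max Response Time of
	 * 100 (10.0 s) becomes 100 * IGMP_FASTHZ / 10 = 50 fast-timeout
	 * ticks at 5 ticks per second; a zero result is clamped to one
	 * tick, as shown above.
	 */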

	if (is_general_query) {
		/*
		 * For each reporting group joined on this
		 * interface, kick the report timer.
		 */
		CTR2(KTR_IGMPV3, "process v2 general query on ifp %p(%s)",
		    ifp, ifp->if_xname);
		CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			inm = inm_ifmultiaddr_get_inm(ifma);
			if (inm == NULL)
				continue;
			igmp_v2_update_group(inm, timer);
		}
	} else {
		/*
		 * Group-specific IGMPv2 query, we need only
		 * look up the single group to process it.
		 */
		inm = inm_lookup(ifp, igmp->igmp_group);
		if (inm != NULL) {
			CTR3(KTR_IGMPV3,
			    "process v2 query 0x%08x on ifp %p(%s)",
			    ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
			igmp_v2_update_group(inm, timer);
		}
	}

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_LIST_UNLOCK();

	return (0);
}

/*
 * Update the report timer on a group in response to an IGMPv2 query.
 *
 * If we are becoming the reporting member for this group, start the timer.
 * If we already are the reporting member for this group, and timer is
 * below the threshold, reset it.
 *
 * We may be updating the group for the first time since we switched
 * to IGMPv3. If we are, then we must clear any recorded source lists,
 * and transition to REPORTING state; the group timer is overloaded
 * for group and group-source query responses.
 *
 * Unlike IGMPv3, the delay per group should be jittered
 * to avoid bursts of IGMPv2 reports.
 */
static void
igmp_v2_update_group(struct in_multi *inm, const int timer)
{

	CTR4(KTR_IGMPV3, "%s: 0x%08x/%s timer=%d", __func__,
	    ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname, timer);

	IN_MULTI_LIST_LOCK_ASSERT();

	switch (inm->inm_state) {
	case IGMP_NOT_MEMBER:
	case IGMP_SILENT_MEMBER:
		break;
	case IGMP_REPORTING_MEMBER:
		if (inm->inm_timer != 0 &&
		    inm->inm_timer <= timer) {
			CTR1(KTR_IGMPV3, "%s: REPORTING and timer running, "
			    "skipping.", __func__);
			break;
		}
		/* FALLTHROUGH */
	case IGMP_SG_QUERY_PENDING_MEMBER:
	case IGMP_G_QUERY_PENDING_MEMBER:
	case IGMP_IDLE_MEMBER:
	case IGMP_LAZY_MEMBER:
	case IGMP_AWAKENING_MEMBER:
		CTR1(KTR_IGMPV3, "%s: ->REPORTING", __func__);
		inm->inm_state = IGMP_REPORTING_MEMBER;
		inm->inm_timer = IGMP_RANDOM_DELAY(timer);
		V_current_state_timers_running = 1;
		break;
	case IGMP_SLEEPING_MEMBER:
		CTR1(KTR_IGMPV3, "%s: ->AWAKENING", __func__);
		inm->inm_state = IGMP_AWAKENING_MEMBER;
		break;
	case IGMP_LEAVING_MEMBER:
		break;
	}
}

/*
 * Process a received IGMPv3 general, group-specific or
 * group-and-source-specific query.
 * Assumes m has already been pulled up to the full IGMP message length.
 * Return 0 if successful, otherwise an appropriate error code is returned.
 */
static int
igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip,
    /*const*/ struct igmpv3 *igmpv3)
{
	struct igmp_ifsoftc *igi;
	struct in_multi *inm;
	int is_general_query;
	uint32_t maxresp, nsrc, qqi;
	uint16_t timer;
	uint8_t qrv;

	is_general_query = 0;

	CTR2(KTR_IGMPV3, "process v3 query on ifp %p(%s)", ifp, ifp->if_xname);

	maxresp = igmpv3->igmp_code;	/* in 1/10ths of a second */
	if (maxresp >= 128) {
		maxresp = IGMP_MANT(igmpv3->igmp_code) <<
		    (IGMP_EXP(igmpv3->igmp_code) + 3);
	}
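	/*
	 * Worked example of the exponential encoding above, using the
	 * IGMP_MANT()/IGMP_EXP() macros from igmp_var.h (mantissa in the
	 * low nibble, 3-bit exponent): an igmp_code of 0x8A has mant 10
	 * and exp 0, giving maxresp = 10 << 3 = 80 tenths, i.e. 8.0 s.
	 */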

	/*
	 * Robustness must never be less than 2 for on-wire IGMPv3.
	 * FUTURE: Check if ifp has IGIF_LOOPBACK set, as we will make
	 * an exception for interfaces whose IGMPv3 state changes
	 * are redirected to loopback (e.g. MANET).
	 */
	qrv = IGMP_QRV(igmpv3->igmp_misc);
	if (qrv < 2) {
		CTR3(KTR_IGMPV3, "%s: clamping qrv %d to %d", __func__,
		    qrv, IGMP_RV_INIT);
		qrv = IGMP_RV_INIT;
	}

	qqi = igmpv3->igmp_qqi;
	if (qqi >= 128) {
		qqi = IGMP_MANT(igmpv3->igmp_qqi) <<
		    (IGMP_EXP(igmpv3->igmp_qqi) + 3);
	}

	timer = maxresp * IGMP_FASTHZ / IGMP_TIMER_SCALE;
	if (timer == 0)
		timer = 1;

	nsrc = ntohs(igmpv3->igmp_numsrc);

	/*
	 * Validate address fields and versions upfront before
	 * accepting v3 query.
	 * XXX SMPng: Unlocked access to igmpstat counters here.
	 */
	if (in_nullhost(igmpv3->igmp_group)) {
		/*
		 * IGMPv3 General Query.
		 *
		 * General Queries SHOULD be directed to 224.0.0.1.
		 * A general query with a source list has undefined
		 * behaviour; discard it.
		 */
		IGMPSTAT_INC(igps_rcv_gen_queries);
		if (!in_allhosts(ip->ip_dst) || nsrc > 0) {
			IGMPSTAT_INC(igps_rcv_badqueries);
			return (0);
		}
		is_general_query = 1;
	} else {
		/* Group or group-source specific query. */
		if (nsrc == 0)
			IGMPSTAT_INC(igps_rcv_group_queries);
		else
			IGMPSTAT_INC(igps_rcv_gsr_queries);
	}

	IN_MULTI_LIST_LOCK();
	IGMP_LOCK();

	igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
	KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));

	if (igi->igi_flags & IGIF_LOOPBACK) {
		CTR2(KTR_IGMPV3, "ignore v3 query on IGIF_LOOPBACK ifp %p(%s)",
		    ifp, ifp->if_xname);
		goto out_locked;
	}

	/*
	 * Discard the v3 query if we're in Compatibility Mode.
	 * The RFC does not clearly state whether hosts must remain in
	 * compatibility mode until the Old Version Querier Present
	 * timer expires.
	 */
	if (igi->igi_version != IGMP_VERSION_3) {
		CTR3(KTR_IGMPV3, "ignore v3 query in v%d mode on ifp %p(%s)",
		    igi->igi_version, ifp, ifp->if_xname);
		goto out_locked;
	}

	igmp_set_version(igi, IGMP_VERSION_3);
	igi->igi_rv = qrv;
	igi->igi_qi = qqi;
	igi->igi_qri = maxresp;

	CTR4(KTR_IGMPV3, "%s: qrv %d qi %d qri %d", __func__, qrv, qqi,
	    maxresp);

	if (is_general_query) {
		/*
		 * Schedule a current-state report on this ifp for
		 * all groups, possibly containing source lists.
		 * If there is a pending General Query response
		 * scheduled earlier than the selected delay, do
		 * not schedule any other reports.
		 * Otherwise, reset the interface timer.
		 */
		CTR2(KTR_IGMPV3, "process v3 general query on ifp %p(%s)",
		    ifp, ifp->if_xname);
		if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) {
			igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer);
			V_interface_timers_running = 1;
		}
	} else {
		/*
		 * Group-source-specific queries are throttled on
		 * a per-group basis to defeat denial-of-service attempts.
		 * Queries for groups we are not a member of on this
		 * link are simply ignored.
		 */
		inm = inm_lookup(ifp, igmpv3->igmp_group);
		if (inm == NULL)
			goto out_locked;
		if (nsrc > 0) {
			if (!ratecheck(&inm->inm_lastgsrtv,
			    &V_igmp_gsrdelay)) {
				CTR1(KTR_IGMPV3, "%s: GS query throttled.",
				    __func__);
				IGMPSTAT_INC(igps_drop_gsr_queries);
				goto out_locked;
			}
		}
		CTR3(KTR_IGMPV3, "process v3 0x%08x query on ifp %p(%s)",
		    ntohl(igmpv3->igmp_group.s_addr), ifp, ifp->if_xname);
		/*
		 * If there is a pending General Query response
		 * scheduled sooner than the selected delay, no
		 * further report need be scheduled.
		 * Otherwise, prepare to respond to the
		 * group-specific or group-and-source query.
		 */
		if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer)
			igmp_input_v3_group_query(inm, igi, timer, igmpv3);
	}

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_LIST_UNLOCK();

	return (0);
}

/*
 * Process a received IGMPv3 group-specific or group-and-source-specific
 * query.
 * Return <0 if any error occurred. Currently this is ignored.
 */
static int
igmp_input_v3_group_query(struct in_multi *inm, struct igmp_ifsoftc *igi,
    int timer, /*const*/ struct igmpv3 *igmpv3)
{
	int retval;
	uint16_t nsrc;

	IN_MULTI_LIST_LOCK_ASSERT();
	IGMP_LOCK_ASSERT();

	retval = 0;

	switch (inm->inm_state) {
	case IGMP_NOT_MEMBER:
	case IGMP_SILENT_MEMBER:
	case IGMP_SLEEPING_MEMBER:
	case IGMP_LAZY_MEMBER:
	case IGMP_AWAKENING_MEMBER:
	case IGMP_IDLE_MEMBER:
	case IGMP_LEAVING_MEMBER:
		return (retval);
		break;
	case IGMP_REPORTING_MEMBER:
	case IGMP_G_QUERY_PENDING_MEMBER:
	case IGMP_SG_QUERY_PENDING_MEMBER:
		break;
	}

	nsrc = ntohs(igmpv3->igmp_numsrc);

	/*
	 * Deal with group-specific queries upfront.
	 * If any group query is already pending, purge any recorded
	 * source-list state if it exists, and schedule a query response
	 * for this group-specific query.
	 */
	if (nsrc == 0) {
		if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
		    inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) {
			inm_clear_recorded(inm);
			timer = min(inm->inm_timer, timer);
		}
		inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER;
		inm->inm_timer = IGMP_RANDOM_DELAY(timer);
		V_current_state_timers_running = 1;
		return (retval);
	}

	/*
	 * Deal with the case where a group-and-source-specific query has
	 * been received but a group-specific query is already pending.
	 */
	if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) {
		timer = min(inm->inm_timer, timer);
		inm->inm_timer = IGMP_RANDOM_DELAY(timer);
		V_current_state_timers_running = 1;
		return (retval);
	}

	/*
	 * Finally, deal with the case where a group-and-source-specific
	 * query has been received, where a response to a previous g-s-r
	 * query exists, or none exists.
	 * In this case, we need to parse the source-list which the Querier
	 * has provided us with and check if we have any source list filter
	 * entries at T1 for these sources. If we do not, there is no need
	 * to schedule a report and the query may be dropped.
	 * If we do, we must record them and schedule a current-state
	 * report for those sources.
	 * FIXME: Handling source lists larger than 1 mbuf requires that
	 * we pass the mbuf chain pointer down to this function, and use
	 * m_getptr() to walk the chain.
	 */
	if (inm->inm_nsrc > 0) {
		const struct in_addr *ap;
		int i, nrecorded;

		ap = (const struct in_addr *)(igmpv3 + 1);
		nrecorded = 0;
		for (i = 0; i < nsrc; i++, ap++) {
			retval = inm_record_source(inm, ap->s_addr);
			if (retval < 0)
				break;
			nrecorded += retval;
		}
		if (nrecorded > 0) {
			CTR1(KTR_IGMPV3,
			    "%s: schedule response to SG query", __func__);
			inm->inm_state = IGMP_SG_QUERY_PENDING_MEMBER;
			inm->inm_timer = IGMP_RANDOM_DELAY(timer);
			V_current_state_timers_running = 1;
		}
	}

	return (retval);
}

/*
 * Process a received IGMPv1 host membership report.
 *
 * NOTE: 0.0.0.0 workaround breaks const correctness.
 */
static int
igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip,
    /*const*/ struct igmp *igmp)
{
	struct in_ifaddr *ia;
	struct in_multi *inm;

	IGMPSTAT_INC(igps_rcv_reports);

	if (ifp->if_flags & IFF_LOOPBACK)
		return (0);

	if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
	    !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
		IGMPSTAT_INC(igps_rcv_badreports);
		return (EINVAL);
	}

	/*
	 * RFC 3376, Section 4.2.13, 9.2, 9.3:
	 * Booting clients may use the source address 0.0.0.0. Some
	 * IGMP daemons may not know how to use IP_RECVIF to determine
	 * the interface upon which this message was received.
	 * Replace 0.0.0.0 with the subnet address if told to do so.
	 */
	if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
		IFP_TO_IA(ifp, ia);
		if (ia != NULL)
			ip->ip_src.s_addr = htonl(ia->ia_subnet);
	}

	CTR3(KTR_IGMPV3, "process v1 report 0x%08x on ifp %p(%s)",
	    ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);

	/*
	 * IGMPv1 report suppression.
	 * If we are a member of this group, and our membership should be
	 * reported, stop our group timer and transition to the 'lazy' state.
	 */
	IN_MULTI_LIST_LOCK();
	inm = inm_lookup(ifp, igmp->igmp_group);
	if (inm != NULL) {
		struct igmp_ifsoftc *igi;

		igi = inm->inm_igi;
		if (igi == NULL) {
			KASSERT(igi != NULL,
			    ("%s: no igi for ifp %p", __func__, ifp));
			goto out_locked;
		}

		IGMPSTAT_INC(igps_rcv_ourreports);

		/*
		 * If we are in IGMPv3 host mode, do not allow the
		 * other host's IGMPv1 report to suppress our reports
		 * unless explicitly configured to do so.
		 */
		if (igi->igi_version == IGMP_VERSION_3) {
			if (V_igmp_legacysupp)
				igmp_v3_suppress_group_record(inm);
			goto out_locked;
		}

		inm->inm_timer = 0;

		switch (inm->inm_state) {
		case IGMP_NOT_MEMBER:
		case IGMP_SILENT_MEMBER:
			break;
		case IGMP_IDLE_MEMBER:
		case IGMP_LAZY_MEMBER:
		case IGMP_AWAKENING_MEMBER:
			CTR3(KTR_IGMPV3,
			    "report suppressed for 0x%08x on ifp %p(%s)",
			    ntohl(igmp->igmp_group.s_addr), ifp,
			    ifp->if_xname);
		case IGMP_SLEEPING_MEMBER:
			inm->inm_state = IGMP_SLEEPING_MEMBER;
			break;
		case IGMP_REPORTING_MEMBER:
			CTR3(KTR_IGMPV3,
			    "report suppressed for 0x%08x on ifp %p(%s)",
			    ntohl(igmp->igmp_group.s_addr), ifp,
			    ifp->if_xname);
			if (igi->igi_version == IGMP_VERSION_1)
				inm->inm_state = IGMP_LAZY_MEMBER;
			else if (igi->igi_version == IGMP_VERSION_2)
				inm->inm_state = IGMP_SLEEPING_MEMBER;
			break;
		case IGMP_G_QUERY_PENDING_MEMBER:
		case IGMP_SG_QUERY_PENDING_MEMBER:
		case IGMP_LEAVING_MEMBER:
			break;
		}
	}

out_locked:
	IN_MULTI_LIST_UNLOCK();

	return (0);
}

/*
 * Process a received IGMPv2 host membership report.
 *
 * NOTE: 0.0.0.0 workaround breaks const correctness.
 */
static int
igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip,
    /*const*/ struct igmp *igmp)
{
	struct in_ifaddr *ia;
	struct in_multi *inm;

	/*
	 * Make sure we don't hear our own membership report. Fast
	 * leave requires knowing that we are the only member of a
	 * group.
	 */
	IFP_TO_IA(ifp, ia);
	if (ia != NULL && in_hosteq(ip->ip_src, IA_SIN(ia)->sin_addr)) {
		return (0);
	}

	IGMPSTAT_INC(igps_rcv_reports);

	if (ifp->if_flags & IFF_LOOPBACK) {
		return (0);
	}

	if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
	    !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
		IGMPSTAT_INC(igps_rcv_badreports);
		return (EINVAL);
	}

	/*
	 * RFC 3376, Section 4.2.13, 9.2, 9.3:
	 * Booting clients may use the source address 0.0.0.0. Some
	 * IGMP daemons may not know how to use IP_RECVIF to determine
	 * the interface upon which this message was received.
	 * Replace 0.0.0.0 with the subnet address if told to do so.
	 */
	if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
		if (ia != NULL)
			ip->ip_src.s_addr = htonl(ia->ia_subnet);
	}

	CTR3(KTR_IGMPV3, "process v2 report 0x%08x on ifp %p(%s)",
	    ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);

	/*
	 * IGMPv2 report suppression.
	 * If we are a member of this group, and our membership should be
	 * reported, and our group timer is pending or about to be reset,
	 * stop our group timer by transitioning to the 'lazy' state.
	 */
	IN_MULTI_LIST_LOCK();
	inm = inm_lookup(ifp, igmp->igmp_group);
	if (inm != NULL) {
		struct igmp_ifsoftc *igi;

		igi = inm->inm_igi;
		KASSERT(igi != NULL, ("%s: no igi for ifp %p", __func__, ifp));

		IGMPSTAT_INC(igps_rcv_ourreports);

		/*
		 * If we are in IGMPv3 host mode, do not allow the
		 * other host's IGMPv2 report to suppress our reports
		 * unless explicitly configured to do so.
		 */
		if (igi->igi_version == IGMP_VERSION_3) {
			if (V_igmp_legacysupp)
				igmp_v3_suppress_group_record(inm);
			goto out_locked;
		}

		inm->inm_timer = 0;

		switch (inm->inm_state) {
		case IGMP_NOT_MEMBER:
		case IGMP_SILENT_MEMBER:
		case IGMP_SLEEPING_MEMBER:
			break;
		case IGMP_REPORTING_MEMBER:
		case IGMP_IDLE_MEMBER:
		case IGMP_AWAKENING_MEMBER:
			CTR3(KTR_IGMPV3,
			    "report suppressed for 0x%08x on ifp %p(%s)",
			    ntohl(igmp->igmp_group.s_addr), ifp, ifp->if_xname);
		case IGMP_LAZY_MEMBER:
			inm->inm_state = IGMP_LAZY_MEMBER;
			break;
		case IGMP_G_QUERY_PENDING_MEMBER:
		case IGMP_SG_QUERY_PENDING_MEMBER:
		case IGMP_LEAVING_MEMBER:
			break;
		}
	}

out_locked:
	IN_MULTI_LIST_UNLOCK();

	return (0);
}

int
igmp_input(struct mbuf **mp, int *offp, int proto)
{
	int iphlen;
	struct ifnet *ifp;
	struct igmp *igmp;
	struct ip *ip;
	struct mbuf *m;
	int igmplen;
	int minlen;
	int queryver;

	CTR3(KTR_IGMPV3, "%s: called w/mbuf (%p,%d)", __func__, *mp, *offp);

	m = *mp;
	ifp = m->m_pkthdr.rcvif;
	*mp = NULL;
	M_ASSERTMAPPED(m);

	IGMPSTAT_INC(igps_rcv_total);

	ip = mtod(m, struct ip *);
	iphlen = *offp;
	igmplen = ntohs(ip->ip_len) - iphlen;

	/*
	 * Validate lengths.
	 */
	if (igmplen < IGMP_MINLEN) {
		IGMPSTAT_INC(igps_rcv_tooshort);
		m_freem(m);
		return (IPPROTO_DONE);
	}

	/*
	 * Always pullup to the minimum size for v1/v2 or v3
	 * to amortize calls to m_pullup().
	 */
	minlen = iphlen;
	if (igmplen >= IGMP_V3_QUERY_MINLEN)
		minlen += IGMP_V3_QUERY_MINLEN;
	else
		minlen += IGMP_MINLEN;
	if ((!M_WRITABLE(m) || m->m_len < minlen) &&
	    (m = m_pullup(m, minlen)) == NULL) {
		IGMPSTAT_INC(igps_rcv_tooshort);
		return (IPPROTO_DONE);
	}
	ip = mtod(m, struct ip *);

	/*
	 * Validate checksum.
	 */
	m->m_data += iphlen;
	m->m_len -= iphlen;
	igmp = mtod(m, struct igmp *);
	if (in_cksum(m, igmplen)) {
		IGMPSTAT_INC(igps_rcv_badsum);
		m_freem(m);
		return (IPPROTO_DONE);
	}
	m->m_data -= iphlen;
	m->m_len += iphlen;

	/*
	 * IGMP control traffic is link-scope, and must have a TTL of 1.
	 * DVMRP traffic (e.g. mrinfo, mtrace) is an exception;
	 * probe packets may come from beyond the LAN.
	 */
	if (igmp->igmp_type != IGMP_DVMRP && ip->ip_ttl != 1) {
		IGMPSTAT_INC(igps_rcv_badttl);
		m_freem(m);
		return (IPPROTO_DONE);
	}

	switch (igmp->igmp_type) {
	case IGMP_HOST_MEMBERSHIP_QUERY:
		if (igmplen == IGMP_MINLEN) {
			if (igmp->igmp_code == 0)
				queryver = IGMP_VERSION_1;
			else
				queryver = IGMP_VERSION_2;
		} else if (igmplen >= IGMP_V3_QUERY_MINLEN) {
			queryver = IGMP_VERSION_3;
		} else {
			IGMPSTAT_INC(igps_rcv_tooshort);
			m_freem(m);
			return (IPPROTO_DONE);
		}

		switch (queryver) {
		case IGMP_VERSION_1:
			IGMPSTAT_INC(igps_rcv_v1v2_queries);
			if (!V_igmp_v1enable)
				break;
			if (igmp_input_v1_query(ifp, ip, igmp) != 0) {
				m_freem(m);
				return (IPPROTO_DONE);
			}
			break;

		case IGMP_VERSION_2:
			IGMPSTAT_INC(igps_rcv_v1v2_queries);
			if (!V_igmp_v2enable)
				break;
			if (igmp_input_v2_query(ifp, ip, igmp) != 0) {
				m_freem(m);
				return (IPPROTO_DONE);
			}
			break;

		case IGMP_VERSION_3: {
				struct igmpv3 *igmpv3;
				uint16_t igmpv3len;
				uint16_t nsrc;

				IGMPSTAT_INC(igps_rcv_v3_queries);
				igmpv3 = (struct igmpv3 *)igmp;
				/*
				 * Validate length based on source count.
				 */
				nsrc = ntohs(igmpv3->igmp_numsrc);
				if (nsrc * sizeof(in_addr_t) >
				    UINT16_MAX - iphlen - IGMP_V3_QUERY_MINLEN) {
					IGMPSTAT_INC(igps_rcv_tooshort);
					m_freem(m);
					return (IPPROTO_DONE);
				}
				/*
				 * m_pullup() may modify m, so pullup in
				 * this scope.
				 */
				igmpv3len = iphlen + IGMP_V3_QUERY_MINLEN +
				    sizeof(struct in_addr) * nsrc;
				if ((!M_WRITABLE(m) ||
				    m->m_len < igmpv3len) &&
				    (m = m_pullup(m, igmpv3len)) == NULL) {
					IGMPSTAT_INC(igps_rcv_tooshort);
					return (IPPROTO_DONE);
				}
				igmpv3 = (struct igmpv3 *)(mtod(m, uint8_t *)
				    + iphlen);
				if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) {
					m_freem(m);
					return (IPPROTO_DONE);
				}
			}
			break;
		}
		break;

	case IGMP_v1_HOST_MEMBERSHIP_REPORT:
		if (!V_igmp_v1enable)
			break;
		if (igmp_input_v1_report(ifp, ip, igmp) != 0) {
			m_freem(m);
			return (IPPROTO_DONE);
		}
		break;

	case IGMP_v2_HOST_MEMBERSHIP_REPORT:
		if (!V_igmp_v2enable)
			break;
		if (!ip_checkrouteralert(m))
			IGMPSTAT_INC(igps_rcv_nora);
		if (igmp_input_v2_report(ifp, ip, igmp) != 0) {
			m_freem(m);
			return (IPPROTO_DONE);
		}
		break;

	case IGMP_v3_HOST_MEMBERSHIP_REPORT:
		/*
		 * Hosts do not need to process IGMPv3 membership reports,
		 * as report suppression is no longer required.
		 */
		if (!ip_checkrouteralert(m))
			IGMPSTAT_INC(igps_rcv_nora);
		break;

	default:
		break;
	}

	/*
	 * Pass all valid IGMP packets up to any process(es) listening on a
	 * raw IGMP socket.
	 */
	*mp = m;
	return (rip_input(mp, offp, proto));
}
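/*
 * For reference, the fixed 8-byte message which igmp_input() validates
 * above (struct igmp in netinet/igmp.h; IGMP_MINLEN is 8):
 *
 *	igmp_type (1)  igmp_code (1)  igmp_cksum (2)  igmp_group (4)
 *
 * e.g. an IGMPv2 General Query is type 0x11 with a non-zero code and a
 * group of 0.0.0.0, sent to 224.0.0.1 with an IP TTL of 1, as the
 * version-dispatch and TTL checks above assume.
 */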
1659
1660
/*
1661
* Fast timeout handler (global).
1662
* VIMAGE: Timeout handlers are expected to service all vimages.
1663
*/
1664
static struct callout igmpfast_callout;
1665
static void
1666
igmp_fasttimo(void *arg __unused)
1667
{
1668
struct epoch_tracker et;
1669
VNET_ITERATOR_DECL(vnet_iter);
1670
1671
NET_EPOCH_ENTER(et);
1672
VNET_LIST_RLOCK_NOSLEEP();
1673
VNET_FOREACH(vnet_iter) {
1674
CURVNET_SET(vnet_iter);
1675
igmp_fasttimo_vnet();
1676
CURVNET_RESTORE();
1677
}
1678
VNET_LIST_RUNLOCK_NOSLEEP();
1679
NET_EPOCH_EXIT(et);
1680
1681
callout_reset(&igmpfast_callout, hz / IGMP_FASTHZ, igmp_fasttimo, NULL);
1682
}
1683
1684
/*
1685
* Fast timeout handler (per-vnet).
1686
*
1687
* VIMAGE: Assume caller has set up our curvnet.
1688
*/
1689
static void
1690
igmp_fasttimo_vnet(void)
1691
{
1692
struct mbufq scq; /* State-change packets */
1693
struct mbufq qrq; /* Query response packets */
1694
struct ifnet *ifp;
1695
struct igmp_ifsoftc *igi;
1696
struct ifmultiaddr *ifma;
1697
struct in_multi *inm;
1698
struct in_multi_head inm_free_tmp;
1699
int loop, uri_fasthz;
1700
1701
loop = 0;
1702
uri_fasthz = 0;
1703
1704
/*
1705
* Quick check to see if any work needs to be done, in order to
1706
* minimize the overhead of fasttimo processing.
1707
* SMPng: XXX Unlocked reads.
1708
*/
1709
if (!V_current_state_timers_running &&
1710
!V_interface_timers_running &&
1711
!V_state_change_timers_running)
1712
return;
1713
1714
SLIST_INIT(&inm_free_tmp);
1715
IN_MULTI_LIST_LOCK();
1716
IGMP_LOCK();
1717
1718
/*
1719
* IGMPv3 General Query response timer processing.
1720
*/
1721
if (V_interface_timers_running) {
1722
CTR1(KTR_IGMPV3, "%s: interface timers running", __func__);
1723
1724
V_interface_timers_running = 0;
1725
LIST_FOREACH(igi, &V_igi_head, igi_link) {
1726
if (igi->igi_v3_timer == 0) {
1727
/* Do nothing. */
1728
} else if (--igi->igi_v3_timer == 0) {
1729
igmp_v3_dispatch_general_query(igi);
1730
} else {
1731
V_interface_timers_running = 1;
1732
}
1733
}
1734
}
1735
1736
if (!V_current_state_timers_running &&
1737
!V_state_change_timers_running)
1738
goto out_locked;
1739
1740
V_current_state_timers_running = 0;
1741
V_state_change_timers_running = 0;
1742
1743
CTR1(KTR_IGMPV3, "%s: state change timers running", __func__);
1744
1745
/*
1746
* IGMPv1/v2/v3 host report and state-change timer processing.
1747
* Note: Processing a v3 group timer may remove a node.
1748
*/
1749
LIST_FOREACH(igi, &V_igi_head, igi_link) {
1750
ifp = igi->igi_ifp;
1751
1752
if (igi->igi_version == IGMP_VERSION_3) {
1753
loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
1754
uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri *
1755
IGMP_FASTHZ);
1756
mbufq_init(&qrq, IGMP_MAX_G_GS_PACKETS);
1757
mbufq_init(&scq, IGMP_MAX_STATE_CHANGE_PACKETS);
1758
}
1759
1760
IF_ADDR_WLOCK(ifp);
1761
CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1762
inm = inm_ifmultiaddr_get_inm(ifma);
1763
if (inm == NULL)
1764
continue;
1765
switch (igi->igi_version) {
1766
case IGMP_VERSION_1:
1767
case IGMP_VERSION_2:
1768
igmp_v1v2_process_group_timer(inm,
1769
igi->igi_version);
1770
break;
1771
case IGMP_VERSION_3:
1772
igmp_v3_process_group_timers(&inm_free_tmp, &qrq,
1773
&scq, inm, uri_fasthz);
1774
break;
1775
}
1776
}
1777
IF_ADDR_WUNLOCK(ifp);
1778
1779
if (igi->igi_version == IGMP_VERSION_3) {
1780
igmp_dispatch_queue(&qrq, 0, loop);
1781
igmp_dispatch_queue(&scq, 0, loop);
1782
1783
/*
1784
* Free the in_multi reference(s) for this
1785
* IGMP lifecycle.
1786
*/
1787
inm_release_list_deferred(&inm_free_tmp);
1788
}
1789
}
1790
1791
out_locked:
1792
IGMP_UNLOCK();
1793
IN_MULTI_LIST_UNLOCK();
1794
}
1795
1796
/*
1797
* Update host report group timer for IGMPv1/v2.
1798
* Will update the global pending timer flags.
1799
*/
1800
static void
1801
igmp_v1v2_process_group_timer(struct in_multi *inm, const int version)
1802
{
1803
int report_timer_expired;
1804
1805
IN_MULTI_LIST_LOCK_ASSERT();
1806
IGMP_LOCK_ASSERT();
1807
1808
if (inm->inm_timer == 0) {
1809
report_timer_expired = 0;
1810
} else if (--inm->inm_timer == 0) {
1811
report_timer_expired = 1;
1812
} else {
1813
V_current_state_timers_running = 1;
1814
return;
1815
}
1816
1817
switch (inm->inm_state) {
1818
case IGMP_NOT_MEMBER:
1819
case IGMP_SILENT_MEMBER:
1820
case IGMP_IDLE_MEMBER:
1821
case IGMP_LAZY_MEMBER:
1822
case IGMP_SLEEPING_MEMBER:
1823
case IGMP_AWAKENING_MEMBER:
1824
break;
1825
case IGMP_REPORTING_MEMBER:
1826
if (report_timer_expired) {
1827
inm->inm_state = IGMP_IDLE_MEMBER;
1828
(void)igmp_v1v2_queue_report(inm,
1829
(version == IGMP_VERSION_2) ?
1830
IGMP_v2_HOST_MEMBERSHIP_REPORT :
1831
IGMP_v1_HOST_MEMBERSHIP_REPORT);
1832
}
1833
break;
1834
case IGMP_G_QUERY_PENDING_MEMBER:
1835
case IGMP_SG_QUERY_PENDING_MEMBER:
1836
case IGMP_LEAVING_MEMBER:
1837
break;
1838
}
1839
}

/*
* Update a group's timers for IGMPv3.
* Will update the global pending timer flags.
* Note: Unlocked read from igi.
*/
static void
igmp_v3_process_group_timers(struct in_multi_head *inmh,
struct mbufq *qrq, struct mbufq *scq,
struct in_multi *inm, const int uri_fasthz)
{
int query_response_timer_expired;
int state_change_retransmit_timer_expired;

IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();

query_response_timer_expired = 0;
state_change_retransmit_timer_expired = 0;

/*
* During a transition from v1/v2 compatibility mode back to v3,
* a group record in REPORTING state may still have its group
* timer active. This is a no-op in this function; it is easier
* to deal with it here than to complicate the slow-timeout path.
*/
if (inm->inm_timer == 0) {
query_response_timer_expired = 0;
} else if (--inm->inm_timer == 0) {
query_response_timer_expired = 1;
} else {
V_current_state_timers_running = 1;
}

if (inm->inm_sctimer == 0) {
state_change_retransmit_timer_expired = 0;
} else if (--inm->inm_sctimer == 0) {
state_change_retransmit_timer_expired = 1;
} else {
V_state_change_timers_running = 1;
}

/* We are in fasttimo, so be quick about it. */
if (!state_change_retransmit_timer_expired &&
!query_response_timer_expired)
return;

switch (inm->inm_state) {
case IGMP_NOT_MEMBER:
case IGMP_SILENT_MEMBER:
case IGMP_SLEEPING_MEMBER:
case IGMP_LAZY_MEMBER:
case IGMP_AWAKENING_MEMBER:
case IGMP_IDLE_MEMBER:
break;
case IGMP_G_QUERY_PENDING_MEMBER:
case IGMP_SG_QUERY_PENDING_MEMBER:
/*
* Respond to a previously pending Group-Specific
* or Group-and-Source-Specific query by enqueueing
* the appropriate Current-State report for
* immediate transmission.
*/
if (query_response_timer_expired) {
int retval __unused;

retval = igmp_v3_enqueue_group_record(qrq, inm, 0, 1,
(inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER));
CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
__func__, retval);
inm->inm_state = IGMP_REPORTING_MEMBER;
/* XXX Clear recorded sources for next time. */
inm_clear_recorded(inm);
}
/* FALLTHROUGH */
case IGMP_REPORTING_MEMBER:
case IGMP_LEAVING_MEMBER:
if (state_change_retransmit_timer_expired) {
/*
* State-change retransmission timer fired.
* If there are any further pending retransmissions,
* set the global pending state-change flag, and
* reset the timer.
*/
if (--inm->inm_scrv > 0) {
inm->inm_sctimer = uri_fasthz;
V_state_change_timers_running = 1;
}
/*
* Retransmit the previously computed state-change
* report. If there are no further pending
* retransmissions, the mbuf queue will be consumed.
* Update T0 state to T1 as we have now sent
* a state-change.
*/
(void)igmp_v3_merge_state_changes(inm, scq);

inm_commit(inm);
CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
ntohl(inm->inm_addr.s_addr),
inm->inm_ifp->if_xname);

/*
* If we are leaving the group for good, make sure
* we release IGMP's reference to it.
* This release must be deferred using a SLIST,
* as we are called from a loop which traverses
* the in_ifmultiaddr TAILQ.
*/
if (inm->inm_state == IGMP_LEAVING_MEMBER &&
inm->inm_scrv == 0) {
inm->inm_state = IGMP_NOT_MEMBER;
inm_rele_locked(inmh, inm);
}
}
break;
}
}

/*
* Suppress a group's pending response to a group or source/group query.
*
* Do NOT suppress state changes. This leads to IGMPv3 inconsistency.
* Do NOT update ST1/ST0 as this operation merely suppresses
* the currently pending group record.
* Do NOT suppress the response to a general query. It is possible but
* it would require adding another state or flag.
*/
static void
igmp_v3_suppress_group_record(struct in_multi *inm)
{

IN_MULTI_LIST_LOCK_ASSERT();

KASSERT(inm->inm_igi->igi_version == IGMP_VERSION_3,
("%s: not IGMPv3 mode on link", __func__));

if (inm->inm_state != IGMP_G_QUERY_PENDING_MEMBER &&
inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER)
return;

if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
inm_clear_recorded(inm);

inm->inm_timer = 0;
inm->inm_state = IGMP_REPORTING_MEMBER;
}

/*
* Switch to a different IGMP version on the given interface,
* as per Section 7.2.1.
*/
static void
igmp_set_version(struct igmp_ifsoftc *igi, const int version)
{
int old_version_timer;

IGMP_LOCK_ASSERT();

CTR4(KTR_IGMPV3, "%s: switching to v%d on ifp %p(%s)", __func__,
version, igi->igi_ifp, igi->igi_ifp->if_xname);

if (version == IGMP_VERSION_1 || version == IGMP_VERSION_2) {
/*
* Compute the "Older Version Querier Present" timer as per
* Section 8.12.
*/
old_version_timer = igi->igi_rv * igi->igi_qi + igi->igi_qri;
old_version_timer *= IGMP_SLOWHZ;
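/*
* Worked example: with the RFC 3376 defaults (Robustness
* Variable 2, Query Interval 125s, Query Response Interval 10s)
* this is 2 * 125 + 10 = 260 seconds, i.e. 520 slow-timeout ticks.
*/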

if (version == IGMP_VERSION_1) {
igi->igi_v1_timer = old_version_timer;
igi->igi_v2_timer = 0;
} else if (version == IGMP_VERSION_2) {
igi->igi_v1_timer = 0;
igi->igi_v2_timer = old_version_timer;
}
}

if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
if (igi->igi_version != IGMP_VERSION_2) {
igi->igi_version = IGMP_VERSION_2;
igmp_v3_cancel_link_timers(igi);
}
} else if (igi->igi_v1_timer > 0) {
if (igi->igi_version != IGMP_VERSION_1) {
igi->igi_version = IGMP_VERSION_1;
igmp_v3_cancel_link_timers(igi);
}
}
}

/*
* Cancel pending IGMPv3 timers for the given link and all groups
* joined on it; state-change, general-query, and group-query timers.
*
* Only ever called on a transition from v3 to Compatibility mode. Kill
* the timers stone dead (this may be expensive for large numbers of
* groups); they will be restarted as a result of query processing if
* Compatibility Mode deems it necessary.
*/
static void
igmp_v3_cancel_link_timers(struct igmp_ifsoftc *igi)
{
struct ifmultiaddr *ifma;
struct ifnet *ifp;
struct in_multi *inm;
struct in_multi_head inm_free_tmp;

CTR3(KTR_IGMPV3, "%s: cancel v3 timers on ifp %p(%s)", __func__,
igi->igi_ifp, igi->igi_ifp->if_xname);

IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
NET_EPOCH_ASSERT();

SLIST_INIT(&inm_free_tmp);

/*
* Stop the v3 General Query Response on this link stone dead.
* If fasttimo is woken up due to V_interface_timers_running,
* the flag will be cleared if there are no pending link timers.
*/
igi->igi_v3_timer = 0;

/*
* Now clear the current-state and state-change report timers
* for all memberships scoped to this link.
*/
ifp = igi->igi_ifp;
IF_ADDR_WLOCK(ifp);
CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
inm = inm_ifmultiaddr_get_inm(ifma);
if (inm == NULL)
continue;
switch (inm->inm_state) {
case IGMP_NOT_MEMBER:
case IGMP_SILENT_MEMBER:
case IGMP_IDLE_MEMBER:
case IGMP_LAZY_MEMBER:
case IGMP_SLEEPING_MEMBER:
case IGMP_AWAKENING_MEMBER:
/*
* These states are either not relevant in v3 mode,
* or are unreported. Do nothing.
*/
break;
case IGMP_LEAVING_MEMBER:
/*
* If we are leaving the group and switching to
* compatibility mode, we need to release the final
* reference held for issuing the INCLUDE {}, and
* transition to REPORTING to ensure the host leave
* message is sent upstream to the old querier --
* transition to NOT would lose the leave and race.
*/
inm_rele_locked(&inm_free_tmp, inm);
/* FALLTHROUGH */
case IGMP_G_QUERY_PENDING_MEMBER:
case IGMP_SG_QUERY_PENDING_MEMBER:
inm_clear_recorded(inm);
/* FALLTHROUGH */
case IGMP_REPORTING_MEMBER:
inm->inm_state = IGMP_REPORTING_MEMBER;
break;
}
/*
* Always clear state-change and group report timers.
* Free any pending IGMPv3 state-change records.
*/
inm->inm_sctimer = 0;
inm->inm_timer = 0;
mbufq_drain(&inm->inm_scq);
}
IF_ADDR_WUNLOCK(ifp);

inm_release_list_deferred(&inm_free_tmp);
}

/*
* Update the Older Version Querier Present timers for a link.
* See Section 7.2.1 of RFC 3376.
*/
static void
igmp_v1v2_process_querier_timers(struct igmp_ifsoftc *igi)
{

IGMP_LOCK_ASSERT();

if (igi->igi_v1_timer == 0 && igi->igi_v2_timer == 0) {
/*
* IGMPv1 and IGMPv2 Querier Present timers expired.
*
* Revert to IGMPv3.
*/
if (V_igmp_default_version == IGMP_VERSION_3 &&
igi->igi_version != IGMP_VERSION_3) {
CTR5(KTR_IGMPV3,
"%s: transition from v%d -> v%d on %p(%s)",
__func__, igi->igi_version, IGMP_VERSION_3,
igi->igi_ifp, igi->igi_ifp->if_xname);
igi->igi_version = IGMP_VERSION_3;
}
} else if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
/*
* IGMPv1 Querier Present timer expired,
* IGMPv2 Querier Present timer running.
* If IGMPv2 was disabled since last timeout,
* revert to IGMPv3.
* If IGMPv2 is enabled, revert to IGMPv2.
*/
if (V_igmp_default_version == IGMP_VERSION_3 &&
!V_igmp_v2enable) {
CTR5(KTR_IGMPV3,
"%s: transition from v%d -> v%d on %p(%s)",
__func__, igi->igi_version, IGMP_VERSION_3,
igi->igi_ifp, igi->igi_ifp->if_xname);
igi->igi_v2_timer = 0;
igi->igi_version = IGMP_VERSION_3;
} else {
--igi->igi_v2_timer;
if (V_igmp_default_version == IGMP_VERSION_2 &&
igi->igi_version != IGMP_VERSION_2) {
CTR5(KTR_IGMPV3,
"%s: transition from v%d -> v%d on %p(%s)",
__func__, igi->igi_version, IGMP_VERSION_2,
igi->igi_ifp, igi->igi_ifp->if_xname);
igi->igi_version = IGMP_VERSION_2;
igmp_v3_cancel_link_timers(igi);
}
}
} else if (igi->igi_v1_timer > 0) {
/*
* IGMPv1 Querier Present timer running.
* Stop IGMPv2 timer if running.
*
* If IGMPv1 was disabled since last timeout,
* revert to IGMPv3.
* If IGMPv1 is enabled, reset IGMPv2 timer if running.
*/
if (V_igmp_default_version == IGMP_VERSION_3 &&
!V_igmp_v1enable) {
CTR5(KTR_IGMPV3,
"%s: transition from v%d -> v%d on %p(%s)",
__func__, igi->igi_version, IGMP_VERSION_3,
igi->igi_ifp, igi->igi_ifp->if_xname);
igi->igi_v1_timer = 0;
igi->igi_version = IGMP_VERSION_3;
} else {
--igi->igi_v1_timer;
}
if (igi->igi_v2_timer > 0) {
CTR3(KTR_IGMPV3,
"%s: cancel v2 timer on %p(%s)",
__func__, igi->igi_ifp, igi->igi_ifp->if_xname);
igi->igi_v2_timer = 0;
}
}
}

/*
* Global slowtimo handler.
* VIMAGE: Timeout handlers are expected to service all vimages.
*/
static struct callout igmpslow_callout;
static void
igmp_slowtimo(void *arg __unused)
{
struct epoch_tracker et;
VNET_ITERATOR_DECL(vnet_iter);

NET_EPOCH_ENTER(et);
VNET_LIST_RLOCK_NOSLEEP();
VNET_FOREACH(vnet_iter) {
CURVNET_SET(vnet_iter);
igmp_slowtimo_vnet();
CURVNET_RESTORE();
}
VNET_LIST_RUNLOCK_NOSLEEP();
NET_EPOCH_EXIT(et);

callout_reset(&igmpslow_callout, hz / IGMP_SLOWHZ, igmp_slowtimo, NULL);
}

/*
* Per-vnet slowtimo handler.
*/
static void
igmp_slowtimo_vnet(void)
{
struct igmp_ifsoftc *igi;

IGMP_LOCK();

LIST_FOREACH(igi, &V_igi_head, igi_link) {
igmp_v1v2_process_querier_timers(igi);
}

IGMP_UNLOCK();
}

/*
* Dispatch an IGMPv1/v2 host report or leave message.
* These are always small enough to fit inside a single mbuf.
*/
static int
igmp_v1v2_queue_report(struct in_multi *inm, const int type)
{
struct epoch_tracker et;
struct ifnet *ifp;
struct igmp *igmp;
struct ip *ip;
struct mbuf *m;

IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();

ifp = inm->inm_ifp;

m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL)
return (ENOMEM);
M_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp));

m->m_pkthdr.len = sizeof(struct ip) + sizeof(struct igmp);

m->m_data += sizeof(struct ip);
m->m_len = sizeof(struct igmp);

igmp = mtod(m, struct igmp *);
igmp->igmp_type = type;
igmp->igmp_code = 0;
igmp->igmp_group = inm->inm_addr;
igmp->igmp_cksum = 0;
igmp->igmp_cksum = in_cksum(m, sizeof(struct igmp));
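/*
* Note: m_data still points at the IGMP header here, so the
* checksum covers only the 8-byte IGMP message. The IP header
* is prepended below and checksummed later by ip_output().
*/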

m->m_data -= sizeof(struct ip);
m->m_len += sizeof(struct ip);

ip = mtod(m, struct ip *);
ip->ip_tos = 0;
ip->ip_len = htons(sizeof(struct ip) + sizeof(struct igmp));
ip->ip_off = 0;
ip->ip_p = IPPROTO_IGMP;
ip->ip_src.s_addr = INADDR_ANY;

if (type == IGMP_HOST_LEAVE_MESSAGE)
ip->ip_dst.s_addr = htonl(INADDR_ALLRTRS_GROUP);
else
ip->ip_dst = inm->inm_addr;

igmp_save_context(m, ifp);

m->m_flags |= M_IGMPV2;
if (inm->inm_igi->igi_flags & IGIF_LOOPBACK)
m->m_flags |= M_IGMP_LOOP;

CTR2(KTR_IGMPV3, "%s: netisr_dispatch(NETISR_IGMP, %p)", __func__, m);
NET_EPOCH_ENTER(et);
netisr_dispatch(NETISR_IGMP, m);
NET_EPOCH_EXIT(et);

return (0);
}

/*
* Process a state change from the upper layer for the given IPv4 group.
*
* Each socket holds a reference on the in_multi in its own ip_moptions.
* The socket layer will have made the necessary updates to the group
* state; it is now up to IGMP to issue a state change report if there
* has been any change between T0 (when the last state-change was issued)
* and T1 (now).
*
* We use the IGMPv3 state machine at group level. The IGMP module
* however makes the decision as to which IGMP protocol version to speak.
* A state change *from* INCLUDE {} always means an initial join.
* A state change *to* INCLUDE {} always means a final leave.
*
* FUTURE: If IGIF_V3LITE is enabled for this interface, then we can
* save ourselves a bunch of work; any exclusive mode groups need not
* compute source filter lists.
*
* VIMAGE: curvnet should have been set by caller, as this routine
* is called from the socket option handlers.
*/
int
igmp_change_state(struct in_multi *inm)
{
struct igmp_ifsoftc *igi;
struct ifnet *ifp;
int error;

error = 0;
IN_MULTI_LOCK_ASSERT();
/*
* Try to detect if the upper layer just asked us to change state
* for an interface which has now gone away.
*/
KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__));
ifp = inm->inm_ifma->ifma_ifp;
if (ifp == NULL)
return (0);
/*
* Sanity check that netinet's notion of ifp is the
* same as net's.
*/
KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__));

IGMP_LOCK();

igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
KASSERT(igi != NULL, ("%s: no igmp_ifsoftc for ifp %p", __func__, ifp));

/*
* If we detect a state transition to or from MCAST_UNDEFINED
* for this group, then we are starting or finishing an IGMP
* life cycle for this group.
*/
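/*
* inm_st[0] holds the T0 (last reported) filter mode and inm_st[1]
* the T1 (current) mode; a first join moves T1 from MCAST_UNDEFINED
* to MCAST_INCLUDE or MCAST_EXCLUDE, and a final leave moves it
* back to MCAST_UNDEFINED.
*/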
if (inm->inm_st[1].iss_fmode != inm->inm_st[0].iss_fmode) {
CTR3(KTR_IGMPV3, "%s: inm transition %d -> %d", __func__,
inm->inm_st[0].iss_fmode, inm->inm_st[1].iss_fmode);
if (inm->inm_st[0].iss_fmode == MCAST_UNDEFINED) {
CTR1(KTR_IGMPV3, "%s: initial join", __func__);
error = igmp_initial_join(inm, igi);
goto out_locked;
} else if (inm->inm_st[1].iss_fmode == MCAST_UNDEFINED) {
CTR1(KTR_IGMPV3, "%s: final leave", __func__);
igmp_final_leave(inm, igi);
goto out_locked;
}
} else {
CTR1(KTR_IGMPV3, "%s: filter set change", __func__);
}

error = igmp_handle_state_change(inm, igi);

out_locked:
IGMP_UNLOCK();
return (error);
}

/*
* Perform the initial join for an IGMP group.
*
* When joining a group:
* If the group should have its IGMP traffic suppressed, do nothing.
* IGMPv1 starts sending IGMPv1 host membership reports.
* IGMPv2 starts sending IGMPv2 host membership reports.
* IGMPv3 will schedule an IGMPv3 state-change report containing the
* initial state of the membership.
*/
static int
igmp_initial_join(struct in_multi *inm, struct igmp_ifsoftc *igi)
{
struct ifnet *ifp;
struct mbufq *mq;
int error, retval, syncstates;

CTR4(KTR_IGMPV3, "%s: initial join 0x%08x on ifp %p(%s)", __func__,
ntohl(inm->inm_addr.s_addr), inm->inm_ifp, inm->inm_ifp->if_xname);

error = 0;
syncstates = 1;

ifp = inm->inm_ifp;

IN_MULTI_LOCK_ASSERT();
IGMP_LOCK_ASSERT();

KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));

/*
* Groups joined on loopback or marked as 'not reported',
* e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and
* are never reported in any IGMP protocol exchanges.
* All other groups enter the appropriate IGMP state machine
* for the version in use on this link.
* A link marked as IGIF_SILENT causes IGMP to be completely
* disabled for the link.
*/
if ((ifp->if_flags & IFF_LOOPBACK) ||
(igi->igi_flags & IGIF_SILENT) ||
!igmp_isgroupreported(inm->inm_addr)) {
CTR1(KTR_IGMPV3,
"%s: not kicking state machine for silent group", __func__);
inm->inm_state = IGMP_SILENT_MEMBER;
inm->inm_timer = 0;
} else {
/*
* Deal with overlapping in_multi lifecycle.
* If this group was LEAVING, then make sure
* we drop the reference we picked up to keep the
* group around for the final INCLUDE {} enqueue.
*/
if (igi->igi_version == IGMP_VERSION_3 &&
inm->inm_state == IGMP_LEAVING_MEMBER) {
MPASS(inm->inm_refcount > 1);
inm_rele_locked(NULL, inm);
}
inm->inm_state = IGMP_REPORTING_MEMBER;

switch (igi->igi_version) {
case IGMP_VERSION_1:
case IGMP_VERSION_2:
inm->inm_state = IGMP_IDLE_MEMBER;
error = igmp_v1v2_queue_report(inm,
(igi->igi_version == IGMP_VERSION_2) ?
IGMP_v2_HOST_MEMBERSHIP_REPORT :
IGMP_v1_HOST_MEMBERSHIP_REPORT);
if (error == 0) {
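/*
* Randomize the initial report over the v1/v2 maximum
* report interval (IGMP_V1V2_MAX_RI seconds), counted
* in fast-timeout ticks.
*/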
inm->inm_timer = IGMP_RANDOM_DELAY(
IGMP_V1V2_MAX_RI * IGMP_FASTHZ);
V_current_state_timers_running = 1;
}
break;

case IGMP_VERSION_3:
/*
* Defer update of T0 to T1, until the first copy
* of the state change has been transmitted.
*/
syncstates = 0;

/*
* Immediately enqueue a State-Change Report for
* this interface, freeing any previous reports.
* Don't kick the timers if there is nothing to do,
* or if an error occurred.
*/
mq = &inm->inm_scq;
mbufq_drain(mq);
retval = igmp_v3_enqueue_group_record(mq, inm, 1,
0, 0);
CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
__func__, retval);
if (retval <= 0) {
error = retval * -1;
break;
}

/*
* Schedule transmission of pending state-change
* report up to RV times for this link. The timer
* will fire at the next igmp_fasttimo (~200ms),
* giving us an opportunity to merge the reports.
*/
if (igi->igi_flags & IGIF_LOOPBACK) {
inm->inm_scrv = 1;
} else {
KASSERT(igi->igi_rv > 1,
("%s: invalid robustness %d", __func__,
igi->igi_rv));
inm->inm_scrv = igi->igi_rv;
}
inm->inm_sctimer = 1;
V_state_change_timers_running = 1;

error = 0;
break;
}
}

/*
* Only update the T0 state if state change is atomic,
* i.e. we don't need to wait for a timer to fire before we
* can consider the state change to have been communicated.
*/
if (syncstates) {
inm_commit(inm);
CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
}

return (error);
}

/*
* Issue an intermediate state change during the IGMP life-cycle.
*/
static int
igmp_handle_state_change(struct in_multi *inm, struct igmp_ifsoftc *igi)
{
struct ifnet *ifp;
int retval;

CTR4(KTR_IGMPV3, "%s: state change for 0x%08x on ifp %p(%s)", __func__,
ntohl(inm->inm_addr.s_addr), inm->inm_ifp, inm->inm_ifp->if_xname);

ifp = inm->inm_ifp;

IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();

KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));

if ((ifp->if_flags & IFF_LOOPBACK) ||
(igi->igi_flags & IGIF_SILENT) ||
!igmp_isgroupreported(inm->inm_addr) ||
(igi->igi_version != IGMP_VERSION_3)) {
if (!igmp_isgroupreported(inm->inm_addr)) {
CTR1(KTR_IGMPV3,
"%s: not kicking state machine for silent group", __func__);
}
CTR1(KTR_IGMPV3, "%s: nothing to do", __func__);
inm_commit(inm);
CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
return (0);
}

mbufq_drain(&inm->inm_scq);

retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0);
CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval);
if (retval <= 0)
return (-retval);

/*
* If record(s) were enqueued, start the state-change
* report timer for this group.
*/
inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv);
inm->inm_sctimer = 1;
V_state_change_timers_running = 1;

return (0);
}

/*
* Perform the final leave for an IGMP group.
*
* When leaving a group:
* IGMPv1 does nothing.
* IGMPv2 sends a host leave message, if and only if we are the reporter.
* IGMPv3 enqueues a state-change report containing a transition
* to INCLUDE {} for immediate transmission.
*/
static void
igmp_final_leave(struct in_multi *inm, struct igmp_ifsoftc *igi)
{
int syncstates;

syncstates = 1;

CTR4(KTR_IGMPV3, "%s: final leave 0x%08x on ifp %p(%s)",
__func__, ntohl(inm->inm_addr.s_addr), inm->inm_ifp,
inm->inm_ifp->if_xname);

IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();

switch (inm->inm_state) {
case IGMP_NOT_MEMBER:
case IGMP_SILENT_MEMBER:
case IGMP_LEAVING_MEMBER:
/* Already leaving or left; do nothing. */
CTR1(KTR_IGMPV3,
"%s: not kicking state machine for silent group", __func__);
break;
case IGMP_REPORTING_MEMBER:
case IGMP_IDLE_MEMBER:
case IGMP_G_QUERY_PENDING_MEMBER:
case IGMP_SG_QUERY_PENDING_MEMBER:
if (igi->igi_version == IGMP_VERSION_2) {
#ifdef INVARIANTS
if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
panic("%s: IGMPv3 state reached, not IGMPv3 mode",
__func__);
#endif
igmp_v1v2_queue_report(inm, IGMP_HOST_LEAVE_MESSAGE);
inm->inm_state = IGMP_NOT_MEMBER;
} else if (igi->igi_version == IGMP_VERSION_3) {
/*
* Stop group timer and all pending reports.
* Immediately enqueue a state-change report
* TO_IN {} to be sent on the next fast timeout,
* giving us an opportunity to merge reports.
*/
mbufq_drain(&inm->inm_scq);
inm->inm_timer = 0;
if (igi->igi_flags & IGIF_LOOPBACK) {
inm->inm_scrv = 1;
} else {
inm->inm_scrv = igi->igi_rv;
}
CTR4(KTR_IGMPV3, "%s: Leaving 0x%08x/%s with %d "
"pending retransmissions.", __func__,
ntohl(inm->inm_addr.s_addr),
inm->inm_ifp->if_xname, inm->inm_scrv);
if (inm->inm_scrv == 0) {
inm->inm_state = IGMP_NOT_MEMBER;
inm->inm_sctimer = 0;
} else {
int retval __unused;

inm_acquire_locked(inm);
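/*
* Hold an extra reference so the membership survives
* the pending retransmissions; igmp_v3_process_group_timers()
* releases it once inm_scrv drops to zero.
*/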

retval = igmp_v3_enqueue_group_record(
&inm->inm_scq, inm, 1, 0, 0);
KASSERT(retval != 0,
("%s: enqueue record = %d", __func__,
retval));

inm->inm_state = IGMP_LEAVING_MEMBER;
inm->inm_sctimer = 1;
V_state_change_timers_running = 1;
syncstates = 0;
}
break;
}
break;
case IGMP_LAZY_MEMBER:
case IGMP_SLEEPING_MEMBER:
case IGMP_AWAKENING_MEMBER:
/* Our reports are suppressed; do nothing. */
break;
}

if (syncstates) {
inm_commit(inm);
CTR3(KTR_IGMPV3, "%s: T1 -> T0 for 0x%08x/%s", __func__,
ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
CTR3(KTR_IGMPV3, "%s: T1 now MCAST_UNDEFINED for 0x%08x/%s",
__func__, ntohl(inm->inm_addr.s_addr),
inm->inm_ifp->if_xname);
}
}

/*
* Enqueue an IGMPv3 group record to the given output queue.
*
* XXX This function could do with having the allocation code
* split out, and the multiple-tree-walks coalesced into a single
* routine as has been done in igmp_v3_enqueue_filter_change().
*
* If is_state_change is zero, a current-state record is appended.
* If is_state_change is non-zero, a state-change report is appended.
*
* If is_group_query is non-zero, an mbuf packet chain is allocated.
* If is_group_query is zero, and if there is a packet with free space
* at the tail of the queue, the record will be appended to it,
* provided there is enough free space.
* Otherwise a new mbuf packet chain is allocated.
*
* If is_source_query is non-zero, each source is checked to see if
* it was recorded for a Group-Source query, and will be omitted if
* it is not both in-mode and recorded.
*
* The function will attempt to allocate leading space in the packet
* for the IP/IGMP header to be prepended without fragmenting the chain.
*
* If successful the size of all data appended to the queue is returned,
* otherwise an error code less than zero is returned, or zero if
* no record(s) were appended.
*/
static int
igmp_v3_enqueue_group_record(struct mbufq *mq, struct in_multi *inm,
const int is_state_change, const int is_group_query,
const int is_source_query)
{
struct igmp_grouprec ig;
struct igmp_grouprec *pig;
struct ifnet *ifp;
struct ip_msource *ims, *nims;
struct mbuf *m0, *m, *md;
int is_filter_list_change;
int minrec0len, m0srcs, msrcs, nbytes, off;
int record_has_sources;
int now;
int type;
in_addr_t naddr;
uint8_t mode;

IN_MULTI_LIST_LOCK_ASSERT();

ifp = inm->inm_ifp;
is_filter_list_change = 0;
m = NULL;
m0 = NULL;
m0srcs = 0;
msrcs = 0;
nbytes = 0;
nims = NULL;
record_has_sources = 1;
pig = NULL;
type = IGMP_DO_NOTHING;
mode = inm->inm_st[1].iss_fmode;

/*
* If we did not transition out of ASM mode during t0->t1,
* and there are no source nodes to process, we can skip
* the generation of source records.
*/
if (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0 &&
inm->inm_nsrc == 0)
record_has_sources = 0;

if (is_state_change) {
/*
* Queue a state change record.
* If the mode did not change, and there are non-ASM
* listeners or source filters present,
* we potentially need to issue two records for the group.
* If we are transitioning to MCAST_UNDEFINED, we need
* not send any sources.
* If there are ASM listeners, and there was no filter
* mode transition of any kind, do nothing.
*/
if (mode != inm->inm_st[0].iss_fmode) {
if (mode == MCAST_EXCLUDE) {
CTR1(KTR_IGMPV3, "%s: change to EXCLUDE",
__func__);
type = IGMP_CHANGE_TO_EXCLUDE_MODE;
} else {
CTR1(KTR_IGMPV3, "%s: change to INCLUDE",
__func__);
type = IGMP_CHANGE_TO_INCLUDE_MODE;
if (mode == MCAST_UNDEFINED)
record_has_sources = 0;
}
} else {
if (record_has_sources) {
is_filter_list_change = 1;
} else {
type = IGMP_DO_NOTHING;
}
}
} else {
/*
* Queue a current state record.
*/
if (mode == MCAST_EXCLUDE) {
type = IGMP_MODE_IS_EXCLUDE;
} else if (mode == MCAST_INCLUDE) {
type = IGMP_MODE_IS_INCLUDE;
KASSERT(inm->inm_st[1].iss_asm == 0,
("%s: inm %p is INCLUDE but ASM count is %d",
__func__, inm, inm->inm_st[1].iss_asm));
}
}

/*
* Generate the filter list changes using a separate function.
*/
if (is_filter_list_change)
return (igmp_v3_enqueue_filter_change(mq, inm));

if (type == IGMP_DO_NOTHING) {
CTR3(KTR_IGMPV3, "%s: nothing to do for 0x%08x/%s", __func__,
ntohl(inm->inm_addr.s_addr), inm->inm_ifp->if_xname);
return (0);
}

/*
* If any sources are present, we must be able to fit at least
* one in the trailing space of the tail packet's mbuf,
* ideally more.
*/
minrec0len = sizeof(struct igmp_grouprec);
if (record_has_sources)
minrec0len += sizeof(in_addr_t);

CTR4(KTR_IGMPV3, "%s: queueing %s for 0x%08x/%s", __func__,
igmp_rec_type_to_str(type), ntohl(inm->inm_addr.s_addr),
inm->inm_ifp->if_xname);

/*
* Check if we have a packet in the tail of the queue for this
* group into which the first group record for this group will fit.
* Otherwise allocate a new packet.
* Always allocate leading space for IP+RA_OPT+IGMP+REPORT.
* Note: Group records for G/GSR query responses MUST be sent
* in their own packet.
*/
m0 = mbufq_last(mq);
if (!is_group_query &&
m0 != NULL &&
(m0->m_pkthdr.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) &&
(m0->m_pkthdr.len + minrec0len) <
(ifp->if_mtu - IGMP_LEADINGSPACE)) {
m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
m = m0;
CTR1(KTR_IGMPV3, "%s: use existing packet", __func__);
} else {
if (mbufq_full(mq)) {
CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
return (-ENOMEM);
}
m = NULL;
m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
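/*
* Worked example: on a standard 1500-byte Ethernet MTU,
* IGMP_LEADINGSPACE (IP header + Router Alert option + report
* header, 32 bytes) leaves room for (1500 - 32 - 8) / 4 = 365
* source addresses in the first group record of a new packet.
*/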
if (!is_state_change && !is_group_query) {
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m)
m->m_data += IGMP_LEADINGSPACE;
}
if (m == NULL) {
m = m_gethdr(M_NOWAIT, MT_DATA);
if (m)
M_ALIGN(m, IGMP_LEADINGSPACE);
}
if (m == NULL)
return (-ENOMEM);

igmp_save_context(m, ifp);

CTR1(KTR_IGMPV3, "%s: allocated first packet", __func__);
}

/*
* Append group record.
* If we have sources, we don't know how many yet.
*/
ig.ig_type = type;
ig.ig_datalen = 0;
ig.ig_numsrc = 0;
ig.ig_group = inm->inm_addr;
if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
if (m != m0)
m_freem(m);
CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
return (-ENOMEM);
}
nbytes += sizeof(struct igmp_grouprec);

/*
* Append as many sources as will fit in the first packet.
* If we are appending to a new packet, the chain allocation
* may potentially use clusters; use m_getptr() in this case.
* If we are appending to an existing packet, we need to obtain
* a pointer to the group record after m_append(), in case a new
* mbuf was allocated.
* Only append sources which are in-mode at t1. If we are
* transitioning to MCAST_UNDEFINED state on the group, do not
* include source entries.
* Only report recorded sources in our filter set when responding
* to a group-source query.
*/
if (record_has_sources) {
if (m == m0) {
md = m_last(m);
pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
md->m_len - nbytes);
} else {
md = m_getptr(m, 0, &off);
pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
off);
}
msrcs = 0;
RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, nims) {
CTR2(KTR_IGMPV3, "%s: visit node 0x%08x", __func__,
ims->ims_haddr);
now = ims_get_mode(inm, ims, 1);
CTR2(KTR_IGMPV3, "%s: node is %d", __func__, now);
if ((now != mode) ||
(now == mode && mode == MCAST_UNDEFINED)) {
CTR1(KTR_IGMPV3, "%s: skip node", __func__);
continue;
}
if (is_source_query && ims->ims_stp == 0) {
CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
__func__);
continue;
}
CTR1(KTR_IGMPV3, "%s: append node", __func__);
naddr = htonl(ims->ims_haddr);
if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
if (m != m0)
m_freem(m);
CTR1(KTR_IGMPV3, "%s: m_append() failed.",
__func__);
return (-ENOMEM);
}
nbytes += sizeof(in_addr_t);
++msrcs;
if (msrcs == m0srcs)
break;
}
CTR2(KTR_IGMPV3, "%s: msrcs is %d this packet", __func__,
msrcs);
pig->ig_numsrc = htons(msrcs);
nbytes += (msrcs * sizeof(in_addr_t));
}

if (is_source_query && msrcs == 0) {
CTR1(KTR_IGMPV3, "%s: no recorded sources to report", __func__);
if (m != m0)
m_freem(m);
return (0);
}

/*
* We are good to go with first packet.
*/
if (m != m0) {
CTR1(KTR_IGMPV3, "%s: enqueueing first packet", __func__);
m->m_pkthdr.vt_nrecs = 1;
mbufq_enqueue(mq, m);
} else
m->m_pkthdr.vt_nrecs++;

/*
* No further work needed if no source list in packet(s).
*/
if (!record_has_sources)
return (nbytes);

/*
* Whilst sources remain to be announced, we need to allocate
* a new packet and fill out as many sources as will fit.
* Always try for a cluster first.
*/
while (nims != NULL) {
if (mbufq_full(mq)) {
CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
return (-ENOMEM);
}
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m)
m->m_data += IGMP_LEADINGSPACE;
if (m == NULL) {
m = m_gethdr(M_NOWAIT, MT_DATA);
if (m)
M_ALIGN(m, IGMP_LEADINGSPACE);
}
if (m == NULL)
return (-ENOMEM);
igmp_save_context(m, ifp);
md = m_getptr(m, 0, &off);
pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + off);
CTR1(KTR_IGMPV3, "%s: allocated next packet", __func__);

if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
if (m != m0)
m_freem(m);
CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
return (-ENOMEM);
}
m->m_pkthdr.vt_nrecs = 1;
nbytes += sizeof(struct igmp_grouprec);

m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);

msrcs = 0;
RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
CTR2(KTR_IGMPV3, "%s: visit node 0x%08x", __func__,
ims->ims_haddr);
now = ims_get_mode(inm, ims, 1);
if ((now != mode) ||
(now == mode && mode == MCAST_UNDEFINED)) {
CTR1(KTR_IGMPV3, "%s: skip node", __func__);
continue;
}
if (is_source_query && ims->ims_stp == 0) {
CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
__func__);
continue;
}
CTR1(KTR_IGMPV3, "%s: append node", __func__);
naddr = htonl(ims->ims_haddr);
if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
if (m != m0)
m_freem(m);
CTR1(KTR_IGMPV3, "%s: m_append() failed.",
__func__);
return (-ENOMEM);
}
++msrcs;
if (msrcs == m0srcs)
break;
}
pig->ig_numsrc = htons(msrcs);
nbytes += (msrcs * sizeof(in_addr_t));

CTR1(KTR_IGMPV3, "%s: enqueueing next packet", __func__);
mbufq_enqueue(mq, m);
}

return (nbytes);
}

/*
* Type used to mark record pass completion.
* We exploit the fact we can cast to this easily from the
* current filter modes on each ip_msource node.
*/
typedef enum {
REC_NONE = 0x00, /* MCAST_UNDEFINED */
REC_ALLOW = 0x01, /* MCAST_INCLUDE */
REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
REC_FULL = REC_ALLOW | REC_BLOCK
} rectype_t;
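/*
* These values mirror the filter modes in <netinet/in.h>
* (MCAST_UNDEFINED 0, MCAST_INCLUDE 1, MCAST_EXCLUDE 2), so a node's
* mode casts directly to a rectype_t, and (~mode & REC_FULL) yields
* the opposite record type.
*/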

/*
* Enqueue an IGMPv3 filter list change to the given output queue.
*
* Source list filter state is held in an RB-tree. When the filter list
* for a group is changed without changing its mode, we need to compute
* the deltas between T0 and T1 for each source in the filter set,
* and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
*
* As we may potentially queue two record types, and the entire R-B tree
* needs to be walked at once, we break this out into its own function
* so we can generate a tightly packed queue of packets.
*
* XXX This could be written to only use one tree walk, although that makes
* serializing into the mbuf chains a bit harder. For now we do two walks
* which makes things easier on us, and it may or may not be harder on
* the L2 cache.
*
* If successful the size of all data appended to the queue is returned,
* otherwise an error code less than zero is returned, or zero if
* no record(s) were appended.
*/
static int
igmp_v3_enqueue_filter_change(struct mbufq *mq, struct in_multi *inm)
{
static const int MINRECLEN =
sizeof(struct igmp_grouprec) + sizeof(in_addr_t);
struct ifnet *ifp;
struct igmp_grouprec ig;
struct igmp_grouprec *pig;
struct ip_msource *ims, *nims;
struct mbuf *m, *m0, *md;
in_addr_t naddr;
int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
#ifdef KTR
int nallow, nblock;
#endif
uint8_t mode, now, then;
rectype_t crt, drt, nrt;

IN_MULTI_LIST_LOCK_ASSERT();

if (inm->inm_nsrc == 0 ||
(inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0))
return (0);

ifp = inm->inm_ifp; /* interface */
mode = inm->inm_st[1].iss_fmode; /* filter mode at t1 */
crt = REC_NONE; /* current group record type */
drt = REC_NONE; /* mask of completed group record types */
nrt = REC_NONE; /* record type for current node */
m0srcs = 0; /* # sources which will fit in current mbuf chain */
nbytes = 0; /* # of bytes appended to group's state-change queue */
npbytes = 0; /* # of bytes appended this packet */
rsrcs = 0; /* # sources encoded in current record */
schanged = 0; /* # nodes encoded in overall filter change */
#ifdef KTR
nallow = 0; /* # of source entries in ALLOW_NEW */
nblock = 0; /* # of source entries in BLOCK_OLD */
#endif
nims = NULL; /* next tree node pointer */

/*
* For each possible filter record mode.
* The first kind of source we encounter tells us which
* is the first kind of record we start appending.
* If a node transitioned to UNDEFINED at t1, its mode is treated
* as the inverse of the group's filter mode.
*/
while (drt != REC_FULL) {
do {
m0 = mbufq_last(mq);
if (m0 != NULL &&
(m0->m_pkthdr.vt_nrecs + 1 <=
IGMP_V3_REPORT_MAXRECS) &&
(m0->m_pkthdr.len + MINRECLEN) <
(ifp->if_mtu - IGMP_LEADINGSPACE)) {
m = m0;
m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
sizeof(struct igmp_grouprec)) /
sizeof(in_addr_t);
CTR1(KTR_IGMPV3,
"%s: use previous packet", __func__);
} else {
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m)
m->m_data += IGMP_LEADINGSPACE;
if (m == NULL) {
m = m_gethdr(M_NOWAIT, MT_DATA);
if (m)
M_ALIGN(m, IGMP_LEADINGSPACE);
}
if (m == NULL) {
CTR1(KTR_IGMPV3,
"%s: m_get*() failed", __func__);
return (-ENOMEM);
}
m->m_pkthdr.vt_nrecs = 0;
igmp_save_context(m, ifp);
m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
sizeof(struct igmp_grouprec)) /
sizeof(in_addr_t);
npbytes = 0;
CTR1(KTR_IGMPV3,
"%s: allocated new packet", __func__);
}
/*
* Append the IGMP group record header to the
* current packet's data area.
* Recalculate pointer to free space for next
* group record, in case m_append() allocated
* a new mbuf or cluster.
*/
memset(&ig, 0, sizeof(ig));
ig.ig_group = inm->inm_addr;
if (!m_append(m, sizeof(ig), (void *)&ig)) {
if (m != m0)
m_freem(m);
CTR1(KTR_IGMPV3,
"%s: m_append() failed", __func__);
return (-ENOMEM);
}
npbytes += sizeof(struct igmp_grouprec);
if (m != m0) {
/* new packet; offset in chain */
md = m_getptr(m, npbytes -
sizeof(struct igmp_grouprec), &off);
pig = (struct igmp_grouprec *)(mtod(md,
uint8_t *) + off);
} else {
/* current packet; offset from last append */
md = m_last(m);
pig = (struct igmp_grouprec *)(mtod(md,
uint8_t *) + md->m_len -
sizeof(struct igmp_grouprec));
}
/*
* Begin walking the tree for this record type
* pass, or continue from where we left off
* previously if we had to allocate a new packet.
* Only report deltas in-mode at t1.
* We need not report included sources as allowed
* if we are in inclusive mode on the group,
* however the converse is not true.
*/
rsrcs = 0;
if (nims == NULL)
nims = RB_MIN(ip_msource_tree, &inm->inm_srcs);
RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
CTR2(KTR_IGMPV3, "%s: visit node 0x%08x",
__func__, ims->ims_haddr);
now = ims_get_mode(inm, ims, 1);
then = ims_get_mode(inm, ims, 0);
CTR3(KTR_IGMPV3, "%s: mode: t0 %d, t1 %d",
__func__, then, now);
if (now == then) {
CTR1(KTR_IGMPV3,
"%s: skip unchanged", __func__);
continue;
}
if (mode == MCAST_EXCLUDE &&
now == MCAST_INCLUDE) {
CTR1(KTR_IGMPV3,
"%s: skip IN src on EX group",
__func__);
continue;
}
nrt = (rectype_t)now;
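/*
* A node which became UNDEFINED at t1 is reported with the
* inverse of the group's filter mode: e.g. a source dropped
* from an EXCLUDE-mode group (REC_BLOCK) yields
* ~REC_BLOCK & REC_FULL = REC_ALLOW, i.e. an ALLOW_NEW entry.
*/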
if (nrt == REC_NONE)
nrt = (rectype_t)(~mode & REC_FULL);
if (schanged++ == 0) {
crt = nrt;
} else if (crt != nrt)
continue;
naddr = htonl(ims->ims_haddr);
if (!m_append(m, sizeof(in_addr_t),
(void *)&naddr)) {
if (m != m0)
m_freem(m);
CTR1(KTR_IGMPV3,
"%s: m_append() failed", __func__);
return (-ENOMEM);
}
#ifdef KTR
nallow += !!(crt == REC_ALLOW);
nblock += !!(crt == REC_BLOCK);
#endif
if (++rsrcs == m0srcs)
break;
}
/*
* If we did not append any tree nodes on this
* pass, back out of allocations.
*/
if (rsrcs == 0) {
npbytes -= sizeof(struct igmp_grouprec);
if (m != m0) {
CTR1(KTR_IGMPV3,
"%s: m_free(m)", __func__);
m_freem(m);
} else {
CTR1(KTR_IGMPV3,
"%s: m_adj(m, -ig)", __func__);
m_adj(m, -((int)sizeof(
struct igmp_grouprec)));
}
continue;
}
npbytes += (rsrcs * sizeof(in_addr_t));
if (crt == REC_ALLOW)
pig->ig_type = IGMP_ALLOW_NEW_SOURCES;
else if (crt == REC_BLOCK)
pig->ig_type = IGMP_BLOCK_OLD_SOURCES;
pig->ig_numsrc = htons(rsrcs);
/*
* Count the new group record, and enqueue this
* packet if it wasn't already queued.
*/
m->m_pkthdr.vt_nrecs++;
if (m != m0)
mbufq_enqueue(mq, m);
nbytes += npbytes;
} while (nims != NULL);
drt |= crt;
crt = (~crt & REC_FULL);
}

CTR3(KTR_IGMPV3, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
nallow, nblock);

return (nbytes);
}
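
/*
* Merge the group's pending state-change reports into the supplied
* queue. If further retransmissions are pending, merge writable
* copies so that the originals remain queued for retransmission.
*/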
static int
igmp_v3_merge_state_changes(struct in_multi *inm, struct mbufq *scq)
{
struct mbufq *gq;
struct mbuf *m; /* pending state-change */
struct mbuf *m0; /* copy of pending state-change */
struct mbuf *mt; /* last state-change in packet */
int docopy, domerge;
u_int recslen;

docopy = 0;
domerge = 0;
recslen = 0;

IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();

/*
* If there are further pending retransmissions, make a writable
* copy of each queued state-change message before merging.
*/
if (inm->inm_scrv > 0)
docopy = 1;

gq = &inm->inm_scq;
#ifdef KTR
if (mbufq_first(gq) == NULL) {
CTR2(KTR_IGMPV3, "%s: WARNING: queue for inm %p is empty",
__func__, inm);
}
#endif

m = mbufq_first(gq);
while (m != NULL) {
/*
* Only merge the report into the current packet if
* there is sufficient space to do so; an IGMPv3 report
* packet may only contain 65,535 group records.
* Always use a simple mbuf chain concatenation to do this,
* as large state changes for single groups may have
* allocated clusters.
*/
domerge = 0;
mt = mbufq_last(scq);
if (mt != NULL) {
recslen = m_length(m, NULL);

if ((mt->m_pkthdr.vt_nrecs +
m->m_pkthdr.vt_nrecs <=
IGMP_V3_REPORT_MAXRECS) &&
(mt->m_pkthdr.len + recslen <=
(inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE)))
domerge = 1;
}

if (!domerge && mbufq_full(gq)) {
CTR2(KTR_IGMPV3,
"%s: outbound queue full, skipping whole packet %p",
__func__, m);
mt = m->m_nextpkt;
if (!docopy)
m_freem(m);
m = mt;
continue;
}

if (!docopy) {
CTR2(KTR_IGMPV3, "%s: dequeueing %p", __func__, m);
m0 = mbufq_dequeue(gq);
m = m0->m_nextpkt;
} else {
CTR2(KTR_IGMPV3, "%s: copying %p", __func__, m);
m0 = m_dup(m, M_NOWAIT);
if (m0 == NULL)
return (ENOMEM);
m0->m_nextpkt = NULL;
m = m->m_nextpkt;
}

if (!domerge) {
CTR3(KTR_IGMPV3, "%s: queueing %p to scq %p)",
__func__, m0, scq);
mbufq_enqueue(scq, m0);
} else {
struct mbuf *mtl; /* last mbuf of packet mt */

CTR3(KTR_IGMPV3, "%s: merging %p with scq tail %p)",
__func__, m0, mt);

mtl = m_last(mt);
m0->m_flags &= ~M_PKTHDR;
mt->m_pkthdr.len += recslen;
mt->m_pkthdr.vt_nrecs +=
m0->m_pkthdr.vt_nrecs;

mtl->m_next = m0;
}
}

return (0);
}

/*
* Respond to a pending IGMPv3 General Query.
*/
static void
igmp_v3_dispatch_general_query(struct igmp_ifsoftc *igi)
{
struct ifmultiaddr *ifma;
struct ifnet *ifp;
struct in_multi *inm;
int retval __unused, loop;

IN_MULTI_LIST_LOCK_ASSERT();
IGMP_LOCK_ASSERT();
NET_EPOCH_ASSERT();

KASSERT(igi->igi_version == IGMP_VERSION_3,
("%s: called when version %d", __func__, igi->igi_version));

/*
* Check whether there are already packets queued; if so, send them
* first. For a large number of groups, the reply to a general query
* can span many packets, and those must be sent in full before we
* start queueing a new reply.
*/
if (!mbufq_empty(&igi->igi_gq))
goto send;

ifp = igi->igi_ifp;

CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
inm = inm_ifmultiaddr_get_inm(ifma);
if (inm == NULL)
continue;
KASSERT(ifp == inm->inm_ifp,
("%s: inconsistent ifp", __func__));

switch (inm->inm_state) {
case IGMP_NOT_MEMBER:
case IGMP_SILENT_MEMBER:
break;
case IGMP_REPORTING_MEMBER:
case IGMP_IDLE_MEMBER:
case IGMP_LAZY_MEMBER:
case IGMP_SLEEPING_MEMBER:
case IGMP_AWAKENING_MEMBER:
inm->inm_state = IGMP_REPORTING_MEMBER;
retval = igmp_v3_enqueue_group_record(&igi->igi_gq,
inm, 0, 0, 0);
CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
__func__, retval);
break;
case IGMP_G_QUERY_PENDING_MEMBER:
case IGMP_SG_QUERY_PENDING_MEMBER:
case IGMP_LEAVING_MEMBER:
break;
}
}

send:
loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
igmp_dispatch_queue(&igi->igi_gq, IGMP_MAX_RESPONSE_BURST, loop);

/*
* Slew transmission of bursts over 500ms intervals.
*/
if (mbufq_first(&igi->igi_gq) != NULL) {
igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY(
IGMP_RESPONSE_BURST_INTERVAL);
V_interface_timers_running = 1;
}
}

/*
* Transmit the next pending IGMP message in the output queue.
*
* We get called from netisr_processqueue(). A mutex private to igmpoq
* will be acquired and released around this routine.
*
* VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
* MRT: Nothing needs to be done, as IGMP traffic is always local to
* a link and uses a link-scope multicast address.
*/
static void
igmp_intr(struct mbuf *m)
{
struct ip_moptions imo;
struct ifnet *ifp;
struct mbuf *ipopts, *m0;
int error;
uint32_t ifindex;

CTR2(KTR_IGMPV3, "%s: transmit %p", __func__, m);

/*
* Set VNET image pointer from enqueued mbuf chain
* before doing anything else. Whilst we use interface
* indexes to guard against interface detach, they are
* unique to each VIMAGE and must be retrieved.
*/
CURVNET_SET((struct vnet *)(m->m_pkthdr.PH_loc.ptr));
ifindex = igmp_restore_context(m);

/*
* Check if the ifnet still exists. This limits the scope of
* any race in the absence of a global ifp lock for low cost
* (an array lookup).
*/
ifp = ifnet_byindex(ifindex);
if (ifp == NULL) {
CTR3(KTR_IGMPV3, "%s: dropped %p as ifindex %u went away.",
__func__, m, ifindex);
m_freem(m);
IPSTAT_INC(ips_noroute);
goto out;
}

ipopts = V_igmp_sendra ? m_raopt : NULL;

imo.imo_multicast_ttl = 1;
imo.imo_multicast_vif = -1;
imo.imo_multicast_loop = (V_ip_mrouter != NULL);

/*
* If the user requested that IGMP traffic be explicitly
* redirected to the loopback interface (e.g. they are running a
* MANET interface and the routing protocol needs to see the
* updates), handle this now.
*/
if (m->m_flags & M_IGMP_LOOP)
imo.imo_multicast_ifp = V_loif;
else
imo.imo_multicast_ifp = ifp;

if (m->m_flags & M_IGMPV2) {
m0 = m;
} else {
m0 = igmp_v3_encap_report(ifp, m);
if (m0 == NULL) {
CTR2(KTR_IGMPV3, "%s: dropped %p", __func__, m);
m_freem(m);
IPSTAT_INC(ips_odropped);
goto out;
}
}

igmp_scrub_context(m0);
m_clrprotoflags(m);
m0->m_pkthdr.rcvif = V_loif;
#ifdef MAC
mac_netinet_igmp_send(ifp, m0);
#endif
error = ip_output(m0, ipopts, NULL, 0, &imo, NULL);
if (error) {
CTR3(KTR_IGMPV3, "%s: ip_output(%p) = %d", __func__, m0, error);
goto out;
}

IGMPSTAT_INC(igps_snd_reports);

out:
/*
* We must restore the existing vnet pointer before
* continuing as we are run from netisr context.
*/
CURVNET_RESTORE();
}

/*
* Encapsulate an IGMPv3 report.
*
* The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf
* chain has already had its IP/IGMPv3 header prepended. In this case
* the function will not attempt to prepend; the lengths and checksums
* will however be re-computed.
*
* Returns a pointer to the new mbuf chain head, or NULL if the
* allocation failed.
*/
static struct mbuf *
igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m)
{
struct igmp_report *igmp;
struct ip *ip;
int hdrlen, igmpreclen;

KASSERT((m->m_flags & M_PKTHDR),
("%s: mbuf chain %p is !M_PKTHDR", __func__, m));

igmpreclen = m_length(m, NULL);
hdrlen = sizeof(struct ip) + sizeof(struct igmp_report);

if (m->m_flags & M_IGMPV3_HDR) {
igmpreclen -= hdrlen;
} else {
M_PREPEND(m, hdrlen, M_NOWAIT);
if (m == NULL)
return (NULL);
m->m_flags |= M_IGMPV3_HDR;
}

CTR2(KTR_IGMPV3, "%s: igmpreclen is %d", __func__, igmpreclen);

m->m_data += sizeof(struct ip);
m->m_len -= sizeof(struct ip);

igmp = mtod(m, struct igmp_report *);
igmp->ir_type = IGMP_v3_HOST_MEMBERSHIP_REPORT;
igmp->ir_rsv1 = 0;
igmp->ir_rsv2 = 0;
igmp->ir_numgrps = htons(m->m_pkthdr.vt_nrecs);
igmp->ir_cksum = 0;
igmp->ir_cksum = in_cksum(m, sizeof(struct igmp_report) + igmpreclen);
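/*
* m_data points just past the IP header here, so the checksum
* covers the report header and all group records, but not the
* IP header; ip_output() computes the IP checksum separately.
*/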
m->m_pkthdr.vt_nrecs = 0;

m->m_data -= sizeof(struct ip);
m->m_len += sizeof(struct ip);

ip = mtod(m, struct ip *);
ip->ip_tos = IPTOS_PREC_INTERNETCONTROL;
ip->ip_len = htons(hdrlen + igmpreclen);
ip->ip_off = htons(IP_DF);
ip->ip_p = IPPROTO_IGMP;
ip->ip_sum = 0;

ip->ip_src.s_addr = INADDR_ANY;

if (m->m_flags & M_IGMP_LOOP) {
struct in_ifaddr *ia;

IFP_TO_IA(ifp, ia);
if (ia != NULL)
ip->ip_src = ia->ia_addr.sin_addr;
}

ip->ip_dst.s_addr = htonl(INADDR_ALLRPTS_GROUP);

return (m);
}

#ifdef KTR
static char *
igmp_rec_type_to_str(const int type)
{

switch (type) {
case IGMP_CHANGE_TO_EXCLUDE_MODE:
return "TO_EX";
break;
case IGMP_CHANGE_TO_INCLUDE_MODE:
return "TO_IN";
break;
case IGMP_MODE_IS_EXCLUDE:
return "MODE_EX";
break;
case IGMP_MODE_IS_INCLUDE:
return "MODE_IN";
break;
case IGMP_ALLOW_NEW_SOURCES:
return "ALLOW_NEW";
break;
case IGMP_BLOCK_OLD_SOURCES:
return "BLOCK_OLD";
break;
default:
break;
}
return "unknown";
}
#endif

#ifdef VIMAGE
static void
vnet_igmp_init(const void *unused __unused)
{

netisr_register_vnet(&igmp_nh);
}
VNET_SYSINIT(vnet_igmp_init, SI_SUB_PROTO_MC, SI_ORDER_ANY,
vnet_igmp_init, NULL);

static void
vnet_igmp_uninit(const void *unused __unused)
{

/* This can happen when we shutdown the entire network stack. */
CTR1(KTR_IGMPV3, "%s: tearing down", __func__);

netisr_unregister_vnet(&igmp_nh);
}
VNET_SYSUNINIT(vnet_igmp_uninit, SI_SUB_PROTO_MC, SI_ORDER_ANY,
vnet_igmp_uninit, NULL);
#endif

#ifdef DDB
DB_SHOW_COMMAND(igi_list, db_show_igi_list)
{
struct igmp_ifsoftc *igi, *tigi;
LIST_HEAD(_igi_list, igmp_ifsoftc) *igi_head;

if (!have_addr) {
db_printf("usage: show igi_list <addr>\n");
return;
}
igi_head = (struct _igi_list *)addr;

LIST_FOREACH_SAFE(igi, igi_head, igi_link, tigi) {
db_printf("igmp_ifsoftc %p:\n", igi);
db_printf(" ifp %p\n", igi->igi_ifp);
db_printf(" version %u\n", igi->igi_version);
db_printf(" v1_timer %u\n", igi->igi_v1_timer);
db_printf(" v2_timer %u\n", igi->igi_v2_timer);
db_printf(" v3_timer %u\n", igi->igi_v3_timer);
db_printf(" flags %#x\n", igi->igi_flags);
db_printf(" rv %u\n", igi->igi_rv);
db_printf(" qi %u\n", igi->igi_qi);
db_printf(" qri %u\n", igi->igi_qri);
db_printf(" uri %u\n", igi->igi_uri);
/* struct mbufq igi_gq; */
db_printf("\n");
}
}
#endif

static int
igmp_modevent(module_t mod, int type, void *unused __unused)
{

switch (type) {
case MOD_LOAD:
CTR1(KTR_IGMPV3, "%s: initializing", __func__);
IGMP_LOCK_INIT();
m_raopt = igmp_ra_alloc();
netisr_register(&igmp_nh);
callout_init(&igmpslow_callout, 1);
callout_reset(&igmpslow_callout, hz / IGMP_SLOWHZ,
igmp_slowtimo, NULL);
callout_init(&igmpfast_callout, 1);
callout_reset(&igmpfast_callout, hz / IGMP_FASTHZ,
igmp_fasttimo, NULL);
break;
case MOD_UNLOAD:
CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
netisr_unregister(&igmp_nh);
m_free(m_raopt);
m_raopt = NULL;
IGMP_LOCK_DESTROY();
break;
default:
return (EOPNOTSUPP);
}
return (0);
}

static moduledata_t igmp_mod = {
"igmp",
igmp_modevent,
0
};
DECLARE_MODULE(igmp, igmp_mod, SI_SUB_PROTO_MC, SI_ORDER_MIDDLE);