GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/netinet6/in6_mcast.c
1
/*-
2
* SPDX-License-Identifier: BSD-3-Clause
3
*
4
* Copyright (c) 2009 Bruce Simpson.
5
* All rights reserved.
6
*
7
* Redistribution and use in source and binary forms, with or without
8
* modification, are permitted provided that the following conditions
9
* are met:
10
* 1. Redistributions of source code must retain the above copyright
11
* notice, this list of conditions and the following disclaimer.
12
* 2. Redistributions in binary form must reproduce the above copyright
13
* notice, this list of conditions and the following disclaimer in the
14
* documentation and/or other materials provided with the distribution.
15
* 3. The name of the author may not be used to endorse or promote
16
* products derived from this software without specific prior written
17
* permission.
18
*
19
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29
* SUCH DAMAGE.
30
*/
31
32
/*
33
* IPv6 multicast socket, group, and socket option processing module.
34
* Normative references: RFC 2292, RFC 3493, RFC 3542, RFC 3678, RFC 3810.
35
*/
36
37
#include <sys/cdefs.h>
38
#include "opt_inet6.h"
39
40
#include <sys/param.h>
41
#include <sys/systm.h>
42
#include <sys/kernel.h>
43
#include <sys/ktr.h>
44
#include <sys/malloc.h>
45
#include <sys/mbuf.h>
46
#include <sys/protosw.h>
47
#include <sys/socket.h>
48
#include <sys/socketvar.h>
49
#include <sys/sysctl.h>
50
#include <sys/priv.h>
51
#include <sys/taskqueue.h>
52
#include <sys/tree.h>
53
54
#include <net/if.h>
55
#include <net/if_var.h>
56
#include <net/if_dl.h>
57
#include <net/if_private.h>
58
#include <net/route.h>
59
#include <net/route/nhop.h>
60
#include <net/vnet.h>
61
62
#include <netinet/in.h>
63
#include <netinet/udp.h>
64
#include <netinet/in_var.h>
65
#include <netinet/ip_var.h>
66
#include <netinet/udp_var.h>
67
#include <netinet6/in6_fib.h>
68
#include <netinet6/in6_var.h>
69
#include <netinet/ip6.h>
70
#include <netinet/icmp6.h>
71
#include <netinet6/ip6_var.h>
72
#include <netinet/in_pcb.h>
73
#include <netinet/tcp_var.h>
74
#include <netinet6/nd6.h>
75
#include <netinet6/mld6_var.h>
76
#include <netinet6/scope6_var.h>
77
78
#ifndef KTR_MLD
79
#define KTR_MLD KTR_INET6
80
#endif
81
82
#ifndef __SOCKUNION_DECLARED
83
union sockunion {
84
struct sockaddr_storage ss;
85
struct sockaddr sa;
86
struct sockaddr_dl sdl;
87
struct sockaddr_in6 sin6;
88
};
89
typedef union sockunion sockunion_t;
90
#define __SOCKUNION_DECLARED
91
#endif /* __SOCKUNION_DECLARED */
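/*
 * Illustrative note (editor's addition, not upstream code): sockunion_t
 * lets the socket-option handlers below copy a struct group_source_req
 * in from userland and then view its embedded sockaddr_storage as a
 * sockaddr_in6 without repeated casts, e.g.:
 *
 *	sockunion_t *gsa = (sockunion_t *)&gsr.gsr_group;
 *	if (gsa->sin6.sin6_family == AF_INET6)
 *		... operate on gsa->sin6.sin6_addr ...
 *
 * where 'gsr' is a locally declared struct group_source_req.
 */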
92
93
static MALLOC_DEFINE(M_IN6MFILTER, "in6_mfilter",
94
"IPv6 multicast PCB-layer source filter");
95
MALLOC_DEFINE(M_IP6MADDR, "in6_multi", "IPv6 multicast group");
96
static MALLOC_DEFINE(M_IP6MOPTS, "ip6_moptions", "IPv6 multicast options");
97
static MALLOC_DEFINE(M_IP6MSOURCE, "ip6_msource",
98
"IPv6 multicast MLD-layer source filter");
99
100
RB_GENERATE(ip6_msource_tree, ip6_msource, im6s_link, ip6_msource_cmp);
101
102
/*
103
* Locking:
104
* - Lock order is: IN6_MULTI_LOCK, INP_WLOCK, IN6_MULTI_LIST_LOCK, MLD_LOCK,
105
* IF_ADDR_LOCK.
106
* - The IF_ADDR_LOCK is implicitly taken by in6m_lookup() earlier, however
107
* it can be taken by code in net/if.c also.
108
* - ip6_moptions and in6_mfilter are covered by the INP_WLOCK.
109
*
110
* struct in6_multi is covered by IN6_MULTI_LOCK. There isn't strictly
111
* any need for in6_multi itself to be virtualized -- it is bound to an ifp
112
* anyway no matter what happens.
113
*/
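/*
 * Illustrative sketch (editor's addition, not upstream code): a path that
 * needs several of these locks would acquire them in the order documented
 * above and release them in reverse, e.g.:
 *
 *	IN6_MULTI_LOCK();
 *	INP_WLOCK(inp);
 *	IN6_MULTI_LIST_LOCK();
 *	... modify membership state ...
 *	IN6_MULTI_LIST_UNLOCK();
 *	INP_WUNLOCK(inp);
 *	IN6_MULTI_UNLOCK();
 *
 * 'inp' stands for whichever inpcb the caller is operating on.
 */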
114
struct mtx in6_multi_list_mtx;
115
MTX_SYSINIT(in6_multi_mtx, &in6_multi_list_mtx, "in6_multi_list_mtx", MTX_DEF);
116
117
struct mtx in6_multi_free_mtx;
118
MTX_SYSINIT(in6_multi_free_mtx, &in6_multi_free_mtx, "in6_multi_free_mtx", MTX_DEF);
119
120
struct sx in6_multi_sx;
121
SX_SYSINIT(in6_multi_sx, &in6_multi_sx, "in6_multi_sx");
122
123
static void im6f_commit(struct in6_mfilter *);
124
static int im6f_get_source(struct in6_mfilter *imf,
125
const struct sockaddr_in6 *psin,
126
struct in6_msource **);
127
static struct in6_msource *
128
im6f_graft(struct in6_mfilter *, const uint8_t,
129
const struct sockaddr_in6 *);
130
static void im6f_leave(struct in6_mfilter *);
131
static int im6f_prune(struct in6_mfilter *, const struct sockaddr_in6 *);
132
static void im6f_purge(struct in6_mfilter *);
133
static void im6f_rollback(struct in6_mfilter *);
134
static void im6f_reap(struct in6_mfilter *);
135
static struct in6_mfilter *
136
im6o_match_group(const struct ip6_moptions *,
137
const struct ifnet *, const struct sockaddr *);
138
static struct in6_msource *
139
im6o_match_source(struct in6_mfilter *, const struct sockaddr *);
140
static void im6s_merge(struct ip6_msource *ims,
141
const struct in6_msource *lims, const int rollback);
142
static int in6_getmulti(struct ifnet *, const struct in6_addr *,
143
struct in6_multi **);
144
static int in6_joingroup_locked(struct ifnet *, const struct in6_addr *,
145
struct in6_mfilter *, struct in6_multi **, int);
146
static int in6m_get_source(struct in6_multi *inm,
147
const struct in6_addr *addr, const int noalloc,
148
struct ip6_msource **pims);
149
#ifdef KTR
150
static int in6m_is_ifp_detached(const struct in6_multi *);
151
#endif
152
static int in6m_merge(struct in6_multi *, /*const*/ struct in6_mfilter *);
153
static void in6m_purge(struct in6_multi *);
154
static void in6m_reap(struct in6_multi *);
155
static struct ip6_moptions *
156
in6p_findmoptions(struct inpcb *);
157
static int in6p_get_source_filters(struct inpcb *, struct sockopt *);
158
static int in6p_join_group(struct inpcb *, struct sockopt *);
159
static int in6p_leave_group(struct inpcb *, struct sockopt *);
160
static struct ifnet *
161
in6p_lookup_mcast_ifp(const struct inpcb *,
162
const struct sockaddr_in6 *);
163
static int in6p_block_unblock_source(struct inpcb *, struct sockopt *);
164
static int in6p_set_multicast_if(struct inpcb *, struct sockopt *);
165
static int in6p_set_source_filters(struct inpcb *, struct sockopt *);
166
static int sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS);
167
168
SYSCTL_DECL(_net_inet6_ip6); /* XXX Not in any common header. */
169
170
static SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, mcast,
171
CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
172
"IPv6 multicast");
173
174
static u_long in6_mcast_maxgrpsrc = IPV6_MAX_GROUP_SRC_FILTER;
175
SYSCTL_ULONG(_net_inet6_ip6_mcast, OID_AUTO, maxgrpsrc,
176
CTLFLAG_RWTUN, &in6_mcast_maxgrpsrc, 0,
177
"Max source filters per group");
178
179
static u_long in6_mcast_maxsocksrc = IPV6_MAX_SOCK_SRC_FILTER;
180
SYSCTL_ULONG(_net_inet6_ip6_mcast, OID_AUTO, maxsocksrc,
181
CTLFLAG_RWTUN, &in6_mcast_maxsocksrc, 0,
182
"Max source filters per socket");
183
184
/* TODO Virtualize this switch. */
185
int in6_mcast_loop = IPV6_DEFAULT_MULTICAST_LOOP;
186
SYSCTL_INT(_net_inet6_ip6_mcast, OID_AUTO, loop, CTLFLAG_RWTUN,
187
&in6_mcast_loop, 0, "Loopback multicast datagrams by default");
188
189
static SYSCTL_NODE(_net_inet6_ip6_mcast, OID_AUTO, filters,
190
CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_ip6_mcast_filters,
191
"Per-interface stack-wide source filters");
192
193
#ifdef KTR
194
/*
195
* Inline function which wraps assertions for a valid ifp.
196
* The ifnet layer will set the ifma's ifp pointer to NULL if the ifp
197
* is detached.
198
*/
199
static int __inline
200
in6m_is_ifp_detached(const struct in6_multi *inm)
201
{
202
struct ifnet *ifp;
203
204
KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__));
205
ifp = inm->in6m_ifma->ifma_ifp;
206
if (ifp != NULL) {
207
/*
208
* Sanity check that network-layer notion of ifp is the
209
* same as that of link-layer.
210
*/
211
KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__));
212
}
213
214
return (ifp == NULL);
215
}
216
#endif
217
218
/*
219
* Initialize an in6_mfilter structure to a known state at t0, t1
220
* with an empty source filter list.
221
*/
222
static __inline void
223
im6f_init(struct in6_mfilter *imf, const int st0, const int st1)
224
{
225
memset(imf, 0, sizeof(struct in6_mfilter));
226
RB_INIT(&imf->im6f_sources);
227
imf->im6f_st[0] = st0;
228
imf->im6f_st[1] = st1;
229
}
230
231
struct in6_mfilter *
232
ip6_mfilter_alloc(const int mflags, const int st0, const int st1)
233
{
234
struct in6_mfilter *imf;
235
236
imf = malloc(sizeof(*imf), M_IN6MFILTER, mflags);
237
238
if (imf != NULL)
239
im6f_init(imf, st0, st1);
240
241
return (imf);
242
}
243
244
void
245
ip6_mfilter_free(struct in6_mfilter *imf)
246
{
247
248
im6f_purge(imf);
249
free(imf, M_IN6MFILTER);
250
}
251
252
/*
253
* Find an IPv6 multicast group entry for this ip6_moptions instance
254
* which matches the specified group, and optionally an interface.
255
* Return a pointer to the matching in6_mfilter, or NULL if not found.
256
*/
257
static struct in6_mfilter *
258
im6o_match_group(const struct ip6_moptions *imo, const struct ifnet *ifp,
259
const struct sockaddr *group)
260
{
261
const struct sockaddr_in6 *gsin6;
262
struct in6_mfilter *imf;
263
struct in6_multi *inm;
264
265
gsin6 = (const struct sockaddr_in6 *)group;
266
267
IP6_MFILTER_FOREACH(imf, &imo->im6o_head) {
268
inm = imf->im6f_in6m;
269
if (inm == NULL)
270
continue;
271
if ((ifp == NULL || (inm->in6m_ifp == ifp)) &&
272
IN6_ARE_ADDR_EQUAL(&inm->in6m_addr,
273
&gsin6->sin6_addr)) {
274
break;
275
}
276
}
277
return (imf);
278
}
279
280
/*
281
* Find an IPv6 multicast source entry in the given socket-layer filter
282
* set (imf) which matches the specified source address.
283
*
284
* XXX TODO: The scope ID, if present in src, is stripped before
285
* any comparison. We SHOULD enforce scope/zone checks where the source
286
* filter entry has a link scope.
287
*
288
* NOTE: This does not check if the entry is in-mode, merely if
289
* it exists, which may not be the desired behaviour.
290
*/
291
static struct in6_msource *
292
im6o_match_source(struct in6_mfilter *imf, const struct sockaddr *src)
293
{
294
struct ip6_msource find;
295
struct ip6_msource *ims;
296
const sockunion_t *psa;
297
298
KASSERT(src->sa_family == AF_INET6, ("%s: !AF_INET6", __func__));
299
300
psa = (const sockunion_t *)src;
301
find.im6s_addr = psa->sin6.sin6_addr;
302
in6_clearscope(&find.im6s_addr); /* XXX */
303
ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find);
304
305
return ((struct in6_msource *)ims);
306
}
307
308
/*
309
* Perform filtering for multicast datagrams on a socket by group and source.
310
*
311
* Returns 0 if a datagram should be allowed through, or various error codes
312
* if the socket was not a member of the group, or the source was muted, etc.
313
*/
314
int
315
im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp,
316
const struct sockaddr *group, const struct sockaddr *src)
317
{
318
struct in6_mfilter *imf;
319
struct in6_msource *ims;
320
int mode;
321
322
KASSERT(ifp != NULL, ("%s: null ifp", __func__));
323
324
imf = im6o_match_group(imo, ifp, group);
325
if (imf == NULL)
326
return (MCAST_NOTGMEMBER);
327
328
/*
329
* Check if the source was included in an (S,G) join.
330
* Allow reception on exclusive memberships by default,
331
* reject reception on inclusive memberships by default.
332
* Exclude source only if an in-mode exclude filter exists.
333
* Include source only if an in-mode include filter exists.
334
* NOTE: We are comparing group state here at MLD t1 (now)
335
* with socket-layer t0 (since last downcall).
336
*/
337
mode = imf->im6f_st[1];
338
ims = im6o_match_source(imf, src);
339
340
if ((ims == NULL && mode == MCAST_INCLUDE) ||
341
(ims != NULL && ims->im6sl_st[0] != mode))
342
return (MCAST_NOTSMEMBER);
343
344
return (MCAST_PASS);
345
}
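/*
 * Illustrative sketch (editor's addition, not upstream code): a transport
 * input path delivering to a socket with multicast options would typically
 * consult the filter roughly as follows, where 'imo', 'ifp', 'group' and
 * 'src' are hypothetical locals describing the received datagram:
 *
 *	if (im6o_mc_filter(imo, ifp, (struct sockaddr *)&group,
 *	    (struct sockaddr *)&src) != MCAST_PASS) {
 *		... skip delivery to this socket ...
 *	}
 */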
346
347
/*
348
* Look up an in6_multi record for an IPv6 multicast address
349
* on the interface ifp.
350
* If no record found, return NULL.
351
*
352
* SMPng: The IN6_MULTI_LOCK must be held and the caller must be in the network epoch.
353
*/
354
struct in6_multi *
355
in6m_lookup_locked(struct ifnet *ifp, const struct in6_addr *mcaddr)
356
{
357
struct ifmultiaddr *ifma;
358
struct in6_multi *inm;
359
360
NET_EPOCH_ASSERT();
361
362
CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
363
inm = in6m_ifmultiaddr_get_inm(ifma);
364
if (inm == NULL)
365
continue;
366
if (IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, mcaddr))
367
return (inm);
368
}
369
return (NULL);
370
}
371
372
/*
373
* Find and return a reference to an in6_multi record for (ifp, group),
374
* and bump its reference count.
375
* If one does not exist, try to allocate it, and update link-layer multicast
376
* filters on ifp to listen for group.
377
* Assumes the IN6_MULTI lock is held across the call.
378
* Return 0 if successful, otherwise return an appropriate error code.
379
*/
380
static int
381
in6_getmulti(struct ifnet *ifp, const struct in6_addr *group,
382
struct in6_multi **pinm)
383
{
384
struct epoch_tracker et;
385
struct sockaddr_in6 gsin6;
386
struct ifmultiaddr *ifma;
387
struct in6_multi *inm;
388
int error;
389
390
error = 0;
391
392
/*
393
* XXX: Accesses to ifma_protospec must be covered by IF_ADDR_LOCK;
394
* if_addmulti() takes this mutex itself, so we must drop and
395
* re-acquire around the call.
396
*/
397
IN6_MULTI_LOCK_ASSERT();
398
IN6_MULTI_LIST_LOCK();
399
IF_ADDR_WLOCK(ifp);
400
NET_EPOCH_ENTER(et);
401
/*
402
* Does ifp support IPv6 multicasts?
403
*/
404
if (ifp->if_afdata[AF_INET6] == NULL)
405
error = ENODEV;
406
else
407
inm = in6m_lookup_locked(ifp, group);
408
NET_EPOCH_EXIT(et);
409
410
if (error != 0)
411
goto out_locked;
412
413
if (inm != NULL) {
414
/*
415
* If we already joined this group, just bump the
416
* refcount and return it.
417
*/
418
KASSERT(inm->in6m_refcount >= 1,
419
("%s: bad refcount %d", __func__, inm->in6m_refcount));
420
in6m_acquire_locked(inm);
421
*pinm = inm;
422
goto out_locked;
423
}
424
425
memset(&gsin6, 0, sizeof(gsin6));
426
gsin6.sin6_family = AF_INET6;
427
gsin6.sin6_len = sizeof(struct sockaddr_in6);
428
gsin6.sin6_addr = *group;
429
430
/*
431
* Check if a link-layer group is already associated
432
* with this network-layer group on the given ifnet.
433
*/
434
IN6_MULTI_LIST_UNLOCK();
435
IF_ADDR_WUNLOCK(ifp);
436
error = if_addmulti(ifp, (struct sockaddr *)&gsin6, &ifma);
437
if (error != 0)
438
return (error);
439
IN6_MULTI_LIST_LOCK();
440
IF_ADDR_WLOCK(ifp);
441
442
/*
443
* If something other than netinet6 is occupying the link-layer
444
* group, print a meaningful error message and back out of
445
* the allocation.
446
* Otherwise, bump the refcount on the existing network-layer
447
* group association and return it.
448
*/
449
if (ifma->ifma_protospec != NULL) {
450
inm = (struct in6_multi *)ifma->ifma_protospec;
451
#ifdef INVARIANTS
452
KASSERT(ifma->ifma_addr != NULL, ("%s: no ifma_addr",
453
__func__));
454
KASSERT(ifma->ifma_addr->sa_family == AF_INET6,
455
("%s: ifma not AF_INET6", __func__));
456
KASSERT(inm != NULL, ("%s: no ifma_protospec", __func__));
457
if (inm->in6m_ifma != ifma || inm->in6m_ifp != ifp ||
458
!IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, group))
459
panic("%s: ifma %p is inconsistent with %p (%p)",
460
__func__, ifma, inm, group);
461
#endif
462
in6m_acquire_locked(inm);
463
*pinm = inm;
464
goto out_locked;
465
}
466
467
IF_ADDR_WLOCK_ASSERT(ifp);
468
469
/*
470
* A new in6_multi record is needed; allocate and initialize it.
471
* We DO NOT perform an MLD join as the in6_ layer may need to
472
* push an initial source list down to MLD to support SSM.
473
*
474
* The initial source filter state is INCLUDE, {} as per the RFC.
475
* Pending state-changes per group are subject to a bounds check.
476
*/
477
inm = malloc(sizeof(*inm), M_IP6MADDR, M_NOWAIT | M_ZERO);
478
if (inm == NULL) {
479
IN6_MULTI_LIST_UNLOCK();
480
IF_ADDR_WUNLOCK(ifp);
481
if_delmulti_ifma(ifma);
482
return (ENOMEM);
483
}
484
inm->in6m_addr = *group;
485
inm->in6m_ifp = ifp;
486
inm->in6m_mli = MLD_IFINFO(ifp);
487
inm->in6m_ifma = ifma;
488
inm->in6m_refcount = 1;
489
inm->in6m_state = MLD_NOT_MEMBER;
490
mbufq_init(&inm->in6m_scq, MLD_MAX_STATE_CHANGES);
491
492
inm->in6m_st[0].iss_fmode = MCAST_UNDEFINED;
493
inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
494
RB_INIT(&inm->in6m_srcs);
495
496
ifma->ifma_protospec = inm;
497
*pinm = inm;
498
499
out_locked:
500
IN6_MULTI_LIST_UNLOCK();
501
IF_ADDR_WUNLOCK(ifp);
502
return (error);
503
}
504
505
/*
506
* Drop a reference to an in6_multi record.
507
*
508
* If the refcount drops to 0, free the in6_multi record and
509
* delete the underlying link-layer membership.
510
*/
511
static void
512
in6m_release(struct in6_multi *inm)
513
{
514
struct ifmultiaddr *ifma;
515
struct ifnet *ifp;
516
517
CTR2(KTR_MLD, "%s: refcount is %d", __func__, inm->in6m_refcount);
518
519
MPASS(inm->in6m_refcount == 0);
520
CTR2(KTR_MLD, "%s: freeing inm %p", __func__, inm);
521
522
ifma = inm->in6m_ifma;
523
ifp = inm->in6m_ifp;
524
MPASS(ifma->ifma_llifma == NULL);
525
526
/* XXX this access is not covered by IF_ADDR_LOCK */
527
CTR2(KTR_MLD, "%s: purging ifma %p", __func__, ifma);
528
KASSERT(ifma->ifma_protospec == NULL,
529
("%s: ifma_protospec != NULL", __func__));
530
if (ifp == NULL)
531
ifp = ifma->ifma_ifp;
532
533
if (ifp != NULL) {
534
CURVNET_SET(ifp->if_vnet);
535
in6m_purge(inm);
536
free(inm, M_IP6MADDR);
537
if_delmulti_ifma_flags(ifma, 1);
538
CURVNET_RESTORE();
539
if_rele(ifp);
540
} else {
541
in6m_purge(inm);
542
free(inm, M_IP6MADDR);
543
if_delmulti_ifma_flags(ifma, 1);
544
}
545
}
546
547
/*
548
* Interface detach can happen in a taskqueue thread context, so we must use a
549
* dedicated thread to avoid deadlocks when draining in6m_release tasks.
550
*/
551
TASKQUEUE_DEFINE_THREAD(in6m_free);
552
static struct in6_multi_head in6m_free_list = SLIST_HEAD_INITIALIZER();
553
static void in6m_release_task(void *arg __unused, int pending __unused);
554
static struct task in6m_free_task = TASK_INITIALIZER(0, in6m_release_task, NULL);
555
556
void
557
in6m_release_list_deferred(struct in6_multi_head *inmh)
558
{
559
if (SLIST_EMPTY(inmh))
560
return;
561
mtx_lock(&in6_multi_free_mtx);
562
SLIST_CONCAT(&in6m_free_list, inmh, in6_multi, in6m_nrele);
563
mtx_unlock(&in6_multi_free_mtx);
564
taskqueue_enqueue(taskqueue_in6m_free, &in6m_free_task);
565
}
566
567
void
568
in6m_release_wait(void *arg __unused)
569
{
570
571
/*
572
* Make sure all pending multicast addresses are freed before
573
* the VNET or network device is destroyed:
574
*/
575
taskqueue_drain_all(taskqueue_in6m_free);
576
}
577
#ifdef VIMAGE
578
/* XXX-BZ FIXME, see D24914. */
579
VNET_SYSUNINIT(in6m_release_wait, SI_SUB_PROTO_DOMAIN, SI_ORDER_FIRST, in6m_release_wait, NULL);
580
#endif
581
582
void
583
in6m_disconnect_locked(struct in6_multi_head *inmh, struct in6_multi *inm)
584
{
585
struct ifnet *ifp;
586
struct ifaddr *ifa;
587
struct in6_ifaddr *ifa6;
588
struct in6_multi_mship *imm, *imm_tmp;
589
struct ifmultiaddr *ifma, *ll_ifma;
590
591
IN6_MULTI_LIST_LOCK_ASSERT();
592
593
ifp = inm->in6m_ifp;
594
if (ifp == NULL)
595
return; /* already called */
596
597
inm->in6m_ifp = NULL;
598
IF_ADDR_WLOCK_ASSERT(ifp);
599
ifma = inm->in6m_ifma;
600
if (ifma == NULL)
601
return;
602
603
if_ref(ifp);
604
if (ifma->ifma_flags & IFMA_F_ENQUEUED) {
605
CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifmultiaddr, ifma_link);
606
ifma->ifma_flags &= ~IFMA_F_ENQUEUED;
607
}
608
MCDPRINTF("removed ifma: %p from %s\n", ifma, ifp->if_xname);
609
if ((ll_ifma = ifma->ifma_llifma) != NULL) {
610
MPASS(ifma != ll_ifma);
611
ifma->ifma_llifma = NULL;
612
MPASS(ll_ifma->ifma_llifma == NULL);
613
MPASS(ll_ifma->ifma_ifp == ifp);
614
if (--ll_ifma->ifma_refcount == 0) {
615
if (ll_ifma->ifma_flags & IFMA_F_ENQUEUED) {
616
CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ll_ifma, ifmultiaddr, ifma_link);
617
ll_ifma->ifma_flags &= ~IFMA_F_ENQUEUED;
618
}
619
MCDPRINTF("removed ll_ifma: %p from %s\n", ll_ifma, ifp->if_xname);
620
if_freemulti(ll_ifma);
621
}
622
}
623
CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
624
if (ifa->ifa_addr->sa_family != AF_INET6)
625
continue;
626
ifa6 = (void *)ifa;
627
LIST_FOREACH_SAFE(imm, &ifa6->ia6_memberships,
628
i6mm_chain, imm_tmp) {
629
if (inm == imm->i6mm_maddr) {
630
LIST_REMOVE(imm, i6mm_chain);
631
free(imm, M_IP6MADDR);
632
in6m_rele_locked(inmh, inm);
633
}
634
}
635
}
636
}
637
638
static void
639
in6m_release_task(void *arg __unused, int pending __unused)
640
{
641
struct in6_multi_head in6m_free_tmp;
642
struct in6_multi *inm, *tinm;
643
644
SLIST_INIT(&in6m_free_tmp);
645
mtx_lock(&in6_multi_free_mtx);
646
SLIST_CONCAT(&in6m_free_tmp, &in6m_free_list, in6_multi, in6m_nrele);
647
mtx_unlock(&in6_multi_free_mtx);
648
IN6_MULTI_LOCK();
649
SLIST_FOREACH_SAFE(inm, &in6m_free_tmp, in6m_nrele, tinm) {
650
SLIST_REMOVE_HEAD(&in6m_free_tmp, in6m_nrele);
651
in6m_release(inm);
652
}
653
IN6_MULTI_UNLOCK();
654
}
655
656
/*
657
* Clear recorded source entries for a group.
658
* Used by the MLD code. Caller must hold the IN6_MULTI lock.
659
* FIXME: Should reap.
660
*/
661
void
662
in6m_clear_recorded(struct in6_multi *inm)
663
{
664
struct ip6_msource *ims;
665
666
IN6_MULTI_LIST_LOCK_ASSERT();
667
668
RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) {
669
if (ims->im6s_stp) {
670
ims->im6s_stp = 0;
671
--inm->in6m_st[1].iss_rec;
672
}
673
}
674
KASSERT(inm->in6m_st[1].iss_rec == 0,
675
("%s: iss_rec %d not 0", __func__, inm->in6m_st[1].iss_rec));
676
}
677
678
/*
679
* Record a source as pending for a Source-Group MLDv2 query.
680
* This lives here as it modifies the shared tree.
681
*
682
* inm is the group descriptor.
683
* naddr is the address of the source to record in network-byte order.
684
*
685
* If the net.inet6.mld.sgalloc sysctl is non-zero, we will
686
* lazy-allocate a source node in response to an SG query.
687
* Otherwise, no allocation is performed. This saves some memory
688
* with the trade-off that the source will not be reported to the
689
* router if joined in the window between the query response and
690
* the group actually being joined on the local host.
691
*
692
* VIMAGE: XXX: Currently the mld_sgalloc feature has been removed.
693
* This turns off the allocation of a recorded source entry if
694
* the group has not been joined.
695
*
696
* Return 0 if the source didn't exist or was already marked as recorded.
697
* Return 1 if the source was marked as recorded by this function.
698
* Return <0 if any error occurred (negated errno code).
699
*/
700
int
701
in6m_record_source(struct in6_multi *inm, const struct in6_addr *addr)
702
{
703
struct ip6_msource find;
704
struct ip6_msource *ims, *nims;
705
706
IN6_MULTI_LIST_LOCK_ASSERT();
707
708
find.im6s_addr = *addr;
709
ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find);
710
if (ims && ims->im6s_stp)
711
return (0);
712
if (ims == NULL) {
713
if (inm->in6m_nsrc == in6_mcast_maxgrpsrc)
714
return (-ENOSPC);
715
nims = malloc(sizeof(struct ip6_msource), M_IP6MSOURCE,
716
M_NOWAIT | M_ZERO);
717
if (nims == NULL)
718
return (-ENOMEM);
719
nims->im6s_addr = find.im6s_addr;
720
RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims);
721
++inm->in6m_nsrc;
722
ims = nims;
723
}
724
725
/*
726
* Mark the source as recorded and update the recorded
727
* source count.
728
*/
729
++ims->im6s_stp;
730
++inm->in6m_st[1].iss_rec;
731
732
return (1);
733
}
734
735
/*
736
* Return a pointer to an in6_msource owned by an in6_mfilter,
737
* given its source address.
738
* Lazy-allocate if needed. If this is a new entry its filter state is
739
* undefined at t0.
740
*
741
* imf is the filter set being modified.
742
* addr is the source address.
743
*
744
* SMPng: May be called with locks held; malloc must not block.
745
*/
746
static int
747
im6f_get_source(struct in6_mfilter *imf, const struct sockaddr_in6 *psin,
748
struct in6_msource **plims)
749
{
750
struct ip6_msource find;
751
struct ip6_msource *ims, *nims;
752
struct in6_msource *lims;
753
int error;
754
755
error = 0;
756
ims = NULL;
757
lims = NULL;
758
759
find.im6s_addr = psin->sin6_addr;
760
ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find);
761
lims = (struct in6_msource *)ims;
762
if (lims == NULL) {
763
if (imf->im6f_nsrc == in6_mcast_maxsocksrc)
764
return (ENOSPC);
765
nims = malloc(sizeof(struct in6_msource), M_IN6MFILTER,
766
M_NOWAIT | M_ZERO);
767
if (nims == NULL)
768
return (ENOMEM);
769
lims = (struct in6_msource *)nims;
770
lims->im6s_addr = find.im6s_addr;
771
lims->im6sl_st[0] = MCAST_UNDEFINED;
772
RB_INSERT(ip6_msource_tree, &imf->im6f_sources, nims);
773
++imf->im6f_nsrc;
774
}
775
776
*plims = lims;
777
778
return (error);
779
}
780
781
/*
782
* Graft a source entry into an existing socket-layer filter set,
783
* maintaining any required invariants and checking allocations.
784
*
785
* The source is marked as being in the new filter mode at t1.
786
*
787
* Return the pointer to the new node, otherwise return NULL.
788
*/
789
static struct in6_msource *
790
im6f_graft(struct in6_mfilter *imf, const uint8_t st1,
791
const struct sockaddr_in6 *psin)
792
{
793
struct ip6_msource *nims;
794
struct in6_msource *lims;
795
796
nims = malloc(sizeof(struct in6_msource), M_IN6MFILTER,
797
M_NOWAIT | M_ZERO);
798
if (nims == NULL)
799
return (NULL);
800
lims = (struct in6_msource *)nims;
801
lims->im6s_addr = psin->sin6_addr;
802
lims->im6sl_st[0] = MCAST_UNDEFINED;
803
lims->im6sl_st[1] = st1;
804
RB_INSERT(ip6_msource_tree, &imf->im6f_sources, nims);
805
++imf->im6f_nsrc;
806
807
return (lims);
808
}
809
810
/*
811
* Prune a source entry from an existing socket-layer filter set,
812
* maintaining any required invariants and checking allocations.
813
*
814
* The source is marked as being left at t1; it is not freed.
815
*
816
* Return 0 if no error occurred, otherwise return an errno value.
817
*/
818
static int
819
im6f_prune(struct in6_mfilter *imf, const struct sockaddr_in6 *psin)
820
{
821
struct ip6_msource find;
822
struct ip6_msource *ims;
823
struct in6_msource *lims;
824
825
find.im6s_addr = psin->sin6_addr;
826
ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find);
827
if (ims == NULL)
828
return (ENOENT);
829
lims = (struct in6_msource *)ims;
830
lims->im6sl_st[1] = MCAST_UNDEFINED;
831
return (0);
832
}
833
834
/*
835
* Revert socket-layer filter set deltas at t1 to t0 state.
836
*/
837
static void
838
im6f_rollback(struct in6_mfilter *imf)
839
{
840
struct ip6_msource *ims, *tims;
841
struct in6_msource *lims;
842
843
RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) {
844
lims = (struct in6_msource *)ims;
845
if (lims->im6sl_st[0] == lims->im6sl_st[1]) {
846
/* no change at t1 */
847
continue;
848
} else if (lims->im6sl_st[0] != MCAST_UNDEFINED) {
849
/* revert change to existing source at t1 */
850
lims->im6sl_st[1] = lims->im6sl_st[0];
851
} else {
852
/* revert source added t1 */
853
CTR2(KTR_MLD, "%s: free ims %p", __func__, ims);
854
RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims);
855
free(ims, M_IN6MFILTER);
856
imf->im6f_nsrc--;
857
}
858
}
859
imf->im6f_st[1] = imf->im6f_st[0];
860
}
861
862
/*
863
* Mark socket-layer filter set as INCLUDE {} at t1.
864
*/
865
static void
866
im6f_leave(struct in6_mfilter *imf)
867
{
868
struct ip6_msource *ims;
869
struct in6_msource *lims;
870
871
RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) {
872
lims = (struct in6_msource *)ims;
873
lims->im6sl_st[1] = MCAST_UNDEFINED;
874
}
875
imf->im6f_st[1] = MCAST_INCLUDE;
876
}
877
878
/*
879
* Mark socket-layer filter set deltas as committed.
880
*/
881
static void
882
im6f_commit(struct in6_mfilter *imf)
883
{
884
struct ip6_msource *ims;
885
struct in6_msource *lims;
886
887
RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) {
888
lims = (struct in6_msource *)ims;
889
lims->im6sl_st[0] = lims->im6sl_st[1];
890
}
891
imf->im6f_st[0] = imf->im6f_st[1];
892
}
893
894
/*
895
* Reap unreferenced sources from socket-layer filter set.
896
*/
897
static void
898
im6f_reap(struct in6_mfilter *imf)
899
{
900
struct ip6_msource *ims, *tims;
901
struct in6_msource *lims;
902
903
RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) {
904
lims = (struct in6_msource *)ims;
905
if ((lims->im6sl_st[0] == MCAST_UNDEFINED) &&
906
(lims->im6sl_st[1] == MCAST_UNDEFINED)) {
907
CTR2(KTR_MLD, "%s: free lims %p", __func__, ims);
908
RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims);
909
free(ims, M_IN6MFILTER);
910
imf->im6f_nsrc--;
911
}
912
}
913
}
914
915
/*
916
* Purge socket-layer filter set.
917
*/
918
static void
919
im6f_purge(struct in6_mfilter *imf)
920
{
921
struct ip6_msource *ims, *tims;
922
923
RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) {
924
CTR2(KTR_MLD, "%s: free ims %p", __func__, ims);
925
RB_REMOVE(ip6_msource_tree, &imf->im6f_sources, ims);
926
free(ims, M_IN6MFILTER);
927
imf->im6f_nsrc--;
928
}
929
imf->im6f_st[0] = imf->im6f_st[1] = MCAST_UNDEFINED;
930
KASSERT(RB_EMPTY(&imf->im6f_sources),
931
("%s: im6f_sources not empty", __func__));
932
}
933
934
/*
935
* Look up a source filter entry for a multicast group.
936
*
937
* inm is the group descriptor to work with.
938
* addr is the IPv6 address to look up.
939
* noalloc may be non-zero to suppress allocation of sources.
940
* *pims will be set to the address of the retrieved or allocated source.
941
*
942
* SMPng: NOTE: may be called with locks held.
943
* Return 0 if successful, otherwise return a non-zero error code.
944
*/
945
static int
946
in6m_get_source(struct in6_multi *inm, const struct in6_addr *addr,
947
const int noalloc, struct ip6_msource **pims)
948
{
949
struct ip6_msource find;
950
struct ip6_msource *ims, *nims;
951
#ifdef KTR
952
char ip6tbuf[INET6_ADDRSTRLEN];
953
#endif
954
955
find.im6s_addr = *addr;
956
ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find);
957
if (ims == NULL && !noalloc) {
958
if (inm->in6m_nsrc == in6_mcast_maxgrpsrc)
959
return (ENOSPC);
960
nims = malloc(sizeof(struct ip6_msource), M_IP6MSOURCE,
961
M_NOWAIT | M_ZERO);
962
if (nims == NULL)
963
return (ENOMEM);
964
nims->im6s_addr = *addr;
965
RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims);
966
++inm->in6m_nsrc;
967
ims = nims;
968
CTR3(KTR_MLD, "%s: allocated %s as %p", __func__,
969
ip6_sprintf(ip6tbuf, addr), ims);
970
}
971
972
*pims = ims;
973
return (0);
974
}
975
976
/*
977
* Merge socket-layer source into MLD-layer source.
978
* If rollback is non-zero, perform the inverse of the merge.
979
*/
980
static void
981
im6s_merge(struct ip6_msource *ims, const struct in6_msource *lims,
982
const int rollback)
983
{
984
int n = rollback ? -1 : 1;
985
#ifdef KTR
986
char ip6tbuf[INET6_ADDRSTRLEN];
987
988
ip6_sprintf(ip6tbuf, &lims->im6s_addr);
989
#endif
990
991
if (lims->im6sl_st[0] == MCAST_EXCLUDE) {
992
CTR3(KTR_MLD, "%s: t1 ex -= %d on %s", __func__, n, ip6tbuf);
993
ims->im6s_st[1].ex -= n;
994
} else if (lims->im6sl_st[0] == MCAST_INCLUDE) {
995
CTR3(KTR_MLD, "%s: t1 in -= %d on %s", __func__, n, ip6tbuf);
996
ims->im6s_st[1].in -= n;
997
}
998
999
if (lims->im6sl_st[1] == MCAST_EXCLUDE) {
1000
CTR3(KTR_MLD, "%s: t1 ex += %d on %s", __func__, n, ip6tbuf);
1001
ims->im6s_st[1].ex += n;
1002
} else if (lims->im6sl_st[1] == MCAST_INCLUDE) {
1003
CTR3(KTR_MLD, "%s: t1 in += %d on %s", __func__, n, ip6tbuf);
1004
ims->im6s_st[1].in += n;
1005
}
1006
}
1007
1008
/*
1009
* Atomically update the global in6_multi state, when a membership's
1010
* filter list is being updated in any way.
1011
*
1012
* imf is the per-inpcb-membership group filter pointer.
1013
* A fake imf may be passed for in-kernel consumers.
1014
*
1015
* XXX This is a candidate for a set-symmetric-difference style loop
1016
* which would eliminate the repeated lookup from root of ims nodes,
1017
* as they share the same key space.
1018
*
1019
* If any error occurred this function will back out of refcounts
1020
* and return a non-zero value.
1021
*/
1022
static int
1023
in6m_merge(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
1024
{
1025
struct ip6_msource *ims, *nims;
1026
struct in6_msource *lims;
1027
int schanged, error;
1028
int nsrc0, nsrc1;
1029
1030
schanged = 0;
1031
error = 0;
1032
nsrc1 = nsrc0 = 0;
1033
IN6_MULTI_LIST_LOCK_ASSERT();
1034
1035
/*
1036
* Update the source filters first, as this may fail.
1037
* Maintain count of in-mode filters at t0, t1. These are
1038
* used to work out if we transition into ASM mode or not.
1039
* Maintain a count of source filters whose state was
1040
* actually modified by this operation.
1041
*/
1042
RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) {
1043
lims = (struct in6_msource *)ims;
1044
if (lims->im6sl_st[0] == imf->im6f_st[0]) nsrc0++;
1045
if (lims->im6sl_st[1] == imf->im6f_st[1]) nsrc1++;
1046
if (lims->im6sl_st[0] == lims->im6sl_st[1]) continue;
1047
error = in6m_get_source(inm, &lims->im6s_addr, 0, &nims);
1048
++schanged;
1049
if (error)
1050
break;
1051
im6s_merge(nims, lims, 0);
1052
}
1053
if (error) {
1054
struct ip6_msource *bims;
1055
1056
RB_FOREACH_REVERSE_FROM(ims, ip6_msource_tree, nims) {
1057
lims = (struct in6_msource *)ims;
1058
if (lims->im6sl_st[0] == lims->im6sl_st[1])
1059
continue;
1060
(void)in6m_get_source(inm, &lims->im6s_addr, 1, &bims);
1061
if (bims == NULL)
1062
continue;
1063
im6s_merge(bims, lims, 1);
1064
}
1065
goto out_reap;
1066
}
1067
1068
CTR3(KTR_MLD, "%s: imf filters in-mode: %d at t0, %d at t1",
1069
__func__, nsrc0, nsrc1);
1070
1071
/* Handle transition between INCLUDE {n} and INCLUDE {} on socket. */
1072
if (imf->im6f_st[0] == imf->im6f_st[1] &&
1073
imf->im6f_st[1] == MCAST_INCLUDE) {
1074
if (nsrc1 == 0) {
1075
CTR1(KTR_MLD, "%s: --in on inm at t1", __func__);
1076
--inm->in6m_st[1].iss_in;
1077
}
1078
}
1079
1080
/* Handle filter mode transition on socket. */
1081
if (imf->im6f_st[0] != imf->im6f_st[1]) {
1082
CTR3(KTR_MLD, "%s: imf transition %d to %d",
1083
__func__, imf->im6f_st[0], imf->im6f_st[1]);
1084
1085
if (imf->im6f_st[0] == MCAST_EXCLUDE) {
1086
CTR1(KTR_MLD, "%s: --ex on inm at t1", __func__);
1087
--inm->in6m_st[1].iss_ex;
1088
} else if (imf->im6f_st[0] == MCAST_INCLUDE) {
1089
CTR1(KTR_MLD, "%s: --in on inm at t1", __func__);
1090
--inm->in6m_st[1].iss_in;
1091
}
1092
1093
if (imf->im6f_st[1] == MCAST_EXCLUDE) {
1094
CTR1(KTR_MLD, "%s: ex++ on inm at t1", __func__);
1095
inm->in6m_st[1].iss_ex++;
1096
} else if (imf->im6f_st[1] == MCAST_INCLUDE && nsrc1 > 0) {
1097
CTR1(KTR_MLD, "%s: in++ on inm at t1", __func__);
1098
inm->in6m_st[1].iss_in++;
1099
}
1100
}
1101
1102
/*
1103
* Track inm filter state in terms of listener counts.
1104
* If there are any exclusive listeners, stack-wide
1105
* membership is exclusive.
1106
* Otherwise, if only inclusive listeners, stack-wide is inclusive.
1107
* If no listeners remain, state is undefined at t1,
1108
* and the MLD lifecycle for this group should finish.
1109
*/
1110
if (inm->in6m_st[1].iss_ex > 0) {
1111
CTR1(KTR_MLD, "%s: transition to EX", __func__);
1112
inm->in6m_st[1].iss_fmode = MCAST_EXCLUDE;
1113
} else if (inm->in6m_st[1].iss_in > 0) {
1114
CTR1(KTR_MLD, "%s: transition to IN", __func__);
1115
inm->in6m_st[1].iss_fmode = MCAST_INCLUDE;
1116
} else {
1117
CTR1(KTR_MLD, "%s: transition to UNDEF", __func__);
1118
inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
1119
}
1120
1121
/* Decrement ASM listener count on transition out of ASM mode. */
1122
if (imf->im6f_st[0] == MCAST_EXCLUDE && nsrc0 == 0) {
1123
if ((imf->im6f_st[1] != MCAST_EXCLUDE) ||
1124
(imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 > 0)) {
1125
CTR1(KTR_MLD, "%s: --asm on inm at t1", __func__);
1126
--inm->in6m_st[1].iss_asm;
1127
}
1128
}
1129
1130
/* Increment ASM listener count on transition to ASM mode. */
1131
if (imf->im6f_st[1] == MCAST_EXCLUDE && nsrc1 == 0) {
1132
CTR1(KTR_MLD, "%s: asm++ on inm at t1", __func__);
1133
inm->in6m_st[1].iss_asm++;
1134
}
1135
1136
CTR3(KTR_MLD, "%s: merged imf %p to inm %p", __func__, imf, inm);
1137
in6m_print(inm);
1138
1139
out_reap:
1140
if (schanged > 0) {
1141
CTR1(KTR_MLD, "%s: sources changed; reaping", __func__);
1142
in6m_reap(inm);
1143
}
1144
return (error);
1145
}
1146
1147
/*
1148
* Mark an in6_multi's filter set deltas as committed.
1149
* Called by MLD after a state change has been enqueued.
1150
*/
1151
void
1152
in6m_commit(struct in6_multi *inm)
1153
{
1154
struct ip6_msource *ims;
1155
1156
CTR2(KTR_MLD, "%s: commit inm %p", __func__, inm);
1157
CTR1(KTR_MLD, "%s: pre commit:", __func__);
1158
in6m_print(inm);
1159
1160
RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) {
1161
ims->im6s_st[0] = ims->im6s_st[1];
1162
}
1163
inm->in6m_st[0] = inm->in6m_st[1];
1164
}
1165
1166
/*
1167
* Reap unreferenced nodes from an in6_multi's filter set.
1168
*/
1169
static void
1170
in6m_reap(struct in6_multi *inm)
1171
{
1172
struct ip6_msource *ims, *tims;
1173
1174
RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) {
1175
if (ims->im6s_st[0].ex > 0 || ims->im6s_st[0].in > 0 ||
1176
ims->im6s_st[1].ex > 0 || ims->im6s_st[1].in > 0 ||
1177
ims->im6s_stp != 0)
1178
continue;
1179
CTR2(KTR_MLD, "%s: free ims %p", __func__, ims);
1180
RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims);
1181
free(ims, M_IP6MSOURCE);
1182
inm->in6m_nsrc--;
1183
}
1184
}
1185
1186
/*
1187
* Purge all source nodes from an in6_multi's filter set.
1188
*/
1189
static void
1190
in6m_purge(struct in6_multi *inm)
1191
{
1192
struct ip6_msource *ims, *tims;
1193
1194
RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) {
1195
CTR2(KTR_MLD, "%s: free ims %p", __func__, ims);
1196
RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims);
1197
free(ims, M_IP6MSOURCE);
1198
inm->in6m_nsrc--;
1199
}
1200
/* Free state-change requests that might be queued. */
1201
mbufq_drain(&inm->in6m_scq);
1202
}
1203
1204
/*
1205
* Join a multicast address w/o sources.
1206
* KAME compatibility entry point.
1207
*
1208
* SMPng: Assume no mc locks held by caller.
1209
*/
1210
int
1211
in6_joingroup(struct ifnet *ifp, const struct in6_addr *mcaddr,
1212
/*const*/ struct in6_mfilter *imf, struct in6_multi **pinm,
1213
const int delay)
1214
{
1215
int error;
1216
1217
IN6_MULTI_LOCK();
1218
error = in6_joingroup_locked(ifp, mcaddr, NULL, pinm, delay);
1219
IN6_MULTI_UNLOCK();
1220
return (error);
1221
}
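/*
 * Illustrative sketch (editor's addition, not upstream code): a kernel
 * consumer performing a plain ASM join and later leaving the group might
 * use these entry points as follows; 'ifp' and 'addr' are assumed to be a
 * valid interface and IPv6 multicast address, and no mc locks are held:
 *
 *	struct in6_multi *inm = NULL;
 *	int error;
 *
 *	error = in6_joingroup(ifp, &addr, NULL, &inm, 0);
 *	if (error == 0) {
 *		... use the membership ...
 *		(void)in6_leavegroup(inm, NULL);
 *	}
 */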
1222
1223
/*
1224
* Join a multicast group; real entry point.
1225
*
1226
* Only preserves atomicity at inm level.
1227
* NOTE: imf argument cannot be const due to sys/tree.h limitations.
1228
*
1229
* If the MLD downcall fails, the group is not joined, and an error
1230
* code is returned.
1231
*/
1232
static int
1233
in6_joingroup_locked(struct ifnet *ifp, const struct in6_addr *mcaddr,
1234
/*const*/ struct in6_mfilter *imf, struct in6_multi **pinm,
1235
const int delay)
1236
{
1237
struct in6_multi_head inmh;
1238
struct in6_mfilter timf;
1239
struct in6_multi *inm;
1240
struct ifmultiaddr *ifma;
1241
int error;
1242
#ifdef KTR
1243
char ip6tbuf[INET6_ADDRSTRLEN];
1244
#endif
1245
1246
/*
1247
* Sanity: Check scope zone ID was set for ifp, if and
1248
* only if group is scoped to an interface.
1249
*/
1250
KASSERT(IN6_IS_ADDR_MULTICAST(mcaddr),
1251
("%s: not a multicast address", __func__));
1252
if (IN6_IS_ADDR_MC_LINKLOCAL(mcaddr) ||
1253
IN6_IS_ADDR_MC_INTFACELOCAL(mcaddr)) {
1254
KASSERT(mcaddr->s6_addr16[1] != 0,
1255
("%s: scope zone ID not set", __func__));
1256
}
1257
1258
IN6_MULTI_LOCK_ASSERT();
1259
IN6_MULTI_LIST_UNLOCK_ASSERT();
1260
1261
CTR4(KTR_MLD, "%s: join %s on %p(%s))", __func__,
1262
ip6_sprintf(ip6tbuf, mcaddr), ifp, if_name(ifp));
1263
1264
error = 0;
1265
inm = NULL;
1266
1267
/*
1268
* If no imf was specified (i.e. kernel consumer),
1269
* fake one up and assume it is an ASM join.
1270
*/
1271
if (imf == NULL) {
1272
im6f_init(&timf, MCAST_UNDEFINED, MCAST_EXCLUDE);
1273
imf = &timf;
1274
}
1275
error = in6_getmulti(ifp, mcaddr, &inm);
1276
if (error) {
1277
CTR1(KTR_MLD, "%s: in6_getmulti() failure", __func__);
1278
return (error);
1279
}
1280
1281
IN6_MULTI_LIST_LOCK();
1282
CTR1(KTR_MLD, "%s: merge inm state", __func__);
1283
error = in6m_merge(inm, imf);
1284
if (error) {
1285
CTR1(KTR_MLD, "%s: failed to merge inm state", __func__);
1286
goto out_in6m_release;
1287
}
1288
1289
CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
1290
error = mld_change_state(inm, delay);
1291
if (error) {
1292
CTR1(KTR_MLD, "%s: failed to update source", __func__);
1293
goto out_in6m_release;
1294
}
1295
1296
out_in6m_release:
1297
SLIST_INIT(&inmh);
1298
if (error) {
1299
struct epoch_tracker et;
1300
1301
CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm);
1302
IF_ADDR_WLOCK(ifp);
1303
NET_EPOCH_ENTER(et);
1304
CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1305
if (ifma->ifma_protospec == inm) {
1306
ifma->ifma_protospec = NULL;
1307
break;
1308
}
1309
}
1310
in6m_disconnect_locked(&inmh, inm);
1311
in6m_rele_locked(&inmh, inm);
1312
NET_EPOCH_EXIT(et);
1313
IF_ADDR_WUNLOCK(ifp);
1314
} else {
1315
*pinm = inm;
1316
}
1317
IN6_MULTI_LIST_UNLOCK();
1318
in6m_release_list_deferred(&inmh);
1319
return (error);
1320
}
1321
1322
/*
1323
* Leave a multicast group; unlocked entry point.
1324
*/
1325
int
1326
in6_leavegroup(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
1327
{
1328
int error;
1329
1330
IN6_MULTI_LOCK();
1331
error = in6_leavegroup_locked(inm, imf);
1332
IN6_MULTI_UNLOCK();
1333
return (error);
1334
}
1335
1336
/*
1337
* Leave a multicast group; real entry point.
1338
* All source filters will be expunged.
1339
*
1340
* Only preserves atomicity at inm level.
1341
*
1342
* Holding the write lock for the INP which contains imf
1343
* is highly advisable. We can't assert for it as imf does not
1344
* contain a back-pointer to the owning inp.
1345
*
1346
* Note: This is not the same as in6m_release(*) as this function also
1347
* makes a state change downcall into MLD.
1348
*/
1349
int
1350
in6_leavegroup_locked(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf)
1351
{
1352
struct in6_multi_head inmh;
1353
struct in6_mfilter timf;
1354
struct ifnet *ifp;
1355
int error;
1356
#ifdef KTR
1357
char ip6tbuf[INET6_ADDRSTRLEN];
1358
#endif
1359
1360
error = 0;
1361
1362
IN6_MULTI_LOCK_ASSERT();
1363
1364
CTR5(KTR_MLD, "%s: leave inm %p, %s/%s, imf %p", __func__,
1365
inm, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
1366
(in6m_is_ifp_detached(inm) ? "null" : if_name(inm->in6m_ifp)),
1367
imf);
1368
1369
/*
1370
* If no imf was specified (i.e. kernel consumer),
1371
* fake one up and assume it is an ASM leave.
1372
*/
1373
if (imf == NULL) {
1374
im6f_init(&timf, MCAST_EXCLUDE, MCAST_UNDEFINED);
1375
imf = &timf;
1376
}
1377
1378
/*
1379
* Begin state merge transaction at MLD layer.
1380
*
1381
* As this particular invocation should not cause any memory
1382
* to be allocated, and there is no opportunity to roll back
1383
* the transaction, it MUST NOT fail.
1384
*/
1385
1386
ifp = inm->in6m_ifp;
1387
IN6_MULTI_LIST_LOCK();
1388
CTR1(KTR_MLD, "%s: merge inm state", __func__);
1389
error = in6m_merge(inm, imf);
1390
KASSERT(error == 0, ("%s: failed to merge inm state", __func__));
1391
1392
CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
1393
error = 0;
1394
if (ifp)
1395
error = mld_change_state(inm, 0);
1396
if (error)
1397
CTR1(KTR_MLD, "%s: failed mld downcall", __func__);
1398
1399
CTR2(KTR_MLD, "%s: dropping ref on %p", __func__, inm);
1400
if (ifp)
1401
IF_ADDR_WLOCK(ifp);
1402
1403
SLIST_INIT(&inmh);
1404
if (inm->in6m_refcount == 1)
1405
in6m_disconnect_locked(&inmh, inm);
1406
in6m_rele_locked(&inmh, inm);
1407
if (ifp)
1408
IF_ADDR_WUNLOCK(ifp);
1409
IN6_MULTI_LIST_UNLOCK();
1410
in6m_release_list_deferred(&inmh);
1411
return (error);
1412
}
1413
1414
/*
1415
* Block or unblock an ASM multicast source on an inpcb.
1416
* This implements the delta-based API described in RFC 3678.
1417
*
1418
* The delta-based API applies only to exclusive-mode memberships.
1419
* An MLD downcall will be performed.
1420
*
1421
* Return 0 if successful, otherwise return an appropriate error code.
1422
*/
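/*
 * Illustrative sketch (editor's addition, not upstream code): from userland
 * this path is reached via setsockopt(2) on an IPv6 socket that has already
 * joined the group in exclusive mode, e.g. to block one source:
 *
 *	struct group_source_req gsr = { .gsr_interface = ifindex };
 *	memcpy(&gsr.gsr_group, &group_sin6, sizeof(group_sin6));
 *	memcpy(&gsr.gsr_source, &source_sin6, sizeof(source_sin6));
 *	(void)setsockopt(s, IPPROTO_IPV6, MCAST_BLOCK_SOURCE,
 *	    &gsr, sizeof(gsr));
 *
 * 's', 'ifindex', 'group_sin6' and 'source_sin6' are hypothetical.
 */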
1423
static int
1424
in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt)
1425
{
1426
struct group_source_req gsr;
1427
struct epoch_tracker et;
1428
sockunion_t *gsa, *ssa;
1429
struct ifnet *ifp;
1430
struct in6_mfilter *imf;
1431
struct ip6_moptions *imo;
1432
struct in6_msource *ims;
1433
struct in6_multi *inm;
1434
uint16_t fmode;
1435
int error, doblock;
1436
#ifdef KTR
1437
char ip6tbuf[INET6_ADDRSTRLEN];
1438
#endif
1439
1440
ifp = NULL;
1441
error = 0;
1442
doblock = 0;
1443
1444
memset(&gsr, 0, sizeof(struct group_source_req));
1445
gsa = (sockunion_t *)&gsr.gsr_group;
1446
ssa = (sockunion_t *)&gsr.gsr_source;
1447
1448
switch (sopt->sopt_name) {
1449
case MCAST_BLOCK_SOURCE:
1450
case MCAST_UNBLOCK_SOURCE:
1451
error = sooptcopyin(sopt, &gsr,
1452
sizeof(struct group_source_req),
1453
sizeof(struct group_source_req));
1454
if (error)
1455
return (error);
1456
1457
if (gsa->sin6.sin6_family != AF_INET6 ||
1458
gsa->sin6.sin6_len != sizeof(struct sockaddr_in6))
1459
return (EINVAL);
1460
1461
if (ssa->sin6.sin6_family != AF_INET6 ||
1462
ssa->sin6.sin6_len != sizeof(struct sockaddr_in6))
1463
return (EINVAL);
1464
1465
/*
1466
* XXXGL: this function should use ifnet_byindex_ref, or
1467
* expand the epoch section all the way to where we put
1468
* the reference.
1469
*/
1470
NET_EPOCH_ENTER(et);
1471
ifp = ifnet_byindex(gsr.gsr_interface);
1472
NET_EPOCH_EXIT(et);
1473
if (ifp == NULL)
1474
return (EADDRNOTAVAIL);
1475
1476
if (sopt->sopt_name == MCAST_BLOCK_SOURCE)
1477
doblock = 1;
1478
break;
1479
1480
default:
1481
CTR2(KTR_MLD, "%s: unknown sopt_name %d",
1482
__func__, sopt->sopt_name);
1483
return (EOPNOTSUPP);
1484
break;
1485
}
1486
1487
if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr))
1488
return (EINVAL);
1489
1490
(void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL);
1491
1492
/*
1493
* Check if we are actually a member of this group.
1494
*/
1495
imo = in6p_findmoptions(inp);
1496
imf = im6o_match_group(imo, ifp, &gsa->sa);
1497
if (imf == NULL) {
1498
error = EADDRNOTAVAIL;
1499
goto out_in6p_locked;
1500
}
1501
inm = imf->im6f_in6m;
1502
1503
/*
1504
* Attempting to use the delta-based API on an
1505
* non exclusive-mode membership is an error.
1506
*/
1507
fmode = imf->im6f_st[0];
1508
if (fmode != MCAST_EXCLUDE) {
1509
error = EINVAL;
1510
goto out_in6p_locked;
1511
}
1512
1513
/*
1514
* Deal with error cases up-front:
1515
* Asked to block, but already blocked; or
1516
* Asked to unblock, but nothing to unblock.
1517
* If adding a new block entry, allocate it.
1518
*/
1519
ims = im6o_match_source(imf, &ssa->sa);
1520
if ((ims != NULL && doblock) || (ims == NULL && !doblock)) {
1521
CTR3(KTR_MLD, "%s: source %s %spresent", __func__,
1522
ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr),
1523
doblock ? "" : "not ");
1524
error = EADDRNOTAVAIL;
1525
goto out_in6p_locked;
1526
}
1527
1528
INP_WLOCK_ASSERT(inp);
1529
1530
/*
1531
* Begin state merge transaction at socket layer.
1532
*/
1533
if (doblock) {
1534
CTR2(KTR_MLD, "%s: %s source", __func__, "block");
1535
ims = im6f_graft(imf, fmode, &ssa->sin6);
1536
if (ims == NULL)
1537
error = ENOMEM;
1538
} else {
1539
CTR2(KTR_MLD, "%s: %s source", __func__, "allow");
1540
error = im6f_prune(imf, &ssa->sin6);
1541
}
1542
1543
if (error) {
1544
CTR1(KTR_MLD, "%s: merge imf state failed", __func__);
1545
goto out_im6f_rollback;
1546
}
1547
1548
/*
1549
* Begin state merge transaction at MLD layer.
1550
*/
1551
IN6_MULTI_LIST_LOCK();
1552
CTR1(KTR_MLD, "%s: merge inm state", __func__);
1553
error = in6m_merge(inm, imf);
1554
if (error)
1555
CTR1(KTR_MLD, "%s: failed to merge inm state", __func__);
1556
else {
1557
CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
1558
error = mld_change_state(inm, 0);
1559
if (error)
1560
CTR1(KTR_MLD, "%s: failed mld downcall", __func__);
1561
}
1562
1563
IN6_MULTI_LIST_UNLOCK();
1564
1565
out_im6f_rollback:
1566
if (error)
1567
im6f_rollback(imf);
1568
else
1569
im6f_commit(imf);
1570
1571
im6f_reap(imf);
1572
1573
out_in6p_locked:
1574
INP_WUNLOCK(inp);
1575
return (error);
1576
}
1577
1578
/*
1579
* Given an inpcb, return its multicast options structure pointer. Accepts
1580
* an unlocked inpcb pointer, but will return it locked. May sleep.
1581
*
1582
* SMPng: NOTE: Returns with the INP write lock held.
1583
*/
1584
static struct ip6_moptions *
1585
in6p_findmoptions(struct inpcb *inp)
1586
{
1587
struct ip6_moptions *imo;
1588
1589
INP_WLOCK(inp);
1590
if (inp->in6p_moptions != NULL)
1591
return (inp->in6p_moptions);
1592
1593
INP_WUNLOCK(inp);
1594
1595
imo = malloc(sizeof(*imo), M_IP6MOPTS, M_WAITOK);
1596
1597
imo->im6o_multicast_ifp = NULL;
1598
imo->im6o_multicast_hlim = V_ip6_defmcasthlim;
1599
imo->im6o_multicast_loop = in6_mcast_loop;
1600
STAILQ_INIT(&imo->im6o_head);
1601
1602
INP_WLOCK(inp);
1603
if (inp->in6p_moptions != NULL) {
1604
free(imo, M_IP6MOPTS);
1605
return (inp->in6p_moptions);
1606
}
1607
inp->in6p_moptions = imo;
1608
return (imo);
1609
}
1610
1611
/*
1612
* Discard the IPv6 multicast options (and source filters).
1613
*
1614
* SMPng: NOTE: assumes INP write lock is held.
1615
*
1616
* XXX can all be safely deferred to epoch_call
1617
*
1618
*/
1619
1620
static void
1621
inp_gcmoptions(struct ip6_moptions *imo)
1622
{
1623
struct in6_mfilter *imf;
1624
struct in6_multi *inm;
1625
struct ifnet *ifp;
1626
1627
while ((imf = ip6_mfilter_first(&imo->im6o_head)) != NULL) {
1628
ip6_mfilter_remove(&imo->im6o_head, imf);
1629
1630
im6f_leave(imf);
1631
if ((inm = imf->im6f_in6m) != NULL) {
1632
if ((ifp = inm->in6m_ifp) != NULL) {
1633
CURVNET_SET(ifp->if_vnet);
1634
(void)in6_leavegroup(inm, imf);
1635
CURVNET_RESTORE();
1636
} else {
1637
(void)in6_leavegroup(inm, imf);
1638
}
1639
}
1640
ip6_mfilter_free(imf);
1641
}
1642
free(imo, M_IP6MOPTS);
1643
}
1644
1645
void
1646
ip6_freemoptions(struct ip6_moptions *imo)
1647
{
1648
if (imo == NULL)
1649
return;
1650
inp_gcmoptions(imo);
1651
}
1652
1653
/*
1654
* Atomically get source filters on a socket for an IPv6 multicast group.
1655
* Called with INP lock held; returns with lock released.
1656
*/
1657
static int
1658
in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt)
1659
{
1660
struct epoch_tracker et;
1661
struct __msfilterreq msfr;
1662
sockunion_t *gsa;
1663
struct ifnet *ifp;
1664
struct ip6_moptions *imo;
1665
struct in6_mfilter *imf;
1666
struct ip6_msource *ims;
1667
struct in6_msource *lims;
1668
struct sockaddr_in6 *psin;
1669
struct sockaddr_storage *ptss;
1670
struct sockaddr_storage *tss;
1671
int error;
1672
size_t nsrcs, ncsrcs;
1673
1674
INP_WLOCK_ASSERT(inp);
1675
1676
imo = inp->in6p_moptions;
1677
KASSERT(imo != NULL, ("%s: null ip6_moptions", __func__));
1678
1679
INP_WUNLOCK(inp);
1680
1681
error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq),
1682
sizeof(struct __msfilterreq));
1683
if (error)
1684
return (error);
1685
1686
if (msfr.msfr_group.ss_family != AF_INET6 ||
1687
msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6))
1688
return (EINVAL);
1689
1690
gsa = (sockunion_t *)&msfr.msfr_group;
1691
if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr))
1692
return (EINVAL);
1693
1694
/*
1695
* XXXGL: this function should use ifnet_byindex_ref, or expand the
1696
* epoch section all the way to where the interface is referenced.
1697
*/
1698
NET_EPOCH_ENTER(et);
1699
ifp = ifnet_byindex(msfr.msfr_ifindex);
1700
NET_EPOCH_EXIT(et);
1701
if (ifp == NULL)
1702
return (EADDRNOTAVAIL);
1703
(void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL);
1704
1705
INP_WLOCK(inp);
1706
1707
/*
1708
* Lookup group on the socket.
1709
*/
1710
imf = im6o_match_group(imo, ifp, &gsa->sa);
1711
if (imf == NULL) {
1712
INP_WUNLOCK(inp);
1713
return (EADDRNOTAVAIL);
1714
}
1715
1716
/*
1717
* Ignore memberships which are in limbo.
1718
*/
1719
if (imf->im6f_st[1] == MCAST_UNDEFINED) {
1720
INP_WUNLOCK(inp);
1721
return (EAGAIN);
1722
}
1723
msfr.msfr_fmode = imf->im6f_st[1];
1724
1725
/*
1726
* If the user specified a buffer, copy out the source filter
1727
* entries to userland gracefully.
1728
* We only copy out the number of entries which userland
1729
* has asked for, but we always tell userland how big the
1730
* buffer really needs to be.
1731
*/
1732
if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc)
1733
msfr.msfr_nsrcs = in6_mcast_maxsocksrc;
1734
tss = NULL;
1735
if (msfr.msfr_srcs != NULL && msfr.msfr_nsrcs > 0) {
1736
tss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs,
1737
M_TEMP, M_NOWAIT | M_ZERO);
1738
if (tss == NULL) {
1739
INP_WUNLOCK(inp);
1740
return (ENOBUFS);
1741
}
1742
}
1743
1744
/*
1745
* Count number of sources in-mode at t0.
1746
* If buffer space exists and remains, copy out source entries.
1747
*/
1748
nsrcs = msfr.msfr_nsrcs;
1749
ncsrcs = 0;
1750
ptss = tss;
1751
RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) {
1752
lims = (struct in6_msource *)ims;
1753
if (lims->im6sl_st[0] == MCAST_UNDEFINED ||
1754
lims->im6sl_st[0] != imf->im6f_st[0])
1755
continue;
1756
++ncsrcs;
1757
if (tss != NULL && nsrcs > 0) {
1758
psin = (struct sockaddr_in6 *)ptss;
1759
psin->sin6_family = AF_INET6;
1760
psin->sin6_len = sizeof(struct sockaddr_in6);
1761
psin->sin6_addr = lims->im6s_addr;
1762
psin->sin6_port = 0;
1763
--nsrcs;
1764
++ptss;
1765
}
1766
}
1767
1768
INP_WUNLOCK(inp);
1769
1770
if (tss != NULL) {
1771
error = copyout(tss, msfr.msfr_srcs,
1772
sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs);
1773
free(tss, M_TEMP);
1774
if (error)
1775
return (error);
1776
}
1777
1778
msfr.msfr_nsrcs = ncsrcs;
1779
error = sooptcopyout(sopt, &msfr, sizeof(struct __msfilterreq));
1780
1781
return (error);
1782
}
1783
1784
/*
1785
* Return the IP multicast options in response to user getsockopt().
1786
*/
1787
int
1788
ip6_getmoptions(struct inpcb *inp, struct sockopt *sopt)
1789
{
1790
struct ip6_moptions *im6o;
1791
int error;
1792
u_int optval;
1793
1794
INP_WLOCK(inp);
1795
im6o = inp->in6p_moptions;
1796
/* If socket is neither of type SOCK_RAW nor SOCK_DGRAM, reject it. */
1797
if (inp->inp_socket->so_proto->pr_type != SOCK_RAW &&
1798
inp->inp_socket->so_proto->pr_type != SOCK_DGRAM) {
1799
INP_WUNLOCK(inp);
1800
return (EOPNOTSUPP);
1801
}
1802
1803
error = 0;
1804
switch (sopt->sopt_name) {
1805
case IPV6_MULTICAST_IF:
1806
if (im6o == NULL || im6o->im6o_multicast_ifp == NULL) {
1807
optval = 0;
1808
} else {
1809
optval = im6o->im6o_multicast_ifp->if_index;
1810
}
1811
INP_WUNLOCK(inp);
1812
error = sooptcopyout(sopt, &optval, sizeof(u_int));
1813
break;
1814
1815
case IPV6_MULTICAST_HOPS:
1816
if (im6o == NULL)
1817
optval = V_ip6_defmcasthlim;
1818
else
1819
optval = im6o->im6o_multicast_hlim;
1820
INP_WUNLOCK(inp);
1821
error = sooptcopyout(sopt, &optval, sizeof(u_int));
1822
break;
1823
1824
case IPV6_MULTICAST_LOOP:
1825
if (im6o == NULL)
1826
optval = in6_mcast_loop; /* XXX VIMAGE */
1827
else
1828
optval = im6o->im6o_multicast_loop;
1829
INP_WUNLOCK(inp);
1830
error = sooptcopyout(sopt, &optval, sizeof(u_int));
1831
break;
1832
1833
case IPV6_MSFILTER:
1834
if (im6o == NULL) {
1835
error = EADDRNOTAVAIL;
1836
INP_WUNLOCK(inp);
1837
} else {
1838
error = in6p_get_source_filters(inp, sopt);
1839
}
1840
break;
1841
1842
default:
1843
INP_WUNLOCK(inp);
1844
error = ENOPROTOOPT;
1845
break;
1846
}
1847
1848
INP_UNLOCK_ASSERT(inp);
1849
1850
return (error);
1851
}

/*
 * Look up the ifnet to use for a multicast group membership,
 * given the address of an IPv6 group.
 *
 * This routine exists to support legacy IPv6 multicast applications.
 *
 * Use the socket's current FIB number for any required FIB lookup. Look up the
 * group address in the unicast FIB, and use its ifp; usually, this points to
 * the default next-hop.  If the FIB lookup fails, return NULL.
 *
 * FUTURE: Support multiple forwarding tables for IPv6.
 *
 * Returns NULL if no ifp could be found.
 */
static struct ifnet *
in6p_lookup_mcast_ifp(const struct inpcb *inp, const struct sockaddr_in6 *gsin6)
{
	struct nhop_object *nh;
	struct in6_addr dst;
	uint32_t scopeid;
	uint32_t fibnum;

	KASSERT(gsin6->sin6_family == AF_INET6,
	    ("%s: not AF_INET6 group", __func__));

	in6_splitscope(&gsin6->sin6_addr, &dst, &scopeid);
	fibnum = inp->inp_inc.inc_fibnum;
	nh = fib6_lookup(fibnum, &dst, scopeid, 0, 0);

	return (nh ? nh->nh_ifp : NULL);
}

/*
 * Join an IPv6 multicast group, possibly with a source.
 *
 * FIXME: The KAME use of the unspecified address (::)
 * to join *all* multicast groups is currently unsupported.
 *
 * XXXGL: this function uses ifnet_byindex() several times without proper
 * protection: it neither stays in the epoch nor takes a reference on the ifnet.
 */
static int
in6p_join_group(struct inpcb *inp, struct sockopt *sopt)
{
	struct in6_multi_head inmh;
	struct group_source_req gsr;
	struct epoch_tracker et;
	sockunion_t *gsa, *ssa;
	struct ifnet *ifp;
	struct in6_mfilter *imf;
	struct ip6_moptions *imo;
	struct in6_multi *inm;
	struct in6_msource *lims;
	int error, is_new;

	SLIST_INIT(&inmh);
	ifp = NULL;
	lims = NULL;
	error = 0;

	memset(&gsr, 0, sizeof(struct group_source_req));
	gsa = (sockunion_t *)&gsr.gsr_group;
	gsa->ss.ss_family = AF_UNSPEC;
	ssa = (sockunion_t *)&gsr.gsr_source;
	ssa->ss.ss_family = AF_UNSPEC;

	/*
	 * Chew everything into struct group_source_req.
	 * Overwrite the port field if present, as the sockaddr
	 * being copied in may be matched with a binary comparison.
	 * Ignore passed-in scope ID.
	 */
	switch (sopt->sopt_name) {
	case IPV6_JOIN_GROUP: {
		struct ipv6_mreq mreq;

		error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq),
		    sizeof(struct ipv6_mreq));
		if (error)
			return (error);

		gsa->sin6.sin6_family = AF_INET6;
		gsa->sin6.sin6_len = sizeof(struct sockaddr_in6);
		gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr;

		if (mreq.ipv6mr_interface == 0) {
			ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6);
		} else {
			NET_EPOCH_ENTER(et);
			ifp = ifnet_byindex(mreq.ipv6mr_interface);
			NET_EPOCH_EXIT(et);
			if (ifp == NULL)
				return (EADDRNOTAVAIL);
		}
		CTR3(KTR_MLD, "%s: ipv6mr_interface = %d, ifp = %p",
		    __func__, mreq.ipv6mr_interface, ifp);
	} break;

	case MCAST_JOIN_GROUP:
	case MCAST_JOIN_SOURCE_GROUP:
		if (sopt->sopt_name == MCAST_JOIN_GROUP) {
			error = sooptcopyin(sopt, &gsr,
			    sizeof(struct group_req),
			    sizeof(struct group_req));
		} else if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) {
			error = sooptcopyin(sopt, &gsr,
			    sizeof(struct group_source_req),
			    sizeof(struct group_source_req));
		}
		if (error)
			return (error);

		if (gsa->sin6.sin6_family != AF_INET6 ||
		    gsa->sin6.sin6_len != sizeof(struct sockaddr_in6))
			return (EINVAL);

		if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) {
			if (ssa->sin6.sin6_family != AF_INET6 ||
			    ssa->sin6.sin6_len != sizeof(struct sockaddr_in6))
				return (EINVAL);
			if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr))
				return (EINVAL);
			/*
			 * TODO: Validate embedded scope ID in source
			 * list entry against passed-in ifp, if and only
			 * if source list filter entry is iface or node local.
			 */
			in6_clearscope(&ssa->sin6.sin6_addr);
			ssa->sin6.sin6_port = 0;
			ssa->sin6.sin6_scope_id = 0;
		}
		NET_EPOCH_ENTER(et);
		ifp = ifnet_byindex(gsr.gsr_interface);
		NET_EPOCH_EXIT(et);
		if (ifp == NULL)
			return (EADDRNOTAVAIL);
		break;

	default:
		CTR2(KTR_MLD, "%s: unknown sopt_name %d",
		    __func__, sopt->sopt_name);
		return (EOPNOTSUPP);
		break;
	}

	if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr))
		return (EINVAL);

	if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0)
		return (EADDRNOTAVAIL);

	gsa->sin6.sin6_port = 0;
	gsa->sin6.sin6_scope_id = 0;

	/*
	 * Always set the scope zone ID on memberships created from userland.
	 * Use the passed-in ifp to do this.
	 * XXX The in6_setscope() return value is meaningless.
	 * XXX SCOPE6_LOCK() is taken by in6_setscope().
	 */
	(void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL);

	IN6_MULTI_LOCK();

	/*
	 * Find the membership in the membership list.
	 */
	imo = in6p_findmoptions(inp);
	imf = im6o_match_group(imo, ifp, &gsa->sa);
	if (imf == NULL) {
		is_new = 1;
		inm = NULL;

		if (ip6_mfilter_count(&imo->im6o_head) >= IPV6_MAX_MEMBERSHIPS) {
			error = ENOMEM;
			goto out_in6p_locked;
		}
	} else {
		is_new = 0;
		inm = imf->im6f_in6m;

		if (ssa->ss.ss_family != AF_UNSPEC) {
			/*
			 * MCAST_JOIN_SOURCE_GROUP on an exclusive membership
			 * is an error.  On an existing inclusive membership,
			 * it just adds the source to the filter list.
			 */
			if (imf->im6f_st[1] != MCAST_INCLUDE) {
				error = EINVAL;
				goto out_in6p_locked;
			}
			/*
			 * Throw out duplicates.
			 *
			 * XXX FIXME: This makes a naive assumption that
			 * even if entries exist for *ssa in this imf,
			 * they will be rejected as dupes, even if they
			 * are not valid in the current mode (in-mode).
			 *
			 * in6_msource is transactioned just as for anything
			 * else in SSM -- but note naive use of in6m_graft()
			 * below for allocating new filter entries.
			 *
			 * This is only an issue if someone mixes the
			 * full-state SSM API with the delta-based API,
			 * which is discouraged in the relevant RFCs.
			 */
			lims = im6o_match_source(imf, &ssa->sa);
			if (lims != NULL /*&&
			    lims->im6sl_st[1] == MCAST_INCLUDE*/) {
				error = EADDRNOTAVAIL;
				goto out_in6p_locked;
			}
		} else {
			/*
			 * MCAST_JOIN_GROUP alone, on any existing membership,
			 * is rejected, to stop the same inpcb tying up
			 * multiple refs to the in6_multi.
			 * On an existing inclusive membership, this is also
			 * an error; if you want to change filter mode,
			 * you must use the userland API setsourcefilter().
			 * XXX We don't reject this for imf in UNDEFINED
			 * state at t1, because allocation of a filter
			 * is atomic with allocation of a membership.
			 */
			error = EADDRINUSE;
			goto out_in6p_locked;
		}
	}

	/*
	 * Begin state merge transaction at socket layer.
	 */
	INP_WLOCK_ASSERT(inp);

	/*
	 * Graft new source into filter list for this inpcb's
	 * membership of the group.  The in6_multi may not have
	 * been allocated yet if this is a new membership; however,
	 * the in6_mfilter slot will be allocated and must be initialized.
	 *
	 * Note: Grafting of exclusive mode filters doesn't happen
	 * in this path.
	 * XXX: Should check for non-NULL lims (node exists but may
	 * not be in-mode) for interop with full-state API.
	 */
	if (ssa->ss.ss_family != AF_UNSPEC) {
		/* Membership starts in IN mode */
		if (is_new) {
			CTR1(KTR_MLD, "%s: new join w/source", __func__);
			imf = ip6_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_INCLUDE);
			if (imf == NULL) {
				error = ENOMEM;
				goto out_in6p_locked;
			}
		} else {
			CTR2(KTR_MLD, "%s: %s source", __func__, "allow");
		}
		lims = im6f_graft(imf, MCAST_INCLUDE, &ssa->sin6);
		if (lims == NULL) {
			CTR1(KTR_MLD, "%s: merge imf state failed",
			    __func__);
			error = ENOMEM;
			goto out_in6p_locked;
		}
	} else {
		/* No address specified; membership starts in EX mode */
		if (is_new) {
			CTR1(KTR_MLD, "%s: new join w/o source", __func__);
			imf = ip6_mfilter_alloc(M_NOWAIT, MCAST_UNDEFINED, MCAST_EXCLUDE);
			if (imf == NULL) {
				error = ENOMEM;
				goto out_in6p_locked;
			}
		}
	}

	/*
	 * Begin state merge transaction at MLD layer.
	 */
	if (is_new) {
		in_pcbref(inp);
		INP_WUNLOCK(inp);

		error = in6_joingroup_locked(ifp, &gsa->sin6.sin6_addr, imf,
		    &imf->im6f_in6m, 0);

		INP_WLOCK(inp);
		if (in_pcbrele_wlocked(inp)) {
			error = ENXIO;
			goto out_in6p_unlocked;
		}
		if (error) {
			goto out_in6p_locked;
		}
		/*
		 * NOTE: Refcount from in6_joingroup_locked()
		 * is protecting membership.
		 */
		ip6_mfilter_insert(&imo->im6o_head, imf);
	} else {
		CTR1(KTR_MLD, "%s: merge inm state", __func__);
		IN6_MULTI_LIST_LOCK();
		error = in6m_merge(inm, imf);
		if (error) {
			CTR1(KTR_MLD, "%s: failed to merge inm state",
			    __func__);
			IN6_MULTI_LIST_UNLOCK();
			im6f_rollback(imf);
			im6f_reap(imf);
			goto out_in6p_locked;
		}
		CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
		error = mld_change_state(inm, 0);
		IN6_MULTI_LIST_UNLOCK();

		if (error) {
			CTR1(KTR_MLD, "%s: failed mld downcall",
			    __func__);
			im6f_rollback(imf);
			im6f_reap(imf);
			goto out_in6p_locked;
		}
	}

	im6f_commit(imf);
	imf = NULL;

out_in6p_locked:
	INP_WUNLOCK(inp);
out_in6p_unlocked:
	IN6_MULTI_UNLOCK();

	if (is_new && imf) {
		if (imf->im6f_in6m != NULL) {
			struct in6_multi_head inmh;

			SLIST_INIT(&inmh);
			SLIST_INSERT_HEAD(&inmh, imf->im6f_in6m, in6m_defer);
			in6m_release_list_deferred(&inmh);
		}
		ip6_mfilter_free(imf);
	}
	return (error);
}
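
/*
 * For illustration only: the join path above is driven from userland by
 * setsockopt(2).  A minimal sketch using the protocol-independent API,
 * assuming an AF_INET6 socket "s" and placeholder interface/group names:
 *
 *	struct group_req gr = { .gr_interface = if_nametoindex("em0") };
 *	struct sockaddr_in6 *g = (struct sockaddr_in6 *)&gr.gr_group;
 *
 *	g->sin6_family = AF_INET6;
 *	g->sin6_len = sizeof(*g);
 *	inet_pton(AF_INET6, "ff15::101", &g->sin6_addr);
 *	if (setsockopt(s, IPPROTO_IPV6, MCAST_JOIN_GROUP,
 *	    &gr, sizeof(gr)) == -1)
 *		err(1, "MCAST_JOIN_GROUP");
 */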

/*
 * Leave an IPv6 multicast group on an inpcb, possibly with a source.
 */
static int
in6p_leave_group(struct inpcb *inp, struct sockopt *sopt)
{
	struct ipv6_mreq mreq;
	struct group_source_req gsr;
	struct epoch_tracker et;
	sockunion_t *gsa, *ssa;
	struct ifnet *ifp;
	struct in6_mfilter *imf;
	struct ip6_moptions *imo;
	struct in6_msource *ims;
	struct in6_multi *inm;
	uint32_t ifindex;
	int error;
	bool is_final;
#ifdef KTR
	char ip6tbuf[INET6_ADDRSTRLEN];
#endif

	ifp = NULL;
	ifindex = 0;
	error = 0;
	is_final = true;

	memset(&gsr, 0, sizeof(struct group_source_req));
	gsa = (sockunion_t *)&gsr.gsr_group;
	gsa->ss.ss_family = AF_UNSPEC;
	ssa = (sockunion_t *)&gsr.gsr_source;
	ssa->ss.ss_family = AF_UNSPEC;

	/*
	 * Chew everything passed in up into a struct group_source_req
	 * as that is easier to process.
	 * Note: Any embedded scope ID in the multicast group passed
	 * in by userland is ignored; the interface index is the recommended
	 * mechanism to specify an interface (see below).
	 */
	switch (sopt->sopt_name) {
	case IPV6_LEAVE_GROUP:
		error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq),
		    sizeof(struct ipv6_mreq));
		if (error)
			return (error);
		gsa->sin6.sin6_family = AF_INET6;
		gsa->sin6.sin6_len = sizeof(struct sockaddr_in6);
		gsa->sin6.sin6_addr = mreq.ipv6mr_multiaddr;
		gsa->sin6.sin6_port = 0;
		gsa->sin6.sin6_scope_id = 0;
		ifindex = mreq.ipv6mr_interface;
		break;

	case MCAST_LEAVE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
		if (sopt->sopt_name == MCAST_LEAVE_GROUP) {
			error = sooptcopyin(sopt, &gsr,
			    sizeof(struct group_req),
			    sizeof(struct group_req));
		} else if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) {
			error = sooptcopyin(sopt, &gsr,
			    sizeof(struct group_source_req),
			    sizeof(struct group_source_req));
		}
		if (error)
			return (error);

		if (gsa->sin6.sin6_family != AF_INET6 ||
		    gsa->sin6.sin6_len != sizeof(struct sockaddr_in6))
			return (EINVAL);
		if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) {
			if (ssa->sin6.sin6_family != AF_INET6 ||
			    ssa->sin6.sin6_len != sizeof(struct sockaddr_in6))
				return (EINVAL);
			if (IN6_IS_ADDR_MULTICAST(&ssa->sin6.sin6_addr))
				return (EINVAL);
			/*
			 * TODO: Validate embedded scope ID in source
			 * list entry against passed-in ifp, if and only
			 * if source list filter entry is iface or node local.
			 */
			in6_clearscope(&ssa->sin6.sin6_addr);
		}
		gsa->sin6.sin6_port = 0;
		gsa->sin6.sin6_scope_id = 0;
		ifindex = gsr.gsr_interface;
		break;

	default:
		CTR2(KTR_MLD, "%s: unknown sopt_name %d",
		    __func__, sopt->sopt_name);
		return (EOPNOTSUPP);
		break;
	}

	if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr))
		return (EINVAL);

	/*
	 * Validate interface index if provided. If no interface index
	 * was provided separately, attempt to look the membership up
	 * from the default scope as a last resort to disambiguate
	 * the membership we are being asked to leave.
	 * XXX SCOPE6 lock potentially taken here.
	 */
	if (ifindex != 0) {
		NET_EPOCH_ENTER(et);
		ifp = ifnet_byindex(ifindex);
		NET_EPOCH_EXIT(et);	/* XXXGL: unsafe ifp */
		if (ifp == NULL)
			return (EADDRNOTAVAIL);
		(void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL);
	} else {
		error = sa6_embedscope(&gsa->sin6, V_ip6_use_defzone);
		if (error)
			return (EADDRNOTAVAIL);
		/*
		 * Some badly behaved applications don't pass an ifindex
		 * or a scope ID, which is an API violation. In this case,
		 * perform a lookup as per a v6 join.
		 *
		 * XXX For now, stomp on zone ID for the corner case.
		 * This is not the 'KAME way', but we need to see the ifp
		 * directly until such time as this implementation is
		 * refactored, assuming the scope IDs are the way to go.
		 */
		ifindex = ntohs(gsa->sin6.sin6_addr.s6_addr16[1]);
		if (ifindex == 0) {
			CTR2(KTR_MLD, "%s: warning: no ifindex, looking up "
			    "ifp for group %s.", __func__,
			    ip6_sprintf(ip6tbuf, &gsa->sin6.sin6_addr));
			ifp = in6p_lookup_mcast_ifp(inp, &gsa->sin6);
		} else {
			NET_EPOCH_ENTER(et);
			ifp = ifnet_byindex(ifindex);
			NET_EPOCH_EXIT(et);	/* XXXGL: unsafe ifp */
		}
		if (ifp == NULL)
			return (EADDRNOTAVAIL);
	}

	CTR2(KTR_MLD, "%s: ifp = %p", __func__, ifp);
	KASSERT(ifp != NULL, ("%s: ifp did not resolve", __func__));

	IN6_MULTI_LOCK();

	/*
	 * Find the membership in the membership list.
	 */
	imo = in6p_findmoptions(inp);
	imf = im6o_match_group(imo, ifp, &gsa->sa);
	if (imf == NULL) {
		error = EADDRNOTAVAIL;
		goto out_in6p_locked;
	}
	inm = imf->im6f_in6m;

	if (ssa->ss.ss_family != AF_UNSPEC)
		is_final = false;

	/*
	 * Begin state merge transaction at socket layer.
	 */
	INP_WLOCK_ASSERT(inp);

	/*
	 * If we were instructed only to leave a given source, do so.
	 * MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships.
	 */
	if (is_final) {
		ip6_mfilter_remove(&imo->im6o_head, imf);
		im6f_leave(imf);

		/*
		 * Give up the multicast address record to which
		 * the membership points.
		 */
		(void)in6_leavegroup_locked(inm, imf);
	} else {
		if (imf->im6f_st[0] == MCAST_EXCLUDE) {
			error = EADDRNOTAVAIL;
			goto out_in6p_locked;
		}
		ims = im6o_match_source(imf, &ssa->sa);
		if (ims == NULL) {
			CTR3(KTR_MLD, "%s: source %s %spresent", __func__,
			    ip6_sprintf(ip6tbuf, &ssa->sin6.sin6_addr),
			    "not ");
			error = EADDRNOTAVAIL;
			goto out_in6p_locked;
		}
		CTR2(KTR_MLD, "%s: %s source", __func__, "block");
		error = im6f_prune(imf, &ssa->sin6);
		if (error) {
			CTR1(KTR_MLD, "%s: merge imf state failed",
			    __func__);
			goto out_in6p_locked;
		}
	}

	/*
	 * Begin state merge transaction at MLD layer.
	 */
	if (!is_final) {
		CTR1(KTR_MLD, "%s: merge inm state", __func__);
		IN6_MULTI_LIST_LOCK();
		error = in6m_merge(inm, imf);
		if (error) {
			CTR1(KTR_MLD, "%s: failed to merge inm state",
			    __func__);
			IN6_MULTI_LIST_UNLOCK();
			im6f_rollback(imf);
			im6f_reap(imf);
			goto out_in6p_locked;
		}

		CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
		error = mld_change_state(inm, 0);
		IN6_MULTI_LIST_UNLOCK();
		if (error) {
			CTR1(KTR_MLD, "%s: failed mld downcall",
			    __func__);
			im6f_rollback(imf);
			im6f_reap(imf);
			goto out_in6p_locked;
		}
	}

	im6f_commit(imf);
	im6f_reap(imf);

out_in6p_locked:
	INP_WUNLOCK(inp);

	if (is_final && imf)
		ip6_mfilter_free(imf);

	IN6_MULTI_UNLOCK();
	return (error);
}
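
/*
 * For illustration only: the matching userland operation is
 * setsockopt(2) with MCAST_LEAVE_GROUP (or IPV6_LEAVE_GROUP with a
 * struct ipv6_mreq).  A minimal sketch, reusing the group_req "gr"
 * from the join example above:
 *
 *	if (setsockopt(s, IPPROTO_IPV6, MCAST_LEAVE_GROUP,
 *	    &gr, sizeof(gr)) == -1)
 *		err(1, "MCAST_LEAVE_GROUP");
 */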

/*
 * Select the interface for transmitting IPv6 multicast datagrams.
 *
 * An interface index (u_int) is passed to this socket option.  An
 * interface index of 0 is used to remove a previous selection.
 * When no interface is selected, one is chosen for every send.
 */
static int
in6p_set_multicast_if(struct inpcb *inp, struct sockopt *sopt)
{
	struct epoch_tracker et;
	struct ifnet *ifp;
	struct ip6_moptions *imo;
	u_int ifindex;
	int error;

	if (sopt->sopt_valsize != sizeof(u_int))
		return (EINVAL);

	error = sooptcopyin(sopt, &ifindex, sizeof(u_int), sizeof(u_int));
	if (error)
		return (error);
	NET_EPOCH_ENTER(et);
	if (ifindex == 0)
		ifp = NULL;
	else {
		ifp = ifnet_byindex(ifindex);
		if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) {
			NET_EPOCH_EXIT(et);
			return (EADDRNOTAVAIL);
		}
	}
	NET_EPOCH_EXIT(et);	/* XXXGL: unsafe ifp */
	imo = in6p_findmoptions(inp);
	imo->im6o_multicast_ifp = ifp;
	INP_WUNLOCK(inp);

	return (0);
}
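
/*
 * For illustration only: from userland this option takes a plain
 * interface index.  A minimal sketch (interface name is a placeholder):
 *
 *	u_int idx = if_nametoindex("em0");
 *
 *	if (setsockopt(s, IPPROTO_IPV6, IPV6_MULTICAST_IF,
 *	    &idx, sizeof(idx)) == -1)
 *		err(1, "IPV6_MULTICAST_IF");
 */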

/*
 * Atomically set source filters on a socket for an IPv6 multicast group.
 *
 * XXXGL: unsafely exits epoch with ifnet pointer
 */
static int
in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
{
	struct __msfilterreq msfr;
	struct epoch_tracker et;
	sockunion_t *gsa;
	struct ifnet *ifp;
	struct in6_mfilter *imf;
	struct ip6_moptions *imo;
	struct in6_multi *inm;
	int error;

	error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq),
	    sizeof(struct __msfilterreq));
	if (error)
		return (error);

	if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc)
		return (ENOBUFS);

	if (msfr.msfr_fmode != MCAST_EXCLUDE &&
	    msfr.msfr_fmode != MCAST_INCLUDE)
		return (EINVAL);

	if (msfr.msfr_group.ss_family != AF_INET6 ||
	    msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6))
		return (EINVAL);

	gsa = (sockunion_t *)&msfr.msfr_group;
	if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6.sin6_addr))
		return (EINVAL);

	gsa->sin6.sin6_port = 0;	/* ignore port */

	NET_EPOCH_ENTER(et);
	ifp = ifnet_byindex(msfr.msfr_ifindex);
	NET_EPOCH_EXIT(et);
	if (ifp == NULL)
		return (EADDRNOTAVAIL);
	(void)in6_setscope(&gsa->sin6.sin6_addr, ifp, NULL);

	/*
	 * Take the INP write lock.
	 * Check if this socket is a member of this group.
	 */
	imo = in6p_findmoptions(inp);
	imf = im6o_match_group(imo, ifp, &gsa->sa);
	if (imf == NULL) {
		error = EADDRNOTAVAIL;
		goto out_in6p_locked;
	}
	inm = imf->im6f_in6m;

	/*
	 * Begin state merge transaction at socket layer.
	 */
	INP_WLOCK_ASSERT(inp);

	imf->im6f_st[1] = msfr.msfr_fmode;

	/*
	 * Apply any new source filters, if present.
	 * Make a copy of the user-space source vector so
	 * that we may copy them with a single copyin. This
	 * allows us to deal with page faults up-front.
	 */
	if (msfr.msfr_nsrcs > 0) {
		struct in6_msource *lims;
		struct sockaddr_in6 *psin;
		struct sockaddr_storage *kss, *pkss;
		int i;

		INP_WUNLOCK(inp);

		CTR2(KTR_MLD, "%s: loading %lu source list entries",
		    __func__, (unsigned long)msfr.msfr_nsrcs);
		kss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs,
		    M_TEMP, M_WAITOK);
		error = copyin(msfr.msfr_srcs, kss,
		    sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs);
		if (error) {
			free(kss, M_TEMP);
			return (error);
		}

		INP_WLOCK(inp);

		/*
		 * Mark all source filters as UNDEFINED at t1.
		 * Restore new group filter mode, as im6f_leave()
		 * will set it to INCLUDE.
		 */
		im6f_leave(imf);
		imf->im6f_st[1] = msfr.msfr_fmode;

		/*
		 * Update socket layer filters at t1, lazy-allocating
		 * new entries. This saves a bunch of memory at the
		 * cost of one RB_FIND() per source entry; duplicate
		 * entries in the msfr_nsrcs vector are ignored.
		 * If we encounter an error, roll back the transaction.
		 *
		 * XXX This too could be replaced with a set-symmetric
		 * difference like loop to avoid walking from root
		 * every time, as the key space is common.
		 */
		for (i = 0, pkss = kss; i < msfr.msfr_nsrcs; i++, pkss++) {
			psin = (struct sockaddr_in6 *)pkss;
			if (psin->sin6_family != AF_INET6) {
				error = EAFNOSUPPORT;
				break;
			}
			if (psin->sin6_len != sizeof(struct sockaddr_in6)) {
				error = EINVAL;
				break;
			}
			if (IN6_IS_ADDR_MULTICAST(&psin->sin6_addr)) {
				error = EINVAL;
				break;
			}
			/*
			 * TODO: Validate embedded scope ID in source
			 * list entry against passed-in ifp, if and only
			 * if source list filter entry is iface or node local.
			 */
			in6_clearscope(&psin->sin6_addr);
			error = im6f_get_source(imf, psin, &lims);
			if (error)
				break;
			lims->im6sl_st[1] = imf->im6f_st[1];
		}
		free(kss, M_TEMP);
	}

	if (error)
		goto out_im6f_rollback;

	INP_WLOCK_ASSERT(inp);
	IN6_MULTI_LIST_LOCK();

	/*
	 * Begin state merge transaction at MLD layer.
	 */
	CTR1(KTR_MLD, "%s: merge inm state", __func__);
	error = in6m_merge(inm, imf);
	if (error)
		CTR1(KTR_MLD, "%s: failed to merge inm state", __func__);
	else {
		CTR1(KTR_MLD, "%s: doing mld downcall", __func__);
		error = mld_change_state(inm, 0);
		if (error)
			CTR1(KTR_MLD, "%s: failed mld downcall", __func__);
	}

	IN6_MULTI_LIST_UNLOCK();

out_im6f_rollback:
	if (error)
		im6f_rollback(imf);
	else
		im6f_commit(imf);

	im6f_reap(imf);

out_in6p_locked:
	INP_WUNLOCK(inp);
	return (error);
}
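
/*
 * For illustration only: userland normally drives this path through the
 * RFC 3678 full-state API, e.g. setsourcefilter(3), rather than filling
 * in a struct __msfilterreq by hand.  A minimal sketch that installs an
 * include-mode filter with a single source (addresses and interface
 * name are placeholders):
 *
 *	struct sockaddr_storage src;
 *	struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)&src;
 *	struct sockaddr_in6 grp = {
 *		.sin6_family = AF_INET6,
 *		.sin6_len = sizeof(grp),
 *	};
 *
 *	inet_pton(AF_INET6, "ff35::8000:1", &grp.sin6_addr);
 *	memset(&src, 0, sizeof(src));
 *	s6->sin6_family = AF_INET6;
 *	s6->sin6_len = sizeof(*s6);
 *	inet_pton(AF_INET6, "2001:db8::1", &s6->sin6_addr);
 *	if (setsourcefilter(s, if_nametoindex("em0"),
 *	    (struct sockaddr *)&grp, sizeof(grp),
 *	    MCAST_INCLUDE, 1, &src) == -1)
 *		err(1, "setsourcefilter");
 */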

/*
 * Set the IPv6 multicast options in response to user setsockopt().
 *
 * Many of the socket options handled in this function duplicate the
 * functionality of socket options in the regular unicast API.  However,
 * it is not possible to merge the duplicate code, because the idempotence
 * of the IPv6 multicast part of the BSD Sockets API must be preserved;
 * the effects of these options must be treated as separate and distinct.
 *
 * SMPng: XXX: Unlocked read of inp_socket believed OK.
 */
int
ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt)
{
	struct ip6_moptions *im6o;
	int error;

	error = 0;

	/* If socket is neither of type SOCK_RAW nor SOCK_DGRAM, reject it. */
	if (inp->inp_socket->so_proto->pr_type != SOCK_RAW &&
	    inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)
		return (EOPNOTSUPP);

	switch (sopt->sopt_name) {
	case IPV6_MULTICAST_IF:
		error = in6p_set_multicast_if(inp, sopt);
		break;

	case IPV6_MULTICAST_HOPS: {
		int hlim;

		if (sopt->sopt_valsize != sizeof(int)) {
			error = EINVAL;
			break;
		}
		error = sooptcopyin(sopt, &hlim, sizeof(hlim), sizeof(int));
		if (error)
			break;
		if (hlim < -1 || hlim > 255) {
			error = EINVAL;
			break;
		} else if (hlim == -1) {
			hlim = V_ip6_defmcasthlim;
		}
		im6o = in6p_findmoptions(inp);
		im6o->im6o_multicast_hlim = hlim;
		INP_WUNLOCK(inp);
		break;
	}

	case IPV6_MULTICAST_LOOP: {
		u_int loop;

		/*
		 * Set the loopback flag for outgoing multicast packets.
		 * Must be zero or one.
		 */
		if (sopt->sopt_valsize != sizeof(u_int)) {
			error = EINVAL;
			break;
		}
		error = sooptcopyin(sopt, &loop, sizeof(u_int), sizeof(u_int));
		if (error)
			break;
		if (loop > 1) {
			error = EINVAL;
			break;
		}
		im6o = in6p_findmoptions(inp);
		im6o->im6o_multicast_loop = loop;
		INP_WUNLOCK(inp);
		break;
	}

	case IPV6_JOIN_GROUP:
	case MCAST_JOIN_GROUP:
	case MCAST_JOIN_SOURCE_GROUP:
		error = in6p_join_group(inp, sopt);
		break;

	case IPV6_LEAVE_GROUP:
	case MCAST_LEAVE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
		error = in6p_leave_group(inp, sopt);
		break;

	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
		error = in6p_block_unblock_source(inp, sopt);
		break;

	case IPV6_MSFILTER:
		error = in6p_set_source_filters(inp, sopt);
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}

	INP_UNLOCK_ASSERT(inp);

	return (error);
}
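
/*
 * For illustration only: the scalar options above map directly onto
 * setsockopt(2).  A minimal sketch:
 *
 *	int hlim = 32;		// -1 restores the system default
 *	u_int loop = 0;		// do not loop packets back locally
 *
 *	setsockopt(s, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, &hlim, sizeof(hlim));
 *	setsockopt(s, IPPROTO_IPV6, IPV6_MULTICAST_LOOP, &loop, sizeof(loop));
 */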

/*
 * Expose MLD's multicast filter mode and source list(s) to userland,
 * keyed by (ifindex, group).
 * The filter mode is written out as a uint32_t, followed by
 * 0..n of struct in6_addr.
 * For use by ifmcstat(8).
 * SMPng: NOTE: unlocked read of ifindex space.
 */
static int
sysctl_ip6_mcast_filters(SYSCTL_HANDLER_ARGS)
{
	struct in6_addr mcaddr;
	struct in6_addr src;
	struct epoch_tracker et;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	struct in6_multi *inm;
	struct ip6_msource *ims;
	int *name;
	int retval;
	u_int namelen;
	uint32_t fmode, ifindex;
#ifdef KTR
	char ip6tbuf[INET6_ADDRSTRLEN];
#endif

	name = (int *)arg1;
	namelen = arg2;

	if (req->newptr != NULL)
		return (EPERM);

	/* int: ifindex + 4 * 32 bits of IPv6 address */
	if (namelen != 5)
		return (EINVAL);

	memcpy(&mcaddr, &name[1], sizeof(struct in6_addr));
	if (!IN6_IS_ADDR_MULTICAST(&mcaddr)) {
		CTR2(KTR_MLD, "%s: group %s is not multicast",
		    __func__, ip6_sprintf(ip6tbuf, &mcaddr));
		return (EINVAL);
	}

	ifindex = name[0];
	NET_EPOCH_ENTER(et);
	ifp = ifnet_byindex(ifindex);
	if (ifp == NULL) {
		NET_EPOCH_EXIT(et);
		CTR2(KTR_MLD, "%s: no ifp for ifindex %u",
		    __func__, ifindex);
		return (ENOENT);
	}
	/*
	 * Internal MLD lookups require that scope/zone ID is set.
	 */
	(void)in6_setscope(&mcaddr, ifp, NULL);

	retval = sysctl_wire_old_buffer(req,
	    sizeof(uint32_t) + (in6_mcast_maxgrpsrc * sizeof(struct in6_addr)));
	if (retval) {
		NET_EPOCH_EXIT(et);
		return (retval);
	}

	IN6_MULTI_LOCK();
	IN6_MULTI_LIST_LOCK();
	CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		inm = in6m_ifmultiaddr_get_inm(ifma);
		if (inm == NULL)
			continue;
		if (!IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, &mcaddr))
			continue;
		fmode = inm->in6m_st[1].iss_fmode;
		retval = SYSCTL_OUT(req, &fmode, sizeof(uint32_t));
		if (retval != 0)
			break;
		RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) {
			CTR2(KTR_MLD, "%s: visit node %p", __func__, ims);
			/*
			 * Only copy-out sources which are in-mode.
			 */
			if (fmode != im6s_get_mode(inm, ims, 1)) {
				CTR1(KTR_MLD, "%s: skip non-in-mode",
				    __func__);
				continue;
			}
			src = ims->im6s_addr;
			retval = SYSCTL_OUT(req, &src,
			    sizeof(struct in6_addr));
			if (retval != 0)
				break;
		}
	}
	IN6_MULTI_LIST_UNLOCK();
	IN6_MULTI_UNLOCK();
	NET_EPOCH_EXIT(et);

	return (retval);
}
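
/*
 * For illustration only: ifmcstat(8) reads this handler through
 * sysctl(3).  A rough sketch, assuming the node is attached as
 * net.inet6.ip6.mcast.filters (interface and group are placeholders):
 *
 *	int mib[CTL_MAXNAME];
 *	size_t miblen = CTL_MAXNAME - 5, len = 0;
 *	struct in6_addr grp;
 *
 *	inet_pton(AF_INET6, "ff15::101", &grp);
 *	sysctlnametomib("net.inet6.ip6.mcast.filters", mib, &miblen);
 *	mib[miblen] = if_nametoindex("em0");
 *	memcpy(&mib[miblen + 1], &grp, sizeof(grp));	// 4 x 32-bit words
 *	sysctl(mib, miblen + 5, NULL, &len, NULL, 0);	// size the buffer
 */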

#ifdef KTR

static const char *in6m_modestrs[] = { "un", "in", "ex" };

static const char *
in6m_mode_str(const int mode)
{

	if (mode >= MCAST_UNDEFINED && mode <= MCAST_EXCLUDE)
		return (in6m_modestrs[mode]);
	return ("??");
}

static const char *in6m_statestrs[] = {
	"not-member",
	"silent",
	"reporting",
	"idle",
	"lazy",
	"sleeping",
	"awakening",
	"query-pending",
	"sg-query-pending",
	"leaving"
};
_Static_assert(nitems(in6m_statestrs) ==
    MLD_LEAVING_MEMBER - MLD_NOT_MEMBER + 1, "Missing MLD group state");

static const char *
in6m_state_str(const int state)
{

	if (state >= MLD_NOT_MEMBER && state <= MLD_LEAVING_MEMBER)
		return (in6m_statestrs[state]);
	return ("??");
}

/*
 * Dump an in6_multi structure to the console.
 */
void
in6m_print(const struct in6_multi *inm)
{
	int t;
	char ip6tbuf[INET6_ADDRSTRLEN];

	if ((ktr_mask & KTR_MLD) == 0)
		return;

	printf("%s: --- begin in6m %p ---\n", __func__, inm);
	printf("addr %s ifp %p(%s) ifma %p\n",
	    ip6_sprintf(ip6tbuf, &inm->in6m_addr),
	    inm->in6m_ifp,
	    if_name(inm->in6m_ifp),
	    inm->in6m_ifma);
	printf("timer %u state %s refcount %u scq.len %u\n",
	    inm->in6m_timer,
	    in6m_state_str(inm->in6m_state),
	    inm->in6m_refcount,
	    mbufq_len(&inm->in6m_scq));
	printf("mli %p nsrc %lu sctimer %u scrv %u\n",
	    inm->in6m_mli,
	    inm->in6m_nsrc,
	    inm->in6m_sctimer,
	    inm->in6m_scrv);
	for (t = 0; t < 2; t++) {
		printf("t%d: fmode %s asm %u ex %u in %u rec %u\n", t,
		    in6m_mode_str(inm->in6m_st[t].iss_fmode),
		    inm->in6m_st[t].iss_asm,
		    inm->in6m_st[t].iss_ex,
		    inm->in6m_st[t].iss_in,
		    inm->in6m_st[t].iss_rec);
	}
	printf("%s: --- end in6m %p ---\n", __func__, inm);
}

#else /* !KTR */

void
in6m_print(const struct in6_multi *inm)
{

}

#endif /* KTR */