Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/net/bridge/br_multicast.c
26285 views
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
* Bridge multicast support.
4
*
5
* Copyright (c) 2010 Herbert Xu <[email protected]>
6
*/
7
8
#include <linux/err.h>
9
#include <linux/export.h>
10
#include <linux/if_ether.h>
11
#include <linux/igmp.h>
12
#include <linux/in.h>
13
#include <linux/jhash.h>
14
#include <linux/kernel.h>
15
#include <linux/log2.h>
16
#include <linux/netdevice.h>
17
#include <linux/netfilter_bridge.h>
18
#include <linux/random.h>
19
#include <linux/rculist.h>
20
#include <linux/skbuff.h>
21
#include <linux/slab.h>
22
#include <linux/timer.h>
23
#include <linux/inetdevice.h>
24
#include <linux/mroute.h>
25
#include <net/ip.h>
26
#include <net/switchdev.h>
27
#if IS_ENABLED(CONFIG_IPV6)
28
#include <linux/icmpv6.h>
29
#include <net/ipv6.h>
30
#include <net/mld.h>
31
#include <net/ip6_checksum.h>
32
#include <net/addrconf.h>
33
#endif
34
#include <trace/events/bridge.h>
35
36
#include "br_private.h"
37
#include "br_private_mcast_eht.h"
38
39
/* MDB hash table: maps a multicast group address (struct br_ip, the full
 * struct is the key, so zeroed padding matters) to its mdb entry.
 */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};
45
46
/* S,G per-port hash table: maps a (port, S,G address) key to its
 * port group entry, used to find source-specific entries per port.
 */
static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};
52
53
/* Forward declarations for static helpers defined later in this file. */
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query);
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);

static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);

static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack);
88
89
/* Look up the per-port S,G entry for @sg_p, or NULL if none.
 * Caller must hold br->multicast_lock.
 */
static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
		struct net_bridge_port_group_sg_key *sg_p)
{
	lockdep_assert_held_once(&br->multicast_lock);

	return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
				      br_sg_port_rht_params);
}
98
99
/* Look up an MDB entry by group address. rhashtable_lookup() requires the
 * caller to be in an RCU read-side critical section.
 */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}
104
105
/* Look up an MDB entry by group address under multicast_lock (asserted);
 * the lock keeps the returned entry alive after rcu_read_unlock().
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	/* RCU only satisfies rhashtable_lookup()'s requirement here */
	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}
118
119
/* Convenience wrapper: look up an IPv4 group @dst in VLAN @vid.
 * The key is memset to zero first because struct padding is part of
 * the hash key (key_len covers the whole struct br_ip).
 */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
131
132
#if IS_ENABLED(CONFIG_IPV6)
/* Convenience wrapper: look up an IPv6 group @dst in VLAN @vid.
 * Key is zeroed first; padding is part of the hash key.
 */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
#endif
147
148
/* Find the MDB entry to forward @skb to, or NULL to fall back to flooding.
 * For IGMPv3/MLDv2 an S,G lookup (group + source address) is tried first,
 * then the *,G lookup; non-IP protocols are matched by destination MAC.
 * Returns NULL when snooping is disabled or the skb is an IGMP/MLD packet.
 */
struct net_bridge_mdb_entry *
br_mdb_entry_skb_get(struct net_bridge_mcast *brmctx, struct sk_buff *skb,
		     u16 vid)
{
	struct net_bridge *br = brmctx->br;
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return NULL;

	/* control traffic (IGMP/MLD) is never forwarded via the MDB */
	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (brmctx->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			/* try the source-specific S,G entry first */
			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (brmctx->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			/* try the source-specific S,G entry first */
			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		/* non-IP: match on destination MAC address only */
		ip.proto = 0;
		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
	}

	return br_mdb_ip_get_rcu(br, &ip);
}
200
201
/* IMPORTANT: this function must be used only when the contexts cannot be
 * passed down (e.g. timer) and must be used for read-only purposes because
 * the vlan snooping option can change, so it can return any context
 * (non-vlan or vlan). Its initial intended purpose is to read timer values
 * from the *current* context based on the option. At worst that could lead
 * to inconsistent timers when the contexts are changed, i.e. src timer
 * which needs to re-arm with a specific delay taken from the old context
 */
static struct net_bridge_mcast_port *
br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg)
{
	/* default: the port's own (non-vlan) multicast context */
	struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&pg->key.port->br->multicast_lock);

	/* if vlan snooping is disabled use the port's multicast context */
	if (!pg->key.addr.vid ||
	    !br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		goto out;

	/* locking is tricky here, due to different rules for multicast and
	 * vlans we need to take rcu to find the vlan and make sure it has
	 * the BR_VLFLAG_MCAST_ENABLED flag set, it can only change under
	 * multicast_lock which must be already held here, so the vlan's pmctx
	 * can safely be used on return
	 */
	rcu_read_lock();
	vlan = br_vlan_find(nbp_vlan_group_rcu(pg->key.port), pg->key.addr.vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;
	else
		pmctx = NULL; /* vlan gone or mcast disabled on it */
	rcu_read_unlock();
out:
	return pmctx;
}
238
239
/* Return the per-vlan multicast port context for (@port, @vid), or NULL if
 * vlan snooping is off, the vlan doesn't exist, or mcast is disabled on it.
 * Caller must hold multicast_lock, which keeps the returned pmctx valid.
 */
static struct net_bridge_mcast_port *
br_multicast_port_vid_to_port_ctx(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_mcast_port *pmctx = NULL;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&port->br->multicast_lock);

	if (!br_opt_get(port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		return NULL;

	/* Take RCU to access the vlan. */
	rcu_read_lock();

	vlan = br_vlan_find(nbp_vlan_group_rcu(port), vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;

	rcu_read_unlock();

	return pmctx;
}
261
262
/* when snooping we need to check if the contexts should be used
263
* in the following order:
264
* - if pmctx is non-NULL (port), check if it should be used
265
* - if pmctx is NULL (bridge), check if brmctx should be used
266
*/
267
static bool
268
br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx,
269
const struct net_bridge_mcast_port *pmctx)
270
{
271
if (!netif_running(brmctx->br->dev))
272
return false;
273
274
if (pmctx)
275
return !br_multicast_port_ctx_state_disabled(pmctx);
276
else
277
return !br_multicast_ctx_vlan_disabled(brmctx);
278
}
279
280
static bool br_port_group_equal(struct net_bridge_port_group *p,
281
struct net_bridge_port *port,
282
const unsigned char *src)
283
{
284
if (p->key.port != port)
285
return false;
286
287
if (!(port->flags & BR_MULTICAST_TO_UNICAST))
288
return true;
289
290
return ether_addr_equal(src, p->eth_addr);
291
}
292
293
/* Install an S,G entry for @sg_ip on @pg's port to mirror a *,G EXCLUDE
 * group, unless one already exists. Kernel-created entries are marked
 * MDB_PG_FLAGS_STAR_EXCL so they can be auto-removed later.
 */
static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *src_pg;
	struct net_bridge_mcast *brmctx;

	memset(&sg_key, 0, sizeof(sg_key));
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	/* an S,G entry for this port already exists - nothing to do */
	if (br_sg_port_find(brmctx->br, &sg_key))
		return;

	src_pg = __br_multicast_add_group(brmctx, pmctx,
					  sg_ip, pg->eth_addr,
					  MCAST_INCLUDE, false, false);
	/* only flag entries the kernel itself created */
	if (IS_ERR_OR_NULL(src_pg) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}
317
318
/* Remove the kernel-installed STAR_EXCL S,G entry for @sg_ip on @pg's port.
 * User-space (non-RTPROT_KERNEL) and non-STAR_EXCL entries are left alone.
 */
static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	src_pg = br_sg_port_find(br, &sg_key);
	if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	br_multicast_find_del_pg(br, src_pg);
}
335
336
/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 * to all other ports' S,G entries which are not blocked by the current group
 * for proper replication, the assumption is that any S,G blocked entries
 * are already added so the S,G,port lookup should skip them.
 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 * deleted we need to remove it from all ports' S,G entries where it was
 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 */
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
				     u8 filter_mode)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *pg_lst;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	/* only valid for *,G port groups */
	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
		return;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;
	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		return;

	/* sg_ip inherits group/proto/vid from *,G; only .src varies below */
	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;

	/* walk all other port groups of this *,G entry */
	for (pg_lst = mlock_dereference(mp->ports, br);
	     pg_lst;
	     pg_lst = mlock_dereference(pg_lst->next, br)) {
		struct net_bridge_group_src *src_ent;

		if (pg_lst == pg)
			continue;
		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			switch (filter_mode) {
			case MCAST_INCLUDE:
				__fwd_del_star_excl(pg, &sg_ip);
				break;
			case MCAST_EXCLUDE:
				__fwd_add_star_excl(pmctx, pg, &sg_ip);
				break;
			}
		}
	}
}
388
389
/* called when adding a new S,G with host_joined == false by default */
static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_mdb_entry *sg_mp;

	/* must be called with a *,G entry */
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;
	/* host hasn't joined the *,G - nothing to propagate */
	if (!star_mp->host_joined)
		return;

	sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
	if (!sg_mp)
		return;
	/* inherit the host-joined state from the *,G entry */
	sg_mp->host_joined = true;
}
405
406
/* set the host_joined state of all of *,G's S,G entries */
static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
{
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mdb_entry *sg_mp;
	struct net_bridge_port_group *pg;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	/* sg_ip inherits group/proto/vid from the *,G; .src varies below */
	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = star_mp->addr;
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_group_src *src_ent;

		hlist_for_each_entry(src_ent, &pg->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			sg_mp = br_mdb_ip_get(br, &sg_ip);
			if (!sg_mp)
				continue;
			/* mirror the *,G host-joined state onto the S,G */
			sg_mp->host_joined = star_mp->host_joined;
		}
	}
}
435
436
/* If an S,G entry is left with only kernel-installed STAR_EXCL (and perm)
 * port groups, drop the host-joined state and delete the non-permanent
 * port groups - the entry no longer has real INCLUDE members.
 */
static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	/* *,G exclude ports are only added to S,G entries */
	if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
		return;

	/* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports
	 * we should ignore perm entries since they're managed by user-space
	 */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;
	     pp = &p->next)
		if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
				  MDB_PG_FLAGS_PERMANENT)))
			return;

	/* currently the host can only have joined the *,G which means
	 * we treat it as EXCLUDE {}, so for an S,G it's considered a
	 * STAR_EXCLUDE entry and we can safely leave it
	 */
	sgmp->host_joined = false;

	/* second pass: actually delete the non-permanent port groups */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
		if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(sgmp, p, pp);
		else
			pp = &p->next;
	}
}
469
470
/* A new S,G port group @sg was added; replicate it onto all EXCLUDE-mode
 * ports of the matching *,G entry @star_mp that don't already have an S,G
 * entry, marking the kernel-created copies MDB_PG_FLAGS_STAR_EXCL.
 */
void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *pg;
	struct net_bridge_mcast *brmctx;

	/* @sg must be S,G and @star_mp must be *,G */
	if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
		return;
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	br_multicast_sg_host_state(star_mp, sg);
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = sg->key.addr;
	/* we need to add all exclude ports to the S,G */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_port_group *src_pg;

		if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
			continue;

		sg_key.port = pg->key.port;
		/* this port already has an S,G entry - skip */
		if (br_sg_port_find(br, &sg_key))
			continue;

		pmctx = br_multicast_pg_to_port_ctx(pg);
		if (!pmctx)
			continue;
		brmctx = br_multicast_port_ctx_get_global(pmctx);

		src_pg = __br_multicast_add_group(brmctx, pmctx,
						  &sg->key.addr,
						  sg->eth_addr,
						  MCAST_INCLUDE, false, false);
		/* flag only entries the kernel itself created */
		if (IS_ERR_OR_NULL(src_pg) ||
		    src_pg->rt_protocol != RTPROT_KERNEL)
			continue;
		src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
	}
}
515
516
/* Install the S,G port group entry for source @src of its *,G port group
 * and mark the source BR_SGRP_F_INSTALLED. The new entry starts blocked
 * if the source timer isn't running.
 */
static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
{
	struct net_bridge_mdb_entry *star_mp;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *sg;
	struct net_bridge_mcast *brmctx;
	struct br_ip sg_ip;

	/* already installed - nothing to do */
	if (src->flags & BR_SGRP_F_INSTALLED)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	pmctx = br_multicast_pg_to_port_ctx(src->pg);
	if (!pmctx)
		return;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	/* build the S,G address from the *,G group + this source */
	sg_ip = src->pg->key.addr;
	sg_ip.src = src->addr.src;

	sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip,
				      src->pg->eth_addr, MCAST_INCLUDE, false,
				      !timer_pending(&src->timer));
	if (IS_ERR_OR_NULL(sg))
		return;
	src->flags |= BR_SGRP_F_INSTALLED;
	sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;

	/* if it was added by user-space as perm we can skip next steps */
	if (sg->rt_protocol != RTPROT_KERNEL &&
	    (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* the kernel is now responsible for removing this S,G */
	timer_delete(&sg->timer);
	star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
	if (!star_mp)
		return;

	br_multicast_sg_add_exclude_ports(star_mp, sg);
}
556
557
/* Remove the S,G port group entry installed for source @src and clear
 * BR_SGRP_F_INSTALLED. @fastleave marks the entry so the deletion
 * notification reflects a fast-leave removal.
 */
static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
					bool fastleave)
{
	struct net_bridge_port_group *p, *pg = src->pg;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	/* build the S,G address from the *,G group + this source */
	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	sg_ip.src = src->addr.src;

	mp = br_mdb_ip_get(src->br, &sg_ip);
	if (!mp)
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, src->br)) != NULL;
	     pp = &p->next) {
		if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
			continue;

		/* permanent user-space entries stay unless the source
		 * itself was user-added
		 */
		if (p->rt_protocol != RTPROT_KERNEL &&
		    (p->flags & MDB_PG_FLAGS_PERMANENT) &&
		    !(src->flags & BR_SGRP_F_USER_ADDED))
			break;

		if (fastleave)
			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_del_pg(mp, p, pp);
		break;
	}
	src->flags &= ~BR_SGRP_F_INSTALLED;
}
591
592
/* install S,G and based on src's timer enable or disable forwarding */
static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *sg;
	u8 old_flags;

	br_multicast_fwd_src_add(src);

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = src->pg->key.addr;
	sg_key.addr.src = src->addr.src;
	sg_key.port = src->pg->key.port;

	/* permanent (user-space) entries are never toggled by the kernel */
	sg = br_sg_port_find(src->br, &sg_key);
	if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* a running source timer means traffic from this source is wanted */
	old_flags = sg->flags;
	if (timer_pending(&src->timer))
		sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
	else
		sg->flags |= MDB_PG_FLAGS_BLOCKED;

	/* notify user-space only when the blocked state actually changed */
	if (old_flags != sg->flags) {
		struct net_bridge_mdb_entry *sg_mp;

		sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
		if (!sg_mp)
			return;
		br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
	}
}
625
626
/* GC destroy callback for an MDB entry: stop its timer for good and free
 * it after an RCU grace period. Runs from the gc workqueue.
 */
static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_mdb_entry *mp;

	mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
	/* must already be unlinked from the hash table and port list */
	WARN_ON(!hlist_unhashed(&mp->mdb_node));
	WARN_ON(mp->ports);

	timer_shutdown_sync(&mp->timer);
	kfree_rcu(mp, rcu);
}
637
638
/* Unlink an MDB entry from the hash table and lists, and hand it to the
 * gc workqueue for deferred destruction (see br_multicast_destroy_mdb_entry).
 */
static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_init_rcu(&mp->mdb_node);
	hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}
648
649
/* Group timer expiry: drop host membership and delete the MDB entry if no
 * port groups remain. A pending timer means it was re-armed - bail out.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = timer_container_of(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
	    timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	/* ports still reference the group - keep the entry */
	if (mp->ports)
		goto out;
	br_multicast_del_mdb_entry(mp);
out:
	spin_unlock(&br->multicast_lock);
}
667
668
/* GC destroy callback for a group source entry: stop its timer for good
 * and free it after an RCU grace period. Runs from the gc workqueue.
 */
static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_src *src;

	src = container_of(gc, struct net_bridge_group_src, mcast_gc);
	/* must already be unlinked from the port group's src_list */
	WARN_ON(!hlist_unhashed(&src->node));

	timer_shutdown_sync(&src->timer);
	kfree_rcu(src, rcu);
}
678
679
/* Unlink a source entry from its port group and queue it for deferred
 * destruction on the gc workqueue. Does NOT remove its installed S,G
 * forwarding entry - see br_multicast_del_group_src() for that.
 */
void __br_multicast_del_group_src(struct net_bridge_group_src *src)
{
	struct net_bridge *br = src->pg->key.port->br;

	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}
688
689
/* Fully delete a group source: tear down its S,G forwarding entry
 * (optionally marking it as a fast-leave removal), then unlink and free.
 */
void br_multicast_del_group_src(struct net_bridge_group_src *src,
				bool fastleave)
{
	br_multicast_fwd_src_remove(src, fastleave);
	__br_multicast_del_group_src(src);
}
695
696
/* Bump the MDB entry count of one context, enforcing its configured
 * maximum (0 = unlimited). @what names the context ("Port"/"Port-VLAN")
 * for the extack error message. Returns 0 or -E2BIG.
 */
static int
br_multicast_port_ngroups_inc_one(struct net_bridge_mcast_port *pmctx,
				  struct netlink_ext_ack *extack,
				  const char *what)
{
	u32 max = READ_ONCE(pmctx->mdb_max_entries);
	u32 n = READ_ONCE(pmctx->mdb_n_entries);

	if (max && n >= max) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "%s is already in %u groups, and mcast_max_groups=%u",
				       what, n, max);
		return -E2BIG;
	}

	WRITE_ONCE(pmctx->mdb_n_entries, n + 1);
	return 0;
}
713
714
/* Drop the MDB entry count of one context; warns on underflow. */
static void br_multicast_port_ngroups_dec_one(struct net_bridge_mcast_port *pmctx)
{
	u32 n = READ_ONCE(pmctx->mdb_n_entries);

	WARN_ON_ONCE(n == 0);
	WRITE_ONCE(pmctx->mdb_n_entries, n - 1);
}
721
722
/* Account a new group @group on @port: always bump the port context, and
 * also the per-vlan context when the group has a VID with vlan snooping
 * enabled. On failure the port-context bump is rolled back.
 * Returns 0 or -E2BIG when a limit is hit.
 */
static int br_multicast_port_ngroups_inc(struct net_bridge_port *port,
					 const struct br_ip *group,
					 struct netlink_ext_ack *extack)
{
	struct net_bridge_mcast_port *pmctx;
	int err;

	lockdep_assert_held_once(&port->br->multicast_lock);

	/* Always count on the port context. */
	err = br_multicast_port_ngroups_inc_one(&port->multicast_ctx, extack,
						"Port");
	if (err) {
		trace_br_mdb_full(port->dev, group);
		return err;
	}

	/* Only count on the VLAN context if VID is given, and if snooping on
	 * that VLAN is enabled.
	 */
	if (!group->vid)
		return 0;

	pmctx = br_multicast_port_vid_to_port_ctx(port, group->vid);
	if (!pmctx)
		return 0;

	err = br_multicast_port_ngroups_inc_one(pmctx, extack, "Port-VLAN");
	if (err) {
		trace_br_mdb_full(port->dev, group);
		goto dec_one_out;
	}

	return 0;

dec_one_out:
	/* roll back the port-context increment done above */
	br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
	return err;
}
761
762
/* Un-account a group on @port: mirror of br_multicast_port_ngroups_inc(),
 * decrementing the per-vlan context (when applicable) and then the port
 * context.
 */
static void br_multicast_port_ngroups_dec(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_mcast_port *pmctx;

	lockdep_assert_held_once(&port->br->multicast_lock);

	if (vid) {
		pmctx = br_multicast_port_vid_to_port_ctx(port, vid);
		if (pmctx)
			br_multicast_port_ngroups_dec_one(pmctx);
	}
	br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
}
775
776
/* Lockless accessors for the per-context MDB entry count and its
 * configured maximum; READ_ONCE/WRITE_ONCE pair with the updaters above.
 */
u32 br_multicast_ngroups_get(const struct net_bridge_mcast_port *pmctx)
{
	return READ_ONCE(pmctx->mdb_n_entries);
}

void br_multicast_ngroups_set_max(struct net_bridge_mcast_port *pmctx, u32 max)
{
	WRITE_ONCE(pmctx->mdb_max_entries, max);
}

u32 br_multicast_ngroups_get_max(const struct net_bridge_mcast_port *pmctx)
{
	return READ_ONCE(pmctx->mdb_max_entries);
}
790
791
/* GC destroy callback for a port group: stop both of its timers for good
 * and free it after an RCU grace period. Runs from the gc workqueue.
 */
static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_port_group *pg;

	pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
	/* must already be unlinked and have no remaining sources */
	WARN_ON(!hlist_unhashed(&pg->mglist));
	WARN_ON(!hlist_empty(&pg->src_list));

	timer_shutdown_sync(&pg->rexmit_timer);
	timer_shutdown_sync(&pg->timer);
	kfree_rcu(pg, rcu);
}
803
804
/* Delete port group @pg from MDB entry @mp. @pp is the link pointing at
 * @pg in mp's port list. Tears down sources, notifies user-space, updates
 * S,G bookkeeping and queues @pg for deferred destruction. If @mp ends up
 * empty its timer is fired to clean it up too.
 */
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	/* unlink from the MDB entry's port list */
	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	br_multicast_eht_clean_sets(pg);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent, false);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	if (!br_multicast_is_star_g(&mp->addr)) {
		/* S,G entries also live in the per-port S,G hash table */
		rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
				       br_sg_port_rht_params);
		br_multicast_sg_del_exclude_ports(mp);
	} else {
		/* remove auto-installed STAR_EXCL S,G copies of this *,G */
		br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
	}
	br_multicast_port_ngroups_dec(pg->key.port, pg->key.addr.vid);
	hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);

	/* last reference gone - let the group timer reap the MDB entry */
	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}
832
833
/* Find @pg's MDB entry and its link in the entry's port list, then delete
 * it via br_multicast_del_pg(). Warns if the entry or the link is missing,
 * since callers pass port groups that must still be installed.
 */
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		br_multicast_del_pg(mp, pg, pp);
		return;
	}

	/* @pg was not on its own MDB entry's list - should never happen */
	WARN_ON(1);
}
856
857
static void br_multicast_port_group_expired(struct timer_list *t)
858
{
859
struct net_bridge_port_group *pg = timer_container_of(pg, t, timer);
860
struct net_bridge_group_src *src_ent;
861
struct net_bridge *br = pg->key.port->br;
862
struct hlist_node *tmp;
863
bool changed;
864
865
spin_lock(&br->multicast_lock);
866
if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
867
hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
868
goto out;
869
870
changed = !!(pg->filter_mode == MCAST_EXCLUDE);
871
pg->filter_mode = MCAST_INCLUDE;
872
hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
873
if (!timer_pending(&src_ent->timer)) {
874
br_multicast_del_group_src(src_ent, false);
875
changed = true;
876
}
877
}
878
879
if (hlist_empty(&pg->src_list)) {
880
br_multicast_find_del_pg(br, pg);
881
} else if (changed) {
882
struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);
883
884
if (changed && br_multicast_is_star_g(&pg->key.addr))
885
br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
886
887
if (WARN_ON(!mp))
888
goto out;
889
br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
890
}
891
out:
892
spin_unlock(&br->multicast_lock);
893
}
894
895
/* Run the deferred-destruction list: unlink each gc entry and invoke its
 * type-specific destroy callback (which may sleep, e.g. timer shutdown).
 */
static void br_multicast_gc(struct hlist_head *head)
{
	struct net_bridge_mcast_gc *gcent;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
		hlist_del_init(&gcent->gc_node);
		gcent->destroy(gcent);
	}
}
905
906
/* When the query is generated in a vlan context, tag the skb with that
 * vlan (hwaccel tag) unless the vlan egresses untagged.
 */
static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx,
					     struct net_bridge_mcast_port *pmctx,
					     struct sk_buff *skb)
{
	struct net_bridge_vlan *vlan = NULL;

	/* prefer the per-port vlan context over the global one */
	if (pmctx && br_multicast_port_ctx_is_vlan(pmctx))
		vlan = pmctx->vlan;
	else if (br_multicast_ctx_is_vlan(brmctx))
		vlan = brmctx->vlan;

	if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		u16 vlan_proto;

		if (br_vlan_get_proto(brmctx->br->dev, &vlan_proto) != 0)
			return;
		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan->vid);
	}
}
925
926
/* Build an IGMPv2/v3 query skb originated by the bridge.
 * @ip_dst/@group: IP destination and queried group (0 for general query).
 * @with_srcs: IGMPv3 group-and-source query - include @pg's sources whose
 *	timer position relative to LMQT matches @over_lmqt and which still
 *	have retransmissions pending.
 * @sflag: IGMPv3 suppress-router-side-processing flag.
 * @igmp_type: out - the IGMP type placed in the packet.
 * @need_rexmit: out - set when a source still needs more retransmissions.
 * Returns the skb, or NULL on allocation failure / oversize / no sources.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (brmctx->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			/* first pass: count sources to size the header */
			lmqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* eth + ip (incl. 4-byte router alert option) + igmp */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6; /* 20-byte header + 4-byte router alert option */
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	/* IP Router Alert option (RFC 2113) */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24); /* ihl * 4 */

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (brmctx->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		/* shorter response interval for group-specific queries */
		ih->code = (group ? brmctx->multicast_last_member_interval :
				    brmctx->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? brmctx->multicast_last_member_interval :
				      brmctx->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = brmctx->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* second pass: fill in the source list counted above */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* source list changed between the two passes */
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
1071
1072
#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLDv1/MLDv2 query skb for @group (any-source query when the
 * address is unspecified). For MLDv2 with @with_srcs, only sources whose
 * timer position relative to the last-listener-query time matches
 * @over_llqt and which still have retransmissions pending are included;
 * returns NULL if no such source exists, on allocation/MTU failure, or if
 * no usable IPv6 source address is found on the bridge device.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	/* Size the MLD header first: for MLDv2 source-specific queries count
	 * the matching sources so the packet can be allocated in one go.
	 */
	mld_hdr_size = sizeof(*mldq);
	if (brmctx->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			llqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	/* eth + IPv6 + 8 bytes of Hop-by-Hop options + MLD message */
	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	/* version 6, traffic class 0, flow label 0 */
	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev,
			       &ip6h->daddr, 0, &ip6h->saddr)) {
		/* No usable source address: remember that so the bridge can
		 * avoid sending MLD queries until one appears.
		 */
		kfree_skb(skb);
		br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(group) ?
			brmctx->multicast_query_response_interval :
			brmctx->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (brmctx->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		/* Re-walk the source list with the same filter as the sizing
		 * pass above; the counts must agree or the packet is invalid.
		 */
		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif
1237
1238
/* Dispatch query-packet allocation based on the group's protocol family.
 * When @ip_dst is NULL, fall back to the well-known destinations:
 * 224.0.0.1 (all-hosts) for IPv4 and ff02::1 (all-nodes) for IPv6.
 * Returns the built skb or NULL.
 */
static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						struct net_bridge_mcast_port *pmctx,
						struct net_bridge_port_group *pg,
						struct br_ip *ip_dst,
						struct br_ip *group,
						bool with_srcs, bool over_lmqt,
						u8 sflag, u8 *igmp_type,
						bool *need_rexmit)
{
	if (group->proto == htons(ETH_P_IP)) {
		__be32 dst4;

		dst4 = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
						    dst4, group->dst.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (group->proto == htons(ETH_P_IPV6)) {
		struct in6_addr dst6;

		if (ip_dst)
			dst6 = ip_dst->dst.ip6;
		else
			ipv6_addr_set(&dst6, htonl(0xff020000), 0, 0,
				      htonl(1));

		return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
						    &dst6, &group->dst.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif

	return NULL;
}
1277
1278
/* Find the MDB entry for @group, creating it if missing. Runs under the
 * bridge multicast_lock (hence GFP_ATOMIC). If the MDB is full, multicast
 * snooping is disabled bridge-wide and ERR_PTR(-E2BIG) is returned;
 * other failures return an ERR_PTR as well, never NULL.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		trace_br_mdb_full(br->dev, group);
		br_mc_disabled_update(br->dev, false, NULL);
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		/* visible to RCU readers only after the hash insert succeeded */
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}
1314
1315
/* Timer callback: a source entry's timer ran out. In INCLUDE mode the
 * source is deleted (and the whole port group if it was the last source);
 * in EXCLUDE mode the source merely stops being forwarded.
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = timer_container_of(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	/* bail if the entry is already unlinked, the device is down, or the
	 * timer was re-armed after it fired
	 */
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src, false);
		if (!hlist_empty(&pg->src_list))
			goto out;
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(&br->multicast_lock);
}
1339
1340
/* Linear search of @pg's source list for an entry matching @ip's source
 * address (protocol-aware). Returns the entry or NULL.
 */
struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
{
	struct net_bridge_group_src *ent;

	switch (ip->proto) {
	case htons(ETH_P_IP):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (ip->src.ip4 == ent->addr.src.ip4)
				return ent;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
				return ent;
		break;
#endif
	}

	return NULL;
}
1362
1363
/* Allocate and link a new source entry under @pg. Rejects invalid source
 * addresses (zeronet/multicast for IPv4, unspecified/multicast for IPv6)
 * and enforces the per-group source limit. Runs under multicast_lock
 * (GFP_ATOMIC allocation). Returns the new entry or NULL.
 */
struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(src_ip->src.ip4) ||
		    ipv4_is_multicast(src_ip->src.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(&src_ip->src.ip6) ||
		    ipv6_addr_is_multicast(&src_ip->src.ip6))
			return NULL;
		break;
#endif
	}

	grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->key.port->br;
	grp_src->addr = *src_ip;
	grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(&grp_src->node, &pg->src_list);
	pg->src_ents++;

	return grp_src;
}
1401
1402
/* Allocate a new port group entry for (@port, @group) and link it into the
 * port's mglist. Non-(*,G) entries are also inserted into the bridge's
 * S,G port hashtable. Accounts the group against the port's ngroups limit
 * first and rolls that back on any failure. Returns the entry or NULL
 * (with @extack set on error).
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			const struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group *p;
	int err;

	err = br_multicast_port_ngroups_inc(port, group, extack);
	if (err)
		return NULL;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
		goto dec_out;
	}

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->eht_host_tree = RB_ROOT;
	p->eht_set_tree = RB_ROOT;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	/* only source-specific (S,G) entries live in the sg_port hashtable */
	if (!br_multicast_is_star_g(group) &&
	    rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
					  br_sg_port_rht_params)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't insert new port group");
		goto free_out;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;

free_out:
	kfree(p);
dec_out:
	br_multicast_port_ngroups_dec(port, group->vid);
	return NULL;
}
1460
1461
/* Unlink and free a port group entry, reversing what
 * br_multicast_new_port_group() set up (mglist, optional S,G hashtable
 * entry, ngroups accounting).
 */
void br_multicast_del_port_group(struct net_bridge_port_group *p)
{
	struct net_bridge_port *port = p->key.port;
	__u16 vid = p->key.addr.vid;

	hlist_del_init(&p->mglist);
	if (!br_multicast_is_star_g(&p->key.addr))
		rhashtable_remove_fast(&port->br->sg_port_tbl, &p->rhnode,
				       br_sg_port_rht_params);
	kfree(p);
	br_multicast_port_ngroups_dec(port, vid);
}
1473
1474
/* Mark the bridge device itself as a member of @mp's group, optionally
 * emitting an RTM_NEWMDB notification, and refresh the membership timer
 * (except for L2 groups, which have no timer-driven expiry here).
 */
void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
			    struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		if (br_multicast_is_star_g(&mp->addr))
			br_multicast_star_g_host_state(mp);
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}

	if (br_group_is_l2(&mp->addr))
		return;

	mod_timer(&mp->timer, jiffies + brmctx->multicast_membership_interval);
}
1490
1491
/* Clear the bridge device's own membership in @mp's group, optionally
 * notifying userspace with RTM_DELMDB. No-op if not currently joined.
 */
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (br_multicast_is_star_g(&mp->addr))
		br_multicast_star_g_host_state(mp);
	if (notify)
		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}
1502
1503
/* Core add-group path, called under multicast_lock. With a NULL @pmctx the
 * join is attributed to the bridge device itself (host join, returns NULL,
 * which callers treat as success). Otherwise the port group list of the MDB
 * entry is searched; a matching entry refreshes its timer, a missing one is
 * created and inserted at the port-ordered position. Returns the port group,
 * NULL for host joins/ignored contexts, or an ERR_PTR on failure.
 */
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_multicast_new_group(brmctx->br, group);
	if (IS_ERR(mp))
		return ERR_CAST(mp);

	if (!pmctx) {
		br_multicast_host_join(brmctx, mp, true);
		goto out;
	}

	/* the list is kept sorted by descending port pointer value; stop at
	 * the insertion point if no existing entry matches
	 */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, pmctx->port, src))
			goto found;
		if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
			break;
	}

	p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
					filter_mode, RTPROT_KERNEL, NULL);
	if (unlikely(!p)) {
		p = ERR_PTR(-ENOMEM);
		goto out;
	}
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);

found:
	/* v2/v1 reports carry no explicit timeout; refresh membership */
	if (igmpv2_mldv1)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);

out:
	return p;
}
1557
1558
/* Locked wrapper around __br_multicast_add_group(). Returns 0 on success
 * (including NULL results, which denote host-joined groups) or a negative
 * errno from the ERR_PTR.
 */
static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group *pg;
	int err;

	spin_lock(&brmctx->br->multicast_lock);
	pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
				      igmpv2_mldv1, false);
	/* NULL is considered valid for host joined groups */
	err = PTR_ERR_OR_ZERO(pg);
	spin_unlock(&brmctx->br->multicast_lock);

	return err;
}
1577
1578
/* IPv4 front end for br_multicast_add_group(). Link-local groups
 * (224.0.0.x) are never snooped. IGMPv2 reports imply EXCLUDE{} (any
 * source), IGMPv3 starts in INCLUDE mode.
 */
static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src,
				      bool igmpv2)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv4_is_local_multicast(group))
		return 0;

	/* memset (not a partial initializer) so padding is zeroed too -
	 * br_ip is used as a hash key elsewhere
	 */
	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;
	filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, igmpv2);
}
1600
1601
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 front end for br_multicast_add_group(). The link-local all-nodes
 * group (ff02::1) is never snooped. MLDv1 reports imply EXCLUDE{},
 * MLDv2 starts in INCLUDE mode.
 */
static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src,
				      bool mldv1)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	/* memset so struct padding is zeroed - br_ip is used as a hash key */
	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;
	filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, mldv1);
}
#endif
1625
1626
/* Remove a router-port list entry if it is currently linked.
 * Returns true when an entry was actually removed.
 */
static bool br_multicast_rport_del(struct hlist_node *rlist)
{
	bool was_linked = !hlist_unhashed(rlist);

	if (was_linked)
		hlist_del_init_rcu(rlist);

	return was_linked;
}
1634
1635
/* Remove the port from the IPv4 router-port list; true if it was listed. */
static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
	return br_multicast_rport_del(&pmctx->ip4_rlist);
}
1639
1640
/* Remove the port from the IPv6 router-port list; true if it was listed.
 * Compiles to a constant false when IPv6 support is disabled.
 */
static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	return br_multicast_rport_del(&pmctx->ip6_rlist);
#else
	return false;
#endif
}
1648
1649
/* Common body for the per-port router timers: drop the port from the
 * router list when the learned-router timer expires, unless the router
 * type is static (disabled/permanent) or the timer was re-armed.
 */
static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
					struct timer_list *t,
					struct hlist_node *rlist)
{
	struct net_bridge *br = pmctx->port->br;
	bool del;

	spin_lock(&br->multicast_lock);
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(t))
		goto out;

	del = br_multicast_rport_del(rlist);
	br_multicast_rport_del_notify(pmctx, del);
out:
	spin_unlock(&br->multicast_lock);
}
1667
1668
/* IPv4 learned-router timer callback for a port. */
static void br_ip4_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = timer_container_of(pmctx, t,
								 ip4_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
}
1675
1676
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 learned-router timer callback for a port. */
static void br_ip6_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = timer_container_of(pmctx, t,
								 ip6_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
}
#endif
1685
1686
/* Propagate the bridge's own multicast-router state to offloading
 * hardware via a deferred switchdev attribute notification.
 */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}
1698
1699
/* Common body for the bridge's own local-router timers: once neither the
 * IPv4 nor the IPv6 timer indicates router status (and the setting isn't
 * static), tell switchdev the bridge is no longer a multicast router.
 * @timer is unused here; the per-family state checks below cover it.
 */
static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
					      struct timer_list *timer)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    br_ip4_multicast_is_router(brmctx) ||
	    br_ip6_multicast_is_router(brmctx))
		goto out;

	br_mc_router_state_change(brmctx->br, false);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}
1713
1714
/* IPv4 local-router timer callback for the bridge itself. */
static void br_ip4_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t,
							     ip4_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
1721
1722
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 local-router timer callback for the bridge itself. */
static void br_ip6_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t,
							     ip6_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
#endif
1731
1732
/* The "other querier present" timer expired: no foreign querier was heard
 * for the whole interval, so (re)start our own query cycle, unless the
 * device or this multicast context is down/disabled.
 */
static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(brmctx, query);

out:
	spin_unlock(&brmctx->br->multicast_lock);
}
1746
1747
/* IPv4 other-querier timeout callback. */
static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t,
							     ip4_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
}
1754
1755
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 other-querier timeout callback. */
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t,
							     ip6_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
}
#endif
1764
1765
/* Intentionally empty: the query-delay timer is only checked for being
 * pending elsewhere; nothing needs to run when it fires.
 */
static void br_multicast_query_delay_expired(struct timer_list *t)
{
}
1768
1769
/* Record the source address of our own locally generated query @skb as the
 * current querier address for the matching protocol family.
 */
static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
1780
1781
/* Build and emit one query. With a port context the packet is transmitted
 * out of that port through the netfilter LOCAL_OUT hook; without one the
 * query is looped back into the local RX path so the bridge elects itself
 * querier. A suppressed (sflag) source-specific query is followed by a
 * second pass for the sources under the LMQT (see the goto retry).
 */
static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

again_under_lmqt:
	skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
				       with_srcs, over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (pmctx) {
		skb->dev = pmctx->port->dev;
		br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		/* second packet for sources whose timers are below the LMQT */
		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(brmctx, group, skb);
		br_multicast_count(brmctx->br, NULL, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
1824
1825
/* Lockless, consistent snapshot of @querier into @dest using its seqcount;
 * retries until a write did not race with the reads.
 */
static void br_multicast_read_querier(const struct bridge_mcast_querier *querier,
				      struct bridge_mcast_querier *dest)
{
	unsigned int seq;

	memset(dest, 0, sizeof(*dest));
	do {
		seq = read_seqcount_begin(&querier->seq);
		dest->port_ifidx = querier->port_ifidx;
		memcpy(&dest->addr, &querier->addr, sizeof(struct br_ip));
	} while (read_seqcount_retry(&querier->seq, seq));
}
1837
1838
/* Publish a new querier (port ifindex + address) under the seqcount write
 * side so lockless readers see a consistent pair. @brmctx is currently
 * unused but kept for a uniform call signature.
 */
static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
					struct bridge_mcast_querier *querier,
					int ifindex,
					struct br_ip *saddr)
{
	write_seqcount_begin(&querier->seq);
	querier->port_ifidx = ifindex;
	memcpy(&querier->addr, saddr, sizeof(*saddr));
	write_seqcount_end(&querier->seq);
}
1848
1849
/* Send a general query for the family that @own_query belongs to and
 * re-arm the own-query timer (startup interval while the startup burst is
 * still in progress). Skipped entirely while another querier is active
 * (other_query timer pending) or the querier role is disabled.
 */
static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct bridge_mcast_querier *querier;
	struct br_ip br_group;
	unsigned long time;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    !brmctx->multicast_querier)
		return;

	memset(&br_group.dst, 0, sizeof(br_group.dst));

	/* NOTE: without CONFIG_IPV6 there is no else branch, so a non-IPv4
	 * own_query leaves other_query NULL and we bail out below before
	 * querier is ever dereferenced.
	 */
	if (pmctx ? (own_query == &pmctx->ip4_own_query) :
		    (own_query == &brmctx->ip4_own_query)) {
		querier = &brmctx->ip4_querier;
		other_query = &brmctx->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		querier = &brmctx->ip6_querier;
		other_query = &brmctx->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	/* we're about to select ourselves as querier */
	if (!pmctx && querier->port_ifidx) {
		struct br_ip zeroip = {};

		br_multicast_update_querier(brmctx, querier, 0, &zeroip);
	}

	__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false,
				  0, NULL);

	time = jiffies;
	time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
		brmctx->multicast_startup_query_interval :
		brmctx->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}
1897
1898
/* Per-port own-query timer body: bump the startup counter while still in
 * the startup burst and send the next query, unless the port context has
 * been stopped.
 */
static void
br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	spin_lock(&br->multicast_lock);
	if (br_multicast_port_ctx_state_stopped(pmctx))
		goto out;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, pmctx, query);

out:
	spin_unlock(&br->multicast_lock);
}
1918
1919
/* Per-port IPv4 own-query timer callback. */
static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = timer_container_of(pmctx, t,
								 ip4_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query);
}
1926
1927
#if IS_ENABLED(CONFIG_IPV6)
/* Per-port IPv6 own-query timer callback. */
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = timer_container_of(pmctx, t,
								 ip6_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query);
}
#endif
1936
1937
/* Retransmit timer for group-and-source-specific queries of a port group.
 * Sends a pending group query (suppressed flag set, counted down via
 * grp_query_rexmit_cnt) plus a source-specific query, and re-arms itself
 * while either kind of retransmission is still outstanding. Skipped when
 * another querier is active or we are not querier.
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = timer_container_of(pg, t,
							      rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		goto out;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!brmctx->multicast_querier)
		goto out;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 1, NULL);
	}
	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
			  brmctx->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}
1983
1984
/* Tell offloading hardware whether multicast snooping is enabled on @dev.
 * Note the inversion: @value is "enabled", the switchdev attribute carries
 * "disabled".
 */
static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
		.flags = SWITCHDEV_F_DEFER,
		.u.mc_disabled = !value,
	};

	return switchdev_port_attr_set(dev, &attr, extack);
}
1996
1997
/* Initialize a per-port (or per-port-vlan, when @vlan is non-NULL)
 * multicast context: default router mode and all per-family timers.
 */
void br_multicast_port_ctx_init(struct net_bridge_port *port,
				struct net_bridge_vlan *vlan,
				struct net_bridge_mcast_port *pmctx)
{
	pmctx->port = port;
	pmctx->vlan = vlan;
	pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	timer_setup(&pmctx->ip4_mc_router_timer,
		    br_ip4_multicast_router_expired, 0);
	timer_setup(&pmctx->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&pmctx->ip6_mc_router_timer,
		    br_ip6_multicast_router_expired, 0);
	timer_setup(&pmctx->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
}
2015
2016
/* Tear down a per-port multicast context: stop the router timers (sync,
 * outside the lock), then unlink the port from both router lists and
 * notify about the removal.
 */
void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;
	bool del = false;

#if IS_ENABLED(CONFIG_IPV6)
	timer_delete_sync(&pmctx->ip6_mc_router_timer);
#endif
	timer_delete_sync(&pmctx->ip4_mc_router_timer);

	spin_lock_bh(&br->multicast_lock);
	del |= br_ip6_multicast_rport_del(pmctx);
	del |= br_ip4_multicast_rport_del(pmctx);
	br_multicast_rport_del_notify(pmctx, del);
	spin_unlock_bh(&br->multicast_lock);
}
2032
2033
/* Per-port multicast setup when a port is added to a bridge: default EHT
 * host limit, context init, switchdev sync of the snooping state
 * (-EOPNOTSUPP from non-offloading ports is tolerated) and per-cpu stats
 * allocation. Returns 0 or a negative errno.
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	int err;

	port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
	br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx);

	err = br_mc_disabled_update(port->dev,
				    br_opt_get(port->br,
					       BROPT_MULTICAST_ENABLED),
				    NULL);
	if (err && err != -EOPNOTSUPP)
		return err;

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}
2053
2054
/* Per-port multicast teardown when a port leaves the bridge: delete all of
 * the port's remaining groups, wait for the GC work to free them, then
 * deinit the context and release the stats.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	spin_unlock_bh(&br->multicast_lock);
	flush_work(&br->mcast_gc_work);
	br_multicast_port_ctx_deinit(&port->multicast_ctx);
	free_percpu(port->mcast_stats);
}
2069
2070
/* Restart an own-query cycle: reset the startup counter and fire the
 * query timer immediately. The timer_delete dance only re-arms the timer
 * if it could actually be stopped/was inactive.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (timer_delete_sync_try(&query->timer) >= 0 ||
	    timer_delete(&query->timer))
		mod_timer(&query->timer, jiffies);
}
2078
2079
/* Enable a port (or port-vlan) multicast context: restart own queries,
 * re-add permanent router entries, and for vlan contexts rebuild the
 * per-vlan MDB entry counter from the port's group list.
 * Called with multicast_lock held.
 */
static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !netif_running(br->dev))
		return;

	br_multicast_enable(&pmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&pmctx->ip6_own_query);
#endif
	if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
		br_ip4_multicast_add_router(brmctx, pmctx);
		br_ip6_multicast_add_router(brmctx, pmctx);
	}

	if (br_multicast_port_ctx_is_vlan(pmctx)) {
		struct net_bridge_port_group *pg;
		u32 n = 0;

		/* The mcast_n_groups counter might be wrong. First,
		 * BR_VLFLAG_MCAST_ENABLED is toggled before temporary entries
		 * are flushed, thus mcast_n_groups after the toggle does not
		 * reflect the true values. And second, permanent entries added
		 * while BR_VLFLAG_MCAST_ENABLED was disabled, are not reflected
		 * either. Thus we have to refresh the counter.
		 */

		hlist_for_each_entry(pg, &pmctx->port->mglist, mglist) {
			if (pg->key.addr.vid == pmctx->vlan->vid)
				n++;
		}
		WRITE_ONCE(pmctx->mdb_n_entries, n);
	}
}
2117
2118
/* Locked wrapper for __br_multicast_enable_port_ctx(); skips vlan contexts
 * whose vlan has multicast snooping disabled.
 */
static void br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;

	spin_lock_bh(&br->multicast_lock);
	if (br_multicast_port_ctx_is_vlan(pmctx) &&
	    !(pmctx->vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED)) {
		spin_unlock_bh(&br->multicast_lock);
		return;
	}
	__br_multicast_enable_port_ctx(pmctx);
	spin_unlock_bh(&br->multicast_lock);
}
2131
2132
/* Disable a port (or port-vlan) multicast context: flush non-permanent
 * groups (vlan contexts only touch their own vid), drop the port from
 * both router lists and stop all per-family timers.
 * Called with multicast_lock held.
 */
static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge_port_group *pg;
	struct hlist_node *n;
	bool del = false;

	hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
		    (!br_multicast_port_ctx_is_vlan(pmctx) ||
		     pg->key.addr.vid == pmctx->vlan->vid))
			br_multicast_find_del_pg(pmctx->port->br, pg);

	del |= br_ip4_multicast_rport_del(pmctx);
	timer_delete(&pmctx->ip4_mc_router_timer);
	timer_delete(&pmctx->ip4_own_query.timer);
	del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
	timer_delete(&pmctx->ip6_mc_router_timer);
	timer_delete(&pmctx->ip6_own_query.timer);
#endif
	br_multicast_rport_del_notify(pmctx, del);
}
2154
2155
static void br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
2156
{
2157
struct net_bridge *br = pmctx->port->br;
2158
2159
spin_lock_bh(&br->multicast_lock);
2160
if (br_multicast_port_ctx_is_vlan(pmctx) &&
2161
!(pmctx->vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED)) {
2162
spin_unlock_bh(&br->multicast_lock);
2163
return;
2164
}
2165
2166
__br_multicast_disable_port_ctx(pmctx);
2167
spin_unlock_bh(&br->multicast_lock);
2168
}
2169
2170
/* Enable or disable multicast processing for @port.  When vlan snooping is
 * active, each vlan on the port has its own multicast context which is
 * toggled individually (under RCU); otherwise the single per-port context
 * is toggled.
 */
static void br_multicast_toggle_port(struct net_bridge_port *port, bool on)
{
#if IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
	if (br_opt_get(port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		rcu_read_lock();
		vg = nbp_vlan_group_rcu(port);
		if (!vg) {
			rcu_read_unlock();
			return;
		}

		/* iterate each vlan, toggle vlan multicast context */
		list_for_each_entry_rcu(vlan, &vg->vlan_list, vlist) {
			struct net_bridge_mcast_port *pmctx =
						&vlan->port_mcast_ctx;
			u8 state = br_vlan_get_state(vlan);
			/* enable vlan multicast context when state is
			 * LEARNING or FORWARDING
			 */
			if (on && br_vlan_state_allowed(state, true))
				br_multicast_enable_port_ctx(pmctx);
			else
				br_multicast_disable_port_ctx(pmctx);
		}
		rcu_read_unlock();
		return;
	}
#endif
	/* toggle port multicast context when vlan snooping is disabled */
	if (on)
		br_multicast_enable_port_ctx(&port->multicast_ctx);
	else
		br_multicast_disable_port_ctx(&port->multicast_ctx);
}
2207
2208
/* Enable multicast processing on @port (per-vlan or per-port context,
 * depending on the vlan snooping setting).
 */
void br_multicast_enable_port(struct net_bridge_port *port)
{
	br_multicast_toggle_port(port, true);
}
2212
2213
/* Disable multicast processing on @port (per-vlan or per-port context,
 * depending on the vlan snooping setting).
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	br_multicast_toggle_port(port, false);
}
2217
2218
static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
2219
{
2220
struct net_bridge_group_src *ent;
2221
struct hlist_node *tmp;
2222
int deleted = 0;
2223
2224
hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
2225
if (ent->flags & BR_SGRP_F_DELETE) {
2226
br_multicast_del_group_src(ent, false);
2227
deleted++;
2228
}
2229
2230
return deleted;
2231
}
2232
2233
/* Re-arm a source entry timer and refresh the source's forwarding state
 * afterwards (br_multicast_fwd_src_handle presumably keeps the S,G
 * forwarding decision in sync with the timer — defined elsewhere in this
 * file).
 */
static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}
2239
2240
/* RFC 3376 / RFC 3810 "Send Q(G,A)" action: for every source of @pg marked
 * BR_SGRP_F_SEND, lower its timer to the last-member-query time (LMQT) and,
 * if we are the active querier, arm its per-source retransmit counter.
 * Finally send one group-and-source specific query and schedule the group
 * retransmit timer.  Queries are suppressed while another querier is
 * present (other_query timer pending).
 */
static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
					      struct net_bridge_mcast_port *pmctx,
					      struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	u32 lmqc = brmctx->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(brmctx);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			/* only lower timers that expire after LMQT */
			if (ent->timer.expires > lmqt) {
				if (brmctx->multicast_querier &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	/* don't send queries unless we're the (sole) active querier */
	if (!brmctx->multicast_querier ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	lmi = now + brmctx->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}
2286
2287
/* RFC 3376 / RFC 3810 "Send Q(G)" action: if we are the active querier,
 * send one group specific query, arm the remaining
 * (last_member_count - 1) retransmissions via the group rexmit timer, and
 * for EXCLUDE mode groups lower the group timer to LMQT.
 */
static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	unsigned long now = jiffies, lmi;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	/* queries are only sent while no other querier is active */
	if (brmctx->multicast_querier &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + brmctx->multicast_last_member_interval;
		pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
}
2321
2322
/* State Msg type New state Actions
2323
* INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI
2324
* INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI
2325
* EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI
2326
*/
2327
/* Handle IS_IN/ALLOW records (see state table above): create any missing
 * source entries and set every listed source's timer to the group
 * membership interval (GMI).  Returns true if the set of sources changed
 * (new entry created or the EHT handling reported a change) so the caller
 * can send an mdb notification.
 */
static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
				     struct net_bridge_port_group *pg, void *h_addr,
				     void *srcs, u32 nsrcs, size_t addr_size,
				     int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	/* srcs is a packed array of nsrcs addresses of addr_size bytes each */
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	return changed;
}
2359
2360
/* State Msg type New state Actions
2361
* INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
2362
* Delete (A-B)
2363
* Group Timer=GMI
2364
*/
2365
/* IS_EX record received while the group is in INCLUDE mode (see state table
 * above): keep only the intersection of the current sources with the record
 * (A*B), add the record's new sources with zeroed timers (B-A), and delete
 * the rest (A-B).  The caller switches the group to EXCLUDE mode and
 * refreshes the group timer.
 */
static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	/* mark everything for deletion, then unmark what the record lists */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
}
2395
2396
/* State Msg type New state Actions
2397
* EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
2398
* Delete (X-A)
2399
* Delete (Y-A)
2400
* Group Timer=GMI
2401
*/
2402
/* IS_EX record received while the group is already in EXCLUDE mode (see
 * state table above): keep listed sources, create missing ones with a GMI
 * timer (A-X-Y), and delete all sources the record does not mention.
 * Returns true if any source entry was added or deleted.
 */
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	/* mark everything for deletion, then unmark what the record lists */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(brmctx));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}
2442
2443
/* Apply an IS_EX record to @pg according to its current filter mode, then
 * move the group to EXCLUDE mode and refresh the group timer to GMI.
 * Returns true when the port group changed and should be notified.
 */
static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size,
			       int grec_type)
{
	bool changed = false;

	if (pg->filter_mode == MCAST_INCLUDE) {
		__grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				     grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		/* mode switch always counts as a change */
		changed = true;
	} else if (pg->filter_mode == MCAST_EXCLUDE) {
		changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
					       addr_size, grec_type);
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}
2468
2469
/* State Msg type New state Actions
2470
* INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI
2471
* Send Q(G,A-B)
2472
*/
2473
/* TO_IN record received while the group is in INCLUDE mode (see state table
 * above): refresh/add all listed sources with a GMI timer and query the
 * sources not covered by the record (Q(G,A-B)).  Returns true if a new
 * source entry was created or EHT handling reported a change.
 */
static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* pre-mark all current sources for querying; listed ones are unmarked */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2514
2515
/* State Msg type New state Actions
2516
* EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
2517
* Send Q(G,X-A)
2518
* Send Q(G)
2519
*/
2520
/* TO_IN record received while the group is in EXCLUDE mode (see state table
 * above): refresh/add the listed sources with a GMI timer, query the active
 * (timer pending) sources not in the record (Q(G,X-A)) and finally send a
 * group query (Q(G)).  Returns true if a new source entry was created or
 * EHT handling reported a change.
 */
static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* only active sources (X, timer running) are candidates for Q(G,X-A) */
	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	__grp_send_query_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2566
2567
/* Apply a TO_IN record to @pg according to its current filter mode.  If the
 * explicit host tracking code decides the port group is now empty, delete it
 * (fast-leave) and return false since @pg must not be touched afterwards.
 */
static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	if (pg->filter_mode == MCAST_INCLUDE)
		changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
	else if (pg->filter_mode == MCAST_EXCLUDE)
		changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);

	if (br_multicast_eht_should_del_pg(pg)) {
		pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}
2597
2598
/* State Msg type New state Actions
2599
* INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
2600
* Delete (A-B)
2601
* Send Q(G,A*B)
2602
* Group Timer=GMI
2603
*/
2604
/* TO_EX record received while the group is in INCLUDE mode (see state table
 * above): keep and query the intersection of current sources with the
 * record (Q(G,A*B)), add the record's new sources with zeroed timers, and
 * delete the current sources not listed (A-B).  The caller switches the
 * group to EXCLUDE mode and refreshes the group timer.
 */
static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	/* default: delete and don't query; record hits flip it to send */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
}
2640
2641
/* State Msg type New state Actions
2642
* EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
2643
* Delete (X-A)
2644
* Delete (Y-A)
2645
* Send Q(G,A-Y)
2646
* Group Timer=GMI
2647
*/
2648
/* TO_EX record received while the group is already in EXCLUDE mode (see
 * state table above): keep listed sources, create missing ones inheriting
 * the group timer (A-X-Y), delete unlisted sources and query the active
 * listed ones (Q(G,A-Y)).  Returns true if any source entry was added or
 * deleted.
 */
static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	/* default: delete and don't query; record hits flip it below */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* new sources inherit the group timer */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		/* only active (timer pending) sources get queried */
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2693
2694
/* Apply a TO_EX record to @pg according to its current filter mode, then
 * move the group to EXCLUDE mode and refresh the group timer to GMI.
 * Returns true when the port group changed and should be notified.
 */
static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	if (pg->filter_mode == MCAST_INCLUDE) {
		__grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
				    addr_size, grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		/* mode switch always counts as a change */
		changed = true;
	} else if (pg->filter_mode == MCAST_EXCLUDE) {
		changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}
2720
2721
/* State Msg type New state Actions
2722
* INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B)
2723
*/
2724
/* BLOCK record received while the group is in INCLUDE mode (see state table
 * above): the source set stays unchanged, but the sources present in both
 * the record and the group (A*B) are queried (Q(G,A*B)).  Returns true only
 * when EHT handling reported a change.
 */
static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2757
2758
/* State Msg type New state Actions
2759
* EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
2760
* Send Q(G,A-Y)
2761
*/
2762
/* BLOCK record received while the group is in EXCLUDE mode (see state table
 * above): create missing sources inheriting the group timer (A-X-Y) and
 * query the record's active sources (Q(G,A-Y)).  Returns true if a source
 * entry was added or EHT handling reported a change.
 */
static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* new sources inherit the group timer */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		/* only active (timer pending) sources get queried */
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2802
2803
/* Apply a BLOCK record to @pg according to its current filter mode.  An
 * INCLUDE mode group left without sources, or a group the explicit host
 * tracking code wants gone, is deleted here — in that case false is
 * returned because @pg must not be accessed any more.
 */
static bool br_multicast_block(struct net_bridge_mcast *brmctx,
			       struct net_bridge_mcast_port *pmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	bool changed = false;

	if (pg->filter_mode == MCAST_INCLUDE)
		changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
	else if (pg->filter_mode == MCAST_EXCLUDE)
		changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);

	if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
	    br_multicast_eht_should_del_pg(pg)) {
		if (br_multicast_eht_should_del_pg(pg))
			pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}
2834
2835
static struct net_bridge_port_group *
2836
br_multicast_find_port(struct net_bridge_mdb_entry *mp,
2837
struct net_bridge_port *p,
2838
const unsigned char *src)
2839
{
2840
struct net_bridge *br __maybe_unused = mp->br;
2841
struct net_bridge_port_group *pg;
2842
2843
for (pg = mlock_dereference(mp->ports, br);
2844
pg;
2845
pg = mlock_dereference(pg->next, br))
2846
if (br_port_group_equal(pg, p, src))
2847
return pg;
2848
2849
return NULL;
2850
}
2851
2852
/* Process an IGMPv3 membership report: iterate its group records,
 * validating each record's bounds with ip_mc_may_pull() before access.
 * Records with zero sources and TO_IN/IS_IN semantics are treated as leave;
 * everything else joins the group.  When the bridge runs IGMPv3 (not forced
 * to v2) the per-record source lists are then applied to the matching
 * non-permanent port group under multicast_lock, sending an mdb
 * notification when the group changed.  Returns 0 on success or a negative
 * errno on malformed packets / join failure.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = brmctx->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	__be32 group, *h_addr;
	bool changed = false;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		/* make sure the fixed part of the record is in the skb */
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* ... and the source list (4 bytes per IPv4 address) */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record types are skipped, not an error */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			/* TO_IN/IS_IN {} == leave when running v2 or without
			 * a port context
			 */
			if (!pmctx || igmpv2) {
				br_ip4_multicast_leave_group(brmctx, pmctx,
							     group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(brmctx, pmctx, group,
							 vid, src, igmpv2);
			if (err)
				break;
		}

		/* per-source processing below is IGMPv3-only */
		if (!pmctx || igmpv2)
			continue;

		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip4_get(brmctx->br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec and host addr */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		h_addr = &ip_hdr(skb)->saddr;
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}
2972
2973
#if IS_ENABLED(CONFIG_IPV6)
2974
/* Process an MLDv2 multicast listener report: the IPv6 counterpart of
 * br_ip4_multicast_igmp3_report().  Each group record's nsrcs field is read
 * via skb_header_pointer() before the record itself is validated, since the
 * record length depends on it.  Zero-source TO_IN/IS_IN records are treated
 * as leave; otherwise the group is joined and, when running MLDv2, the
 * per-source state machine is applied to the matching non-permanent port
 * group under multicast_lock.  Returns 0 on success or a negative errno on
 * malformed packets / join failure.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = brmctx->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	struct mld2_report *mld2r;
	const unsigned char *src;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*mld2r)))
		return -EINVAL;

	mld2r = (struct mld2_report *)icmp6_hdr(skb);
	num = ntohs(mld2r->mld2r_ngrec);
	len = skb_transport_offset(skb) + sizeof(*mld2r);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		/* nsrcs must lie within the transport payload */
		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		/* record header plus nsrcs in6_addr sources */
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record types are skipped, not an error */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			/* TO_IN/IS_IN {} == leave when running MLDv1 or
			 * without a port context
			 */
			if (!pmctx || mldv1) {
				br_ip6_multicast_leave_group(brmctx, pmctx,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(brmctx, pmctx,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		/* per-source processing below is MLDv2-only */
		if (!pmctx || mldv1)
			continue;

		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		h_addr = &ipv6_hdr(skb)->saddr;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}
3114
#endif
3115
3116
/* Querier election for a query received from @saddr: a numerically
 * lower-or-equal source address wins (for IPv4 also when we have no querier
 * address recorded yet), otherwise the sender is only accepted when neither
 * our own query timer nor the other-querier timer is running.  Returns true
 * when the querier state was updated with @saddr.
 */
static bool br_multicast_select_querier(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct br_ip *saddr)
{
	/* ifindex 0 means the query was generated by the bridge itself */
	int port_ifidx = pmctx ? pmctx->port->dev->ifindex : 0;
	struct timer_list *own_timer, *other_timer;
	struct bridge_mcast_querier *querier;

	switch (saddr->proto) {
	case htons(ETH_P_IP):
		querier = &brmctx->ip4_querier;
		own_timer = &brmctx->ip4_own_query.timer;
		other_timer = &brmctx->ip4_other_query.timer;
		if (!querier->addr.src.ip4 ||
		    ntohl(saddr->src.ip4) <= ntohl(querier->addr.src.ip4))
			goto update;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		querier = &brmctx->ip6_querier;
		own_timer = &brmctx->ip6_own_query.timer;
		other_timer = &brmctx->ip6_other_query.timer;
		if (ipv6_addr_cmp(&saddr->src.ip6, &querier->addr.src.ip6) <= 0)
			goto update;
		break;
#endif
	default:
		return false;
	}

	/* higher address: only take over when no election is in progress */
	if (!timer_pending(own_timer) && !timer_pending(other_timer))
		goto update;

	return false;

update:
	br_multicast_update_querier(brmctx, querier, port_ifidx, saddr);

	return true;
}
3156
3157
static struct net_bridge_port *
3158
__br_multicast_get_querier_port(struct net_bridge *br,
3159
const struct bridge_mcast_querier *querier)
3160
{
3161
int port_ifidx = READ_ONCE(querier->port_ifidx);
3162
struct net_bridge_port *p;
3163
struct net_device *dev;
3164
3165
if (port_ifidx == 0)
3166
return NULL;
3167
3168
dev = dev_get_by_index_rcu(dev_net(br->dev), port_ifidx);
3169
if (!dev)
3170
return NULL;
3171
p = br_port_get_rtnl_rcu(dev);
3172
if (!p || p->br != br)
3173
return NULL;
3174
3175
return p;
3176
}
3177
3178
/* Upper bound, in bytes, of the netlink payload produced by
 * br_multicast_dump_querier_state() for one multicast context.  Must be
 * kept in sync with the attributes emitted there.
 */
size_t br_multicast_querier_state_size(void)
{
	return nla_total_size(0) +	/* nest attribute */
	       nla_total_size(sizeof(__be32)) + /* BRIDGE_QUERIER_IP_ADDRESS */
	       nla_total_size(sizeof(int)) +    /* BRIDGE_QUERIER_IP_PORT */
	       nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IP_OTHER_TIMER */
#if IS_ENABLED(CONFIG_IPV6)
	       nla_total_size(sizeof(struct in6_addr)) + /* BRIDGE_QUERIER_IPV6_ADDRESS */
	       nla_total_size(sizeof(int)) +		 /* BRIDGE_QUERIER_IPV6_PORT */
	       nla_total_size_64bit(sizeof(u64)) +	 /* BRIDGE_QUERIER_IPV6_OTHER_TIMER */
#endif
	       0;
}
3191
3192
/* protected by rtnl or rcu */
3193
/* Dump the IPv4 (and IPv6) querier state of @brmctx into a nested netlink
 * attribute @nest_attr of @skb: querier address, the port it was seen on
 * and the remaining other-querier timeout.  A protocol's attributes are
 * only emitted when we are querier or another querier is active; the nest
 * is cancelled entirely when it ends up empty.  Returns 0 on success or
 * -EMSGSIZE when the skb ran out of room.
 */
int br_multicast_dump_querier_state(struct sk_buff *skb,
				    const struct net_bridge_mcast *brmctx,
				    int nest_attr)
{
	struct bridge_mcast_querier querier = {};
	struct net_bridge_port *p;
	struct nlattr *nest;

	if (!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return 0;

	nest = nla_nest_start(skb, nest_attr);
	if (!nest)
		return -EMSGSIZE;

	rcu_read_lock();
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip4_other_query.timer))
		goto out_v6;

	br_multicast_read_querier(&brmctx->ip4_querier, &querier);
	if (nla_put_in_addr(skb, BRIDGE_QUERIER_IP_ADDRESS,
			    querier.addr.src.ip4)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip4_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IP_OTHER_TIMER,
			       br_timer_value(&brmctx->ip4_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IP_PORT, p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}

out_v6:
#if IS_ENABLED(CONFIG_IPV6)
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip6_other_query.timer))
		goto out;

	br_multicast_read_querier(&brmctx->ip6_querier, &querier);
	if (nla_put_in6_addr(skb, BRIDGE_QUERIER_IPV6_ADDRESS,
			     &querier.addr.src.ip6)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip6_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IPV6_OTHER_TIMER,
			       br_timer_value(&brmctx->ip6_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IPV6_PORT,
			       p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}
out:
#endif
	rcu_read_unlock();
	nla_nest_end(skb, nest);
	/* drop an empty nest instead of sending it */
	if (!nla_len(nest))
		nla_nest_cancel(skb, nest);

	return 0;

out_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
3267
3268
static void
3269
br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
3270
struct bridge_mcast_other_query *query,
3271
unsigned long max_delay)
3272
{
3273
if (!timer_pending(&query->timer))
3274
mod_timer(&query->delay_timer, jiffies + max_delay);
3275
3276
mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
3277
}
3278
3279
static void br_port_mc_router_state_change(struct net_bridge_port *p,
3280
bool is_mc_router)
3281
{
3282
struct switchdev_attr attr = {
3283
.orig_dev = p->dev,
3284
.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
3285
.flags = SWITCHDEV_F_DEFER,
3286
.u.mrouter = is_mc_router,
3287
};
3288
3289
switchdev_port_attr_set(p->dev, &attr, NULL);
3290
}
3291
3292
static struct net_bridge_port *
3293
br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
3294
struct hlist_head *mc_router_list,
3295
struct hlist_node *rlist)
3296
{
3297
struct net_bridge_mcast_port *pmctx;
3298
3299
#if IS_ENABLED(CONFIG_IPV6)
3300
if (mc_router_list == &brmctx->ip6_mc_router_list)
3301
pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
3302
ip6_rlist);
3303
else
3304
#endif
3305
pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
3306
ip4_rlist);
3307
3308
return pmctx->port;
3309
}
3310
3311
static struct hlist_node *
3312
br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
3313
struct net_bridge_port *port,
3314
struct hlist_head *mc_router_list)
3315
3316
{
3317
struct hlist_node *slot = NULL;
3318
struct net_bridge_port *p;
3319
struct hlist_node *rlist;
3320
3321
hlist_for_each(rlist, mc_router_list) {
3322
p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);
3323
3324
if ((unsigned long)port >= (unsigned long)p)
3325
break;
3326
3327
slot = rlist;
3328
}
3329
3330
return slot;
3331
}
3332
3333
static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
3334
struct hlist_node *rnode)
3335
{
3336
#if IS_ENABLED(CONFIG_IPV6)
3337
if (rnode != &pmctx->ip6_rlist)
3338
return hlist_unhashed(&pmctx->ip6_rlist);
3339
else
3340
return hlist_unhashed(&pmctx->ip4_rlist);
3341
#else
3342
return true;
3343
#endif
3344
}
3345
3346
/* Add port to router_list
3347
* list is maintained ordered by pointer value
3348
* and locked by br->multicast_lock and RCU
3349
*/
3350
static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
3351
struct net_bridge_mcast_port *pmctx,
3352
struct hlist_node *rlist,
3353
struct hlist_head *mc_router_list)
3354
{
3355
struct hlist_node *slot;
3356
3357
if (!hlist_unhashed(rlist))
3358
return;
3359
3360
slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);
3361
3362
if (slot)
3363
hlist_add_behind_rcu(rlist, slot);
3364
else
3365
hlist_add_head_rcu(rlist, mc_router_list);
3366
3367
/* For backwards compatibility for now, only notify if we
3368
* switched from no IPv4/IPv6 multicast router to a new
3369
* IPv4 or IPv6 multicast router.
3370
*/
3371
if (br_multicast_no_router_otherpf(pmctx, rlist)) {
3372
br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_NEWMDB);
3373
br_port_mc_router_state_change(pmctx->port, true);
3374
}
3375
}
3376
3377
/* Add port to router_list
3378
* list is maintained ordered by pointer value
3379
* and locked by br->multicast_lock and RCU
3380
*/
3381
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
3382
struct net_bridge_mcast_port *pmctx)
3383
{
3384
br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
3385
&brmctx->ip4_mc_router_list);
3386
}
3387
3388
/* Add port to router_list
 * list is maintained ordered by pointer value
 * and locked by br->multicast_lock and RCU
 */
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
				&brmctx->ip6_mc_router_list);
#endif
}

static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
3402
struct net_bridge_mcast_port *pmctx,
3403
struct timer_list *timer,
3404
struct hlist_node *rlist,
3405
struct hlist_head *mc_router_list)
3406
{
3407
unsigned long now = jiffies;
3408
3409
if (!br_multicast_ctx_should_use(brmctx, pmctx))
3410
return;
3411
3412
if (!pmctx) {
3413
if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
3414
if (!br_ip4_multicast_is_router(brmctx) &&
3415
!br_ip6_multicast_is_router(brmctx))
3416
br_mc_router_state_change(brmctx->br, true);
3417
mod_timer(timer, now + brmctx->multicast_querier_interval);
3418
}
3419
return;
3420
}
3421
3422
if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
3423
pmctx->multicast_router == MDB_RTR_TYPE_PERM)
3424
return;
3425
3426
br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
3427
mod_timer(timer, now + brmctx->multicast_querier_interval);
3428
}
3429
3430
static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx,
3431
struct net_bridge_mcast_port *pmctx)
3432
{
3433
struct timer_list *timer = &brmctx->ip4_mc_router_timer;
3434
struct hlist_node *rlist = NULL;
3435
3436
if (pmctx) {
3437
timer = &pmctx->ip4_mc_router_timer;
3438
rlist = &pmctx->ip4_rlist;
3439
}
3440
3441
br_multicast_mark_router(brmctx, pmctx, timer, rlist,
3442
&brmctx->ip4_mc_router_list);
3443
}
3444
3445
/* IPv6 wrapper: pick the bridge-level or per-port router timer/list. */
static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct timer_list *timer = &brmctx->ip6_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip6_mc_router_timer;
		rlist = &pmctx->ip6_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip6_mc_router_list);
#endif
}

/* Handle an IGMP general query from another querier: run election,
 * update the other-querier timer and mark the source as a router.
 */
static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip4_multicast_mark_router(brmctx, pmctx);
}

#if IS_ENABLED(CONFIG_IPV6)
/* Handle an MLD general query from another querier: run election,
 * update the other-querier timer and mark the source as a router.
 */
static void
br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip6_multicast_mark_router(brmctx, pmctx);
}
#endif

static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
3493
struct net_bridge_mcast_port *pmctx,
3494
struct sk_buff *skb,
3495
u16 vid)
3496
{
3497
unsigned int transport_len = ip_transport_len(skb);
3498
const struct iphdr *iph = ip_hdr(skb);
3499
struct igmphdr *ih = igmp_hdr(skb);
3500
struct net_bridge_mdb_entry *mp;
3501
struct igmpv3_query *ih3;
3502
struct net_bridge_port_group *p;
3503
struct net_bridge_port_group __rcu **pp;
3504
struct br_ip saddr = {};
3505
unsigned long max_delay;
3506
unsigned long now = jiffies;
3507
__be32 group;
3508
3509
spin_lock(&brmctx->br->multicast_lock);
3510
if (!br_multicast_ctx_should_use(brmctx, pmctx))
3511
goto out;
3512
3513
group = ih->group;
3514
3515
if (transport_len == sizeof(*ih)) {
3516
max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
3517
3518
if (!max_delay) {
3519
max_delay = 10 * HZ;
3520
group = 0;
3521
}
3522
} else if (transport_len >= sizeof(*ih3)) {
3523
ih3 = igmpv3_query_hdr(skb);
3524
if (ih3->nsrcs ||
3525
(brmctx->multicast_igmp_version == 3 && group &&
3526
ih3->suppress))
3527
goto out;
3528
3529
max_delay = ih3->code ?
3530
IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
3531
} else {
3532
goto out;
3533
}
3534
3535
if (!group) {
3536
saddr.proto = htons(ETH_P_IP);
3537
saddr.src.ip4 = iph->saddr;
3538
3539
br_ip4_multicast_query_received(brmctx, pmctx,
3540
&brmctx->ip4_other_query,
3541
&saddr, max_delay);
3542
goto out;
3543
}
3544
3545
mp = br_mdb_ip4_get(brmctx->br, group, vid);
3546
if (!mp)
3547
goto out;
3548
3549
max_delay *= brmctx->multicast_last_member_count;
3550
3551
if (mp->host_joined &&
3552
(timer_pending(&mp->timer) ?
3553
time_after(mp->timer.expires, now + max_delay) :
3554
timer_delete_sync_try(&mp->timer) >= 0))
3555
mod_timer(&mp->timer, now + max_delay);
3556
3557
for (pp = &mp->ports;
3558
(p = mlock_dereference(*pp, brmctx->br)) != NULL;
3559
pp = &p->next) {
3560
if (timer_pending(&p->timer) ?
3561
time_after(p->timer.expires, now + max_delay) :
3562
timer_delete_sync_try(&p->timer) >= 0 &&
3563
(brmctx->multicast_igmp_version == 2 ||
3564
p->filter_mode == MCAST_EXCLUDE))
3565
mod_timer(&p->timer, now + max_delay);
3566
}
3567
3568
out:
3569
spin_unlock(&brmctx->br->multicast_lock);
3570
}
3571
3572
#if IS_ENABLED(CONFIG_IPV6)
/* Process a received MLD query (v1/v2): handle general queries via
 * querier election, and shorten membership timers for specific queries.
 */
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	if (transport_len == sizeof(*mld)) {
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		if (brmctx->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_ip6_multicast_query_received(brmctx, pmctx,
						&brmctx->ip6_other_query,
						&saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     timer_delete_sync_try(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    timer_delete_sync_try(&p->timer) >= 0 &&
		    (brmctx->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
	return err;
}
#endif

static void
3664
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
3665
struct net_bridge_mcast_port *pmctx,
3666
struct br_ip *group,
3667
struct bridge_mcast_other_query *other_query,
3668
struct bridge_mcast_own_query *own_query,
3669
const unsigned char *src)
3670
{
3671
struct net_bridge_mdb_entry *mp;
3672
struct net_bridge_port_group *p;
3673
unsigned long now;
3674
unsigned long time;
3675
3676
spin_lock(&brmctx->br->multicast_lock);
3677
if (!br_multicast_ctx_should_use(brmctx, pmctx))
3678
goto out;
3679
3680
mp = br_mdb_ip_get(brmctx->br, group);
3681
if (!mp)
3682
goto out;
3683
3684
if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
3685
struct net_bridge_port_group __rcu **pp;
3686
3687
for (pp = &mp->ports;
3688
(p = mlock_dereference(*pp, brmctx->br)) != NULL;
3689
pp = &p->next) {
3690
if (!br_port_group_equal(p, pmctx->port, src))
3691
continue;
3692
3693
if (p->flags & MDB_PG_FLAGS_PERMANENT)
3694
break;
3695
3696
p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
3697
br_multicast_del_pg(mp, p, pp);
3698
}
3699
goto out;
3700
}
3701
3702
if (timer_pending(&other_query->timer))
3703
goto out;
3704
3705
if (brmctx->multicast_querier) {
3706
__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
3707
false, 0, NULL);
3708
3709
time = jiffies + brmctx->multicast_last_member_count *
3710
brmctx->multicast_last_member_interval;
3711
3712
mod_timer(&own_query->timer, time);
3713
3714
for (p = mlock_dereference(mp->ports, brmctx->br);
3715
p != NULL && pmctx != NULL;
3716
p = mlock_dereference(p->next, brmctx->br)) {
3717
if (!br_port_group_equal(p, pmctx->port, src))
3718
continue;
3719
3720
if (!hlist_unhashed(&p->mglist) &&
3721
(timer_pending(&p->timer) ?
3722
time_after(p->timer.expires, time) :
3723
timer_delete_sync_try(&p->timer) >= 0)) {
3724
mod_timer(&p->timer, time);
3725
}
3726
3727
break;
3728
}
3729
}
3730
3731
now = jiffies;
3732
time = now + brmctx->multicast_last_member_count *
3733
brmctx->multicast_last_member_interval;
3734
3735
if (!pmctx) {
3736
if (mp->host_joined &&
3737
(timer_pending(&mp->timer) ?
3738
time_after(mp->timer.expires, time) :
3739
timer_delete_sync_try(&mp->timer) >= 0)) {
3740
mod_timer(&mp->timer, time);
3741
}
3742
3743
goto out;
3744
}
3745
3746
for (p = mlock_dereference(mp->ports, brmctx->br);
3747
p != NULL;
3748
p = mlock_dereference(p->next, brmctx->br)) {
3749
if (p->key.port != pmctx->port)
3750
continue;
3751
3752
if (!hlist_unhashed(&p->mglist) &&
3753
(timer_pending(&p->timer) ?
3754
time_after(p->timer.expires, time) :
3755
timer_delete_sync_try(&p->timer) >= 0)) {
3756
mod_timer(&p->timer, time);
3757
}
3758
3759
break;
3760
}
3761
out:
3762
spin_unlock(&brmctx->br->multicast_lock);
3763
}
3764
3765
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
3766
struct net_bridge_mcast_port *pmctx,
3767
__be32 group,
3768
__u16 vid,
3769
const unsigned char *src)
3770
{
3771
struct br_ip br_group;
3772
struct bridge_mcast_own_query *own_query;
3773
3774
if (ipv4_is_local_multicast(group))
3775
return;
3776
3777
own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;
3778
3779
memset(&br_group, 0, sizeof(br_group));
3780
br_group.dst.ip4 = group;
3781
br_group.proto = htons(ETH_P_IP);
3782
br_group.vid = vid;
3783
3784
br_multicast_leave_group(brmctx, pmctx, &br_group,
3785
&brmctx->ip4_other_query,
3786
own_query, src);
3787
}
3788
3789
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 leave handler: ignore the all-nodes group, then delegate. */
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip6_other_query,
				 own_query, src);
}
#endif

static void br_multicast_err_count(const struct net_bridge *br,
3816
const struct net_bridge_port *p,
3817
__be16 proto)
3818
{
3819
struct bridge_mcast_stats __percpu *stats;
3820
struct bridge_mcast_stats *pstats;
3821
3822
if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
3823
return;
3824
3825
if (p)
3826
stats = p->mcast_stats;
3827
else
3828
stats = br->mcast_stats;
3829
if (WARN_ON(!stats))
3830
return;
3831
3832
pstats = this_cpu_ptr(stats);
3833
3834
u64_stats_update_begin(&pstats->syncp);
3835
switch (proto) {
3836
case htons(ETH_P_IP):
3837
pstats->mstats.igmp_parse_errors++;
3838
break;
3839
#if IS_ENABLED(CONFIG_IPV6)
3840
case htons(ETH_P_IPV6):
3841
pstats->mstats.mld_parse_errors++;
3842
break;
3843
#endif
3844
}
3845
u64_stats_update_end(&pstats->syncp);
3846
}
3847
3848
static void br_multicast_pim(struct net_bridge_mcast *brmctx,
3849
struct net_bridge_mcast_port *pmctx,
3850
const struct sk_buff *skb)
3851
{
3852
unsigned int offset = skb_transport_offset(skb);
3853
struct pimhdr *pimhdr, _pimhdr;
3854
3855
pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
3856
if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
3857
pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
3858
return;
3859
3860
spin_lock(&brmctx->br->multicast_lock);
3861
br_ip4_multicast_mark_router(brmctx, pmctx);
3862
spin_unlock(&brmctx->br->multicast_lock);
3863
}
3864
3865
static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
3866
struct net_bridge_mcast_port *pmctx,
3867
struct sk_buff *skb)
3868
{
3869
if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
3870
igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
3871
return -ENOMSG;
3872
3873
spin_lock(&brmctx->br->multicast_lock);
3874
br_ip4_multicast_mark_router(brmctx, pmctx);
3875
spin_unlock(&brmctx->br->multicast_lock);
3876
3877
return 0;
3878
}
3879
3880
static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
3881
struct net_bridge_mcast_port *pmctx,
3882
struct sk_buff *skb,
3883
u16 vid)
3884
{
3885
struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
3886
const unsigned char *src;
3887
struct igmphdr *ih;
3888
int err;
3889
3890
err = ip_mc_check_igmp(skb);
3891
3892
if (err == -ENOMSG) {
3893
if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
3894
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3895
} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
3896
if (ip_hdr(skb)->protocol == IPPROTO_PIM)
3897
br_multicast_pim(brmctx, pmctx, skb);
3898
} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
3899
br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
3900
}
3901
3902
return 0;
3903
} else if (err < 0) {
3904
br_multicast_err_count(brmctx->br, p, skb->protocol);
3905
return err;
3906
}
3907
3908
ih = igmp_hdr(skb);
3909
src = eth_hdr(skb)->h_source;
3910
BR_INPUT_SKB_CB(skb)->igmp = ih->type;
3911
3912
switch (ih->type) {
3913
case IGMP_HOST_MEMBERSHIP_REPORT:
3914
case IGMPV2_HOST_MEMBERSHIP_REPORT:
3915
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3916
err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
3917
src, true);
3918
break;
3919
case IGMPV3_HOST_MEMBERSHIP_REPORT:
3920
err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
3921
break;
3922
case IGMP_HOST_MEMBERSHIP_QUERY:
3923
br_ip4_multicast_query(brmctx, pmctx, skb, vid);
3924
break;
3925
case IGMP_HOST_LEAVE_MESSAGE:
3926
br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
3927
break;
3928
}
3929
3930
br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
3931
BR_MCAST_DIR_RX);
3932
3933
return err;
3934
}
3935
3936
#if IS_ENABLED(CONFIG_IPV6)
/* Multicast Router Discovery advertisement (IPv6): mark router port. */
static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct sk_buff *skb)
{
	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip6_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
3950
struct net_bridge_mcast_port *pmctx,
3951
struct sk_buff *skb,
3952
u16 vid)
3953
{
3954
struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
3955
const unsigned char *src;
3956
struct mld_msg *mld;
3957
int err;
3958
3959
err = ipv6_mc_check_mld(skb);
3960
3961
if (err == -ENOMSG || err == -ENODATA) {
3962
if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
3963
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3964
if (err == -ENODATA &&
3965
ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
3966
br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);
3967
3968
return 0;
3969
} else if (err < 0) {
3970
br_multicast_err_count(brmctx->br, p, skb->protocol);
3971
return err;
3972
}
3973
3974
mld = (struct mld_msg *)skb_transport_header(skb);
3975
BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;
3976
3977
switch (mld->mld_type) {
3978
case ICMPV6_MGM_REPORT:
3979
src = eth_hdr(skb)->h_source;
3980
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3981
err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
3982
vid, src, true);
3983
break;
3984
case ICMPV6_MLD2_REPORT:
3985
err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
3986
break;
3987
case ICMPV6_MGM_QUERY:
3988
err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
3989
break;
3990
case ICMPV6_MGM_REDUCTION:
3991
src = eth_hdr(skb)->h_source;
3992
br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
3993
src);
3994
break;
3995
}
3996
3997
br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
3998
BR_MCAST_DIR_RX);
3999
4000
return err;
4001
}
4002
#endif
4003
4004
/* Entry point for snooping received multicast control traffic; may
 * redirect brmctx/pmctx to the per-vlan contexts when vlan snooping is
 * enabled.
 */
int br_multicast_rcv(struct net_bridge_mcast **brmctx,
		     struct net_bridge_mcast_port **pmctx,
		     struct net_bridge_vlan *vlan,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
		const struct net_bridge_vlan *masterv;

		/* the vlan has the master flag set only when transmitting
		 * through the bridge device
		 */
		if (br_vlan_is_master(vlan)) {
			masterv = vlan;
			*brmctx = &vlan->br_mcast_ctx;
			*pmctx = NULL;
		} else {
			masterv = vlan->brvlan;
			*brmctx = &vlan->brvlan->br_mcast_ctx;
			*pmctx = &vlan->port_mcast_ctx;
		}

		if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
			return 0;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
		break;
#endif
	}

	return ret;
}

static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
4052
struct bridge_mcast_own_query *query,
4053
struct bridge_mcast_querier *querier)
4054
{
4055
spin_lock(&brmctx->br->multicast_lock);
4056
if (br_multicast_ctx_vlan_disabled(brmctx))
4057
goto out;
4058
4059
if (query->startup_sent < brmctx->multicast_startup_query_count)
4060
query->startup_sent++;
4061
4062
br_multicast_send_query(brmctx, NULL, query);
4063
out:
4064
spin_unlock(&brmctx->br->multicast_lock);
4065
}
4066
4067
static void br_ip4_multicast_query_expired(struct timer_list *t)
4068
{
4069
struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t,
4070
ip4_own_query.timer);
4071
4072
br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
4073
&brmctx->ip4_querier);
4074
}
4075
4076
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 own-query timer callback. */
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = timer_container_of(brmctx, t,
							     ip6_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
				   &brmctx->ip6_querier);
}
#endif

static void br_multicast_gc_work(struct work_struct *work)
4088
{
4089
struct net_bridge *br = container_of(work, struct net_bridge,
4090
mcast_gc_work);
4091
HLIST_HEAD(deleted_head);
4092
4093
spin_lock_bh(&br->multicast_lock);
4094
hlist_move_list(&br->mcast_gc_list, &deleted_head);
4095
spin_unlock_bh(&br->multicast_lock);
4096
4097
br_multicast_gc(&deleted_head);
4098
}
4099
4100
void br_multicast_ctx_init(struct net_bridge *br,
4101
struct net_bridge_vlan *vlan,
4102
struct net_bridge_mcast *brmctx)
4103
{
4104
brmctx->br = br;
4105
brmctx->vlan = vlan;
4106
brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
4107
brmctx->multicast_last_member_count = 2;
4108
brmctx->multicast_startup_query_count = 2;
4109
4110
brmctx->multicast_last_member_interval = HZ;
4111
brmctx->multicast_query_response_interval = 10 * HZ;
4112
brmctx->multicast_startup_query_interval = 125 * HZ / 4;
4113
brmctx->multicast_query_interval = 125 * HZ;
4114
brmctx->multicast_querier_interval = 255 * HZ;
4115
brmctx->multicast_membership_interval = 260 * HZ;
4116
4117
brmctx->ip4_querier.port_ifidx = 0;
4118
seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
4119
brmctx->multicast_igmp_version = 2;
4120
#if IS_ENABLED(CONFIG_IPV6)
4121
brmctx->multicast_mld_version = 1;
4122
brmctx->ip6_querier.port_ifidx = 0;
4123
seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
4124
#endif
4125
4126
timer_setup(&brmctx->ip4_mc_router_timer,
4127
br_ip4_multicast_local_router_expired, 0);
4128
timer_setup(&brmctx->ip4_other_query.timer,
4129
br_ip4_multicast_querier_expired, 0);
4130
timer_setup(&brmctx->ip4_other_query.delay_timer,
4131
br_multicast_query_delay_expired, 0);
4132
timer_setup(&brmctx->ip4_own_query.timer,
4133
br_ip4_multicast_query_expired, 0);
4134
#if IS_ENABLED(CONFIG_IPV6)
4135
timer_setup(&brmctx->ip6_mc_router_timer,
4136
br_ip6_multicast_local_router_expired, 0);
4137
timer_setup(&brmctx->ip6_other_query.timer,
4138
br_ip6_multicast_querier_expired, 0);
4139
timer_setup(&brmctx->ip6_other_query.delay_timer,
4140
br_multicast_query_delay_expired, 0);
4141
timer_setup(&brmctx->ip6_own_query.timer,
4142
br_ip6_multicast_query_expired, 0);
4143
#endif
4144
}
4145
4146
/* Tear down a multicast context by stopping all of its timers. */
void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}

void br_multicast_init(struct net_bridge *br)
4152
{
4153
br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;
4154
4155
br_multicast_ctx_init(br, NULL, &br->multicast_ctx);
4156
4157
br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
4158
br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
4159
4160
spin_lock_init(&br->multicast_lock);
4161
INIT_HLIST_HEAD(&br->mdb_list);
4162
INIT_HLIST_HEAD(&br->mcast_gc_list);
4163
INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
4164
}
4165
4166
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
4167
{
4168
struct in_device *in_dev = in_dev_get(br->dev);
4169
4170
if (!in_dev)
4171
return;
4172
4173
__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
4174
in_dev_put(in_dev);
4175
}
4176
4177
#if IS_ENABLED(CONFIG_IPV6)
/* Join the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif

/* Join both protocol families' all-snoopers groups. */
void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}

static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
4198
{
4199
struct in_device *in_dev = in_dev_get(br->dev);
4200
4201
if (WARN_ON(!in_dev))
4202
return;
4203
4204
__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
4205
in_dev_put(in_dev);
4206
}
4207
4208
#if IS_ENABLED(CONFIG_IPV6)
/* Leave the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif

/* Leave both protocol families' all-snoopers groups. */
void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}

static void __br_multicast_open_query(struct net_bridge *br,
4229
struct bridge_mcast_own_query *query)
4230
{
4231
query->startup_sent = 0;
4232
4233
if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
4234
return;
4235
4236
mod_timer(&query->timer, jiffies);
4237
}
4238
4239
static void __br_multicast_open(struct net_bridge_mcast *brmctx)
4240
{
4241
__br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
4242
#if IS_ENABLED(CONFIG_IPV6)
4243
__br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
4244
#endif
4245
}
4246
4247
void br_multicast_open(struct net_bridge *br)
4248
{
4249
ASSERT_RTNL();
4250
4251
if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
4252
struct net_bridge_vlan_group *vg;
4253
struct net_bridge_vlan *vlan;
4254
4255
vg = br_vlan_group(br);
4256
if (vg) {
4257
list_for_each_entry(vlan, &vg->vlan_list, vlist) {
4258
struct net_bridge_mcast *brmctx;
4259
4260
brmctx = &vlan->br_mcast_ctx;
4261
if (br_vlan_is_brentry(vlan) &&
4262
!br_multicast_ctx_vlan_disabled(brmctx))
4263
__br_multicast_open(&vlan->br_mcast_ctx);
4264
}
4265
}
4266
} else {
4267
__br_multicast_open(&br->multicast_ctx);
4268
}
4269
}
4270
4271
/* Stop all timers of a multicast context (router, other-querier with
 * its delay timer, own-querier) for IPv4 and, when built in, IPv6.
 * The _sync variants wait for already-running timer handlers to finish.
 */
static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
	timer_delete_sync(&brmctx->ip4_mc_router_timer);
	timer_delete_sync(&brmctx->ip4_other_query.timer);
	timer_delete_sync(&brmctx->ip4_other_query.delay_timer);
	timer_delete_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	timer_delete_sync(&brmctx->ip6_mc_router_timer);
	timer_delete_sync(&brmctx->ip6_other_query.timer);
	timer_delete_sync(&brmctx->ip6_other_query.delay_timer);
	timer_delete_sync(&brmctx->ip6_own_query.timer);
#endif
}
4284
4285
/* React to a vlan STP-state update: when per-vlan snooping is active
 * and the new state allows traffic, enable the port-vlan multicast
 * context. Only port vlans that are in use are considered.
 */
void br_multicast_update_vlan_mcast_ctx(struct net_bridge_vlan *v, u8 state)
{
#if IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
	struct net_bridge *br;

	if (!br_vlan_should_use(v) || br_vlan_is_master(v))
		return;

	br = v->port->br;
	if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		return;

	if (br_vlan_state_allowed(state, true))
		br_multicast_enable_port_ctx(&v->port_mcast_ctx);

	/* Multicast is not disabled for the vlan when it goes in
	 * blocking state because the timers will expire and stop by
	 * themselves without sending more queries.
	 */
#endif
}
4310
4311
/* Enable/disable multicast for a single vlan — either the bridge-global
 * (master) vlan entry or a per-port vlan instance. The MCAST_ENABLED
 * flag flip happens under multicast_lock; for the master vlan the
 * open/stop of the context is done after dropping the lock.
 */
void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge *br;

	/* it's okay to check for the flag without the multicast lock because it
	 * can only change under RTNL -> multicast_lock, we need the latter to
	 * sync with timers and packets
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
		return;

	if (br_vlan_is_master(vlan)) {
		br = vlan->br;

		/* nothing to do for a master vlan that isn't a bridge entry,
		 * or when enabling is globally disallowed for this vlan
		 */
		if (!br_vlan_is_brentry(vlan) ||
		    (on &&
		     br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
			return;

		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		spin_unlock_bh(&br->multicast_lock);

		if (on)
			__br_multicast_open(&vlan->br_mcast_ctx);
		else
			__br_multicast_stop(&vlan->br_mcast_ctx);
	} else {
		struct net_bridge_mcast *brmctx;

		brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
		if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
			return;

		br = vlan->port->br;
		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		if (on)
			__br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
		else
			__br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
		spin_unlock_bh(&br->multicast_lock);
	}
}
4355
4356
/* Toggle multicast for a master vlan: first the matching per-port vlan
 * instance on every bridge port, then the bridge's own entry if present.
 */
static void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge_port *port;

	/* callers must pass the bridge-global (master) vlan */
	if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
		return;

	list_for_each_entry(port, &vlan->br->port_list, list) {
		struct net_bridge_vlan *port_vlan;

		port_vlan = br_vlan_find(nbp_vlan_group(port), vlan->vid);
		if (port_vlan)
			br_multicast_toggle_one_vlan(port_vlan, on);
	}

	if (br_vlan_is_brentry(vlan))
		br_multicast_toggle_one_vlan(vlan, on);
}
4375
4376
/* Toggle bridge-wide per-vlan multicast snooping. Enabling requires
 * vlan filtering to already be on. Switches between the bridge/port
 * global multicast contexts and the per-vlan ones, then propagates the
 * new state to every existing vlan.
 */
int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
				      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	struct net_bridge_port *p;

	/* already in the requested state */
	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
		return 0;

	if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
		return -EINVAL;
	}

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);

	/* disable/enable non-vlan mcast contexts based on vlan snooping */
	if (on)
		__br_multicast_stop(&br->multicast_ctx);
	else
		__br_multicast_open(&br->multicast_ctx);
	list_for_each_entry(p, &br->port_list, list) {
		if (on)
			br_multicast_disable_port_ctx(&p->multicast_ctx);
		else
			br_multicast_enable_port_ctx(&p->multicast_ctx);
	}

	/* propagate the new snooping mode to the per-vlan contexts */
	list_for_each_entry(vlan, &vg->vlan_list, vlist)
		br_multicast_toggle_vlan(vlan, on);

	return 0;
}
4414
4415
/* Flip the global multicast state of a vlan and propagate it.
 * Returns true if the state changed, false if it already matched @on.
 */
bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
{
	bool enabled;

	ASSERT_RTNL();

	/* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
	 * requires only RTNL to change
	 */
	enabled = !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED);
	if (enabled == on)
		return false;

	vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	br_multicast_toggle_vlan(vlan, on);

	return true;
}
4430
4431
void br_multicast_stop(struct net_bridge *br)
4432
{
4433
ASSERT_RTNL();
4434
4435
if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
4436
struct net_bridge_vlan_group *vg;
4437
struct net_bridge_vlan *vlan;
4438
4439
vg = br_vlan_group(br);
4440
if (vg) {
4441
list_for_each_entry(vlan, &vg->vlan_list, vlist) {
4442
struct net_bridge_mcast *brmctx;
4443
4444
brmctx = &vlan->br_mcast_ctx;
4445
if (br_vlan_is_brentry(vlan) &&
4446
!br_multicast_ctx_vlan_disabled(brmctx))
4447
__br_multicast_stop(&vlan->br_mcast_ctx);
4448
}
4449
}
4450
} else {
4451
__br_multicast_stop(&br->multicast_ctx);
4452
}
4453
}
4454
4455
/* Final multicast teardown on bridge device removal: delete all mdb
 * entries under multicast_lock, then free the collected garbage and
 * flush pending gc work / RCU callbacks outside of it.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	/* take over the entries queued for destruction */
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_ctx_deinit(&br->multicast_ctx);
	/* free collected entries outside of the multicast lock */
	br_multicast_gc(&deleted_head);
	cancel_work_sync(&br->mcast_gc_work);

	/* wait for outstanding RCU callbacks before the bridge is freed */
	rcu_barrier();
}
4473
4474
/* Set the multicast router type of a bridge multicast context.
 * Only MDB_RTR_TYPE_DISABLED, _PERM and _TEMP_QUERY are valid here;
 * anything else (including MDB_RTR_TYPE_TEMP) returns -EINVAL.
 */
int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&brmctx->br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM);
		/* stop any running router timers, they are not needed in
		 * these fixed states
		 */
		timer_delete(&brmctx->ip4_mc_router_timer);
#if IS_ENABLED(CONFIG_IPV6)
		timer_delete(&brmctx->ip6_mc_router_timer);
#endif
		brmctx->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		/* notify only on an actual state change */
		if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(brmctx->br, false);
		brmctx->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&brmctx->br->multicast_lock);

	return err;
}
4503
4504
/* Send a router-port removal notification when @deleted indicates a
 * rport list removal happened and the port is no longer a router for
 * any IP family.
 */
static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
{
	if (!deleted)
		return;

	/* For backwards compatibility for now, only notify if there is
	 * no multicast router anymore for both IPv4 and IPv6.
	 */
	if (!hlist_unhashed(&pmctx->ip4_rlist))
		return;
#if IS_ENABLED(CONFIG_IPV6)
	if (!hlist_unhashed(&pmctx->ip6_rlist))
		return;
#endif

	br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_DELMDB);
	br_port_mc_router_state_change(pmctx->port, false);

	/* don't allow timer refresh */
	if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP)
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}
4527
4528
/* Set the multicast router type of a port multicast context.
 * Re-setting the current type only refreshes the router timers for the
 * temporary type; a real change adds/removes the port from the per-family
 * router lists accordingly. Returns -EINVAL for unknown types.
 * Note: br_ip6_multicast_rport_del() is safe to call outside the IPv6
 * conditional — a stub exists when IPv6 is not built in.
 */
int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx,
				 unsigned long val)
{
	struct net_bridge_mcast *brmctx;
	unsigned long now = jiffies;
	int err = -EINVAL;
	bool del = false;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	spin_lock_bh(&brmctx->br->multicast_lock);
	if (pmctx->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) {
			mod_timer(&pmctx->ip4_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#if IS_ENABLED(CONFIG_IPV6)
			mod_timer(&pmctx->ip6_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#endif
		}
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
		del |= br_ip4_multicast_rport_del(pmctx);
		timer_delete(&pmctx->ip4_mc_router_timer);
		del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		timer_delete(&pmctx->ip6_mc_router_timer);
#endif
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		del |= br_ip4_multicast_rport_del(pmctx);
		del |= br_ip6_multicast_rport_del(pmctx);
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_PERM:
		pmctx->multicast_router = MDB_RTR_TYPE_PERM;
		/* permanent router: no timers, just join the rport lists */
		timer_delete(&pmctx->ip4_mc_router_timer);
		br_ip4_multicast_add_router(brmctx, pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		timer_delete(&pmctx->ip6_mc_router_timer);
#endif
		br_ip6_multicast_add_router(brmctx, pmctx);
		break;
	case MDB_RTR_TYPE_TEMP:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP;
		br_ip4_multicast_mark_router(brmctx, pmctx);
		br_ip6_multicast_mark_router(brmctx, pmctx);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return err;
}
4591
4592
/* Set the multicast router type for a vlan, dispatching to the bridge
 * (master vlan) or port (per-port vlan) variant.
 */
int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router)
{
	if (br_vlan_is_master(v))
		return br_multicast_set_router(&v->br_mcast_ctx, mcast_router);

	return br_multicast_set_port_router(&v->port_mcast_ctx, mcast_router);
}
4604
4605
/* Start own queries for @query (one of the context's ip4/ip6 own
 * queries) and enable the matching per-port own queries. Skipped when
 * the context doesn't match the current vlan snooping mode; ports (or
 * their vlan instances) in stopped state are left alone.
 */
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	if (!br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

	__br_multicast_open_query(brmctx->br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &brmctx->br->port_list, list) {
		struct bridge_mcast_own_query *ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
		struct bridge_mcast_own_query *ip6_own_query;
#endif

		if (br_multicast_port_ctx_state_stopped(&port->multicast_ctx))
			continue;

		/* pick the per-vlan or per-port own queries depending on
		 * the kind of context being started
		 */
		if (br_multicast_ctx_is_vlan(brmctx)) {
			struct net_bridge_vlan *vlan;

			vlan = br_vlan_find(nbp_vlan_group_rcu(port),
					    brmctx->vlan->vid);
			if (!vlan ||
			    br_multicast_port_ctx_state_stopped(&vlan->port_mcast_ctx))
				continue;

			ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query;
#endif
		} else {
			ip4_own_query = &port->multicast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &port->multicast_ctx.ip6_own_query;
#endif
		}

		if (query == &brmctx->ip4_own_query)
			br_multicast_enable(ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(ip6_own_query);
#endif
	}
	rcu_read_unlock();
}
4654
4655
/* Enable/disable multicast snooping on the bridge. The snooper
 * join/leave runs after multicast_lock is dropped — see the comment
 * before the change_snoopers block for the deadlock it avoids.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port *port;
	bool change_snoopers = false;
	int err = 0;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	/* -EOPNOTSUPP from br_mc_disabled_update() is treated as success
	 * (no listener handled the event); any other error aborts
	 */
	err = br_mc_disabled_update(br->dev, val, extack);
	if (err == -EOPNOTSUPP)
		err = 0;
	if (err)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		change_snoopers = true;
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port_ctx(&port->multicast_ctx);

	change_snoopers = true;

unlock:
	spin_unlock_bh(&br->multicast_lock);

	/* br_multicast_join_snoopers has the potential to cause
	 * an MLD Report/Leave to be delivered to br_multicast_rcv,
	 * which would in turn call br_multicast_add_group, which would
	 * attempt to acquire multicast_lock. This function should be
	 * called after the lock has been released to avoid deadlocks on
	 * multicast_lock.
	 *
	 * br_multicast_leave_snoopers does not have the problem since
	 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
	 * returns without calling br_multicast_ipv4/6_rcv if it's not
	 * enabled. Moved both functions out just for symmetry.
	 */
	if (change_snoopers) {
		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
			br_multicast_join_snoopers(br);
		else
			br_multicast_leave_snoopers(br);
	}

	return err;
}
4711
4712
bool br_multicast_enabled(const struct net_device *dev)
4713
{
4714
struct net_bridge *br = netdev_priv(dev);
4715
4716
return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
4717
}
4718
EXPORT_SYMBOL_GPL(br_multicast_enabled);
4719
4720
bool br_multicast_router(const struct net_device *dev)
4721
{
4722
struct net_bridge *br = netdev_priv(dev);
4723
bool is_router;
4724
4725
spin_lock_bh(&br->multicast_lock);
4726
is_router = br_multicast_is_router(&br->multicast_ctx, NULL);
4727
spin_unlock_bh(&br->multicast_lock);
4728
return is_router;
4729
}
4730
EXPORT_SYMBOL_GPL(br_multicast_router);
4731
4732
/* Enable/disable the bridge's own IGMP/MLD querier for this context.
 * When enabling, arm the other-querier delay timers (unless a foreign
 * querier is already being tracked) and start own queries for both
 * families. Always returns 0.
 */
int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&brmctx->br->multicast_lock);
	if (brmctx->multicast_querier == val)
		goto unlock;

	/* WRITE_ONCE pairs with lockless readers of multicast_querier */
	WRITE_ONCE(brmctx->multicast_querier, val);
	if (!val)
		goto unlock;

	max_delay = brmctx->multicast_query_response_interval;

	if (!timer_pending(&brmctx->ip4_other_query.timer))
		mod_timer(&brmctx->ip4_other_query.delay_timer,
			  jiffies + max_delay);

	br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&brmctx->ip6_other_query.timer))
		mod_timer(&brmctx->ip6_other_query.delay_timer,
			  jiffies + max_delay);

	br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}
4767
4768
int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx,
4769
unsigned long val)
4770
{
4771
/* Currently we support only version 2 and 3 */
4772
switch (val) {
4773
case 2:
4774
case 3:
4775
break;
4776
default:
4777
return -EINVAL;
4778
}
4779
4780
spin_lock_bh(&brmctx->br->multicast_lock);
4781
brmctx->multicast_igmp_version = val;
4782
spin_unlock_bh(&brmctx->br->multicast_lock);
4783
4784
return 0;
4785
}
4786
4787
#if IS_ENABLED(CONFIG_IPV6)
/* Set the MLD version used by this multicast context.
 * Only versions 1 and 2 are supported; anything else is -EINVAL.
 */
int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
				 unsigned long val)
{
	if (val != 1 && val != 2)
		return -EINVAL;

	spin_lock_bh(&brmctx->br->multicast_lock);
	brmctx->multicast_mld_version = val;
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}
#endif
4807
4808
/* Set the multicast query interval from a clock_t value, clamping it to
 * [BR_MULTICAST_QUERY_INTVL_MIN, BR_MULTICAST_QUERY_INTVL_MAX] and
 * logging when the requested value was out of range.
 */
void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
				  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
	}

	if (intvl_jiffies > BR_MULTICAST_QUERY_INTVL_MAX) {
		br_info(brmctx->br,
			"trying to set multicast query interval above maximum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MAX),
			jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MAX));
		intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MAX;
	}

	brmctx->multicast_query_interval = intvl_jiffies;
}
4831
4832
/* Set the multicast startup query interval from a clock_t value,
 * clamping it to [BR_MULTICAST_STARTUP_QUERY_INTVL_MIN,
 * BR_MULTICAST_STARTUP_QUERY_INTVL_MAX] and logging when the requested
 * value was out of range.
 */
void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
					  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
	}

	if (intvl_jiffies > BR_MULTICAST_STARTUP_QUERY_INTVL_MAX) {
		br_info(brmctx->br,
			"trying to set multicast startup query interval above maximum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX),
			jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX));
		intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MAX;
	}

	brmctx->multicast_startup_query_interval = intvl_jiffies;
}
4855
4856
/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 * - on allocation failure the count gathered so far is returned
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		/* skip ports without a device and @dev itself */
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->key.addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
4912
4913
/**
4914
* br_multicast_has_querier_anywhere - Checks for a querier on a bridge
4915
* @dev: The bridge port providing the bridge on which to check for a querier
4916
* @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
4917
*
4918
* Checks whether the given interface has a bridge on top and if so returns
4919
* true if a valid querier exists anywhere on the bridged link layer.
4920
* Otherwise returns false.
4921
*/
4922
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
4923
{
4924
struct net_bridge *br;
4925
struct net_bridge_port *port;
4926
struct ethhdr eth;
4927
bool ret = false;
4928
4929
rcu_read_lock();
4930
if (!netif_is_bridge_port(dev))
4931
goto unlock;
4932
4933
port = br_port_get_rcu(dev);
4934
if (!port || !port->br)
4935
goto unlock;
4936
4937
br = port->br;
4938
4939
memset(&eth, 0, sizeof(eth));
4940
eth.h_proto = htons(proto);
4941
4942
ret = br_multicast_querier_exists(&br->multicast_ctx, &eth, NULL);
4943
4944
unlock:
4945
rcu_read_unlock();
4946
return ret;
4947
}
4948
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
4949
4950
/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast *brmctx;
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;
	int port_ifidx;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;
	brmctx = &br->multicast_ctx;

	switch (proto) {
	case ETH_P_IP:
		/* fail if no recent foreign querier, or if the selected
		 * querier sits behind @dev itself
		 */
		port_ifidx = brmctx->ip4_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip4_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		port_ifidx = brmctx->ip6_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip6_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
5003
5004
/**
 * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
 * @dev: The bridge port adjacent to which to check for a multicast router
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a multicast router is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_router_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	port = br_port_get_check_rcu(dev);
	if (!port)
		goto unlock;

	brmctx = &port->br->multicast_ctx;
	switch (proto) {
	case ETH_P_IP:
		/* any router port other than @dev itself counts */
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
					 ip4_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
					 ip6_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#endif
	default:
		/* when compiled without IPv6 support, be conservative and
		 * always assume presence of an IPv6 multicast router
		 */
		ret = true;
	}

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent);
5061
5062
/* Account one IGMP/MLD packet of @type into the per-cpu stats in the
 * given direction. Queries are classified by version: via the
 * transport-payload length (a v3/v2 query is longer than the basic
 * header) and, for IGMP, via the code field (0 means v1).
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		/* transport payload length = total length - IP header */
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				/* code == 0 identifies an IGMPv1 query */
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* payload length counted from the transport header */
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}
5132
5133
/* Account one IGMP/MLD packet into the stats of port @p, or of the
 * bridge itself when @p is NULL. No-op when @type is unset or stats
 * collection is disabled.
 */
void br_multicast_count(struct net_bridge *br,
			const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	stats = p ? p->mcast_stats : br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}
5152
5153
int br_multicast_init_stats(struct net_bridge *br)
5154
{
5155
br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
5156
if (!br->mcast_stats)
5157
return -ENOMEM;
5158
5159
return 0;
5160
}
5161
5162
/* Free the bridge's per-cpu multicast statistics. */
void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}
5166
5167
/* noinline for https://llvm.org/pr45802#c9 */
/* Accumulate one RX/TX counter pair from @src into @dst. */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}
5173
5174
/* Sum the per-cpu multicast statistics of port @p (or of the bridge
 * when @p is NULL) into @dest. Each per-cpu snapshot is taken under the
 * u64 stats seqcount so counters within one cpu are consistent.
 */
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		/* retry until the snapshot wasn't raced by a writer */
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}
5219
5220
int br_mdb_hash_init(struct net_bridge *br)
5221
{
5222
int err;
5223
5224
err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
5225
if (err)
5226
return err;
5227
5228
err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
5229
if (err) {
5230
rhashtable_destroy(&br->sg_port_tbl);
5231
return err;
5232
}
5233
5234
return 0;
5235
}
5236
5237
/* Destroy both multicast rhashtables ((S,G) port table and mdb table).
 * Counterpart of br_mdb_hash_init().
 */
void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->sg_port_tbl);
	rhashtable_destroy(&br->mdb_hash_tbl);
}
5242
5243