GitHub Repository: torvalds/linux
Path: blob/master/net/bridge/br_vlan.c

// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"
#include "br_private_tunnel.h"

static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);

static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
			      const void *ptr)
{
	const struct net_bridge_vlan *vle = ptr;
	u16 vid = *(u16 *)arg->key;

	return vle->vid != vid;
}

static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.max_size = VLAN_N_VID,
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};

static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}
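
/* Every configured VLAN, bridge-wide ("master") or per-port, lives in an
 * rhashtable keyed by the 16-bit VID, so fast-path lookups avoid a list
 * walk. Illustrative only (not a call site in this file): a caller holding
 * a vlan group would do
 *
 *	struct net_bridge_vlan *v = br_vlan_lookup(&vg->vlan_hash, vid);
 *
 *	if (v && br_vlan_should_use(v))
 *		; // vid is configured and usable for filtering
 */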

static void __vlan_add_pvid(struct net_bridge_vlan_group *vg,
			    const struct net_bridge_vlan *v)
{
	if (vg->pvid == v->vid)
		return;

	smp_wmb();
	br_vlan_set_pvid_state(vg, v->state);
	vg->pvid = v->vid;
}

static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return;

	smp_wmb();
	vg->pvid = 0;
}
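
/* The smp_wmb() in the two helpers above orders the pvid/state stores so a
 * lockless reader never sees a pvid published before the fields it depends
 * on; it is expected to pair with the smp_rmb() done by readers such as
 * br_get_pvid() in br_private.h (our reading of the surrounding code, not
 * something this file states explicitly).
 */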

/* Update the BRIDGE_VLAN_INFO_PVID and BRIDGE_VLAN_INFO_UNTAGGED flags of @v.
 * If @commit is false, return just whether the BRIDGE_VLAN_INFO_PVID and
 * BRIDGE_VLAN_INFO_UNTAGGED bits of @flags would produce any change onto @v.
 */
static bool __vlan_flags_update(struct net_bridge_vlan *v, u16 flags,
				bool commit)
{
	struct net_bridge_vlan_group *vg;
	bool change;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	/* check if anything would be changed on commit */
	change = !!(flags & BRIDGE_VLAN_INFO_PVID) == !!(vg->pvid != v->vid) ||
		 ((flags ^ v->flags) & BRIDGE_VLAN_INFO_UNTAGGED);

	if (!commit)
		goto out;

	if (flags & BRIDGE_VLAN_INFO_PVID)
		__vlan_add_pvid(vg, v);
	else
		__vlan_delete_pvid(vg, v->vid);

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;

out:
	return change;
}

static bool __vlan_flags_would_change(struct net_bridge_vlan *v, u16 flags)
{
	return __vlan_flags_update(v, flags, false);
}

static void __vlan_flags_commit(struct net_bridge_vlan *v, u16 flags)
{
	__vlan_flags_update(v, flags, true);
}

static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  struct net_bridge_vlan *v, u16 flags,
			  struct netlink_ext_ack *extack)
{
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q add.
	 */
	err = br_switchdev_port_vlan_add(dev, v->vid, flags, false, extack);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, v->vid);
	v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
	return err;
}

static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid >= vent->vid)
			break;
	}
	list_add_rcu(&v->vlist, hpos);
}
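
/* Because __vlan_add_list() scans backwards and inserts after the first
 * entry with a smaller or equal VID, vlan_list stays sorted in ascending
 * VID order; e.g. adding VIDs 30, 10, 20 yields 10 -> 20 -> 30. The netlink
 * dump code later in this file relies on that ordering to compress
 * consecutive VIDs into ranges.
 */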

static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}

static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  const struct net_bridge_vlan *v)
{
	int err;

	/* Try switchdev op first. In case it is not supported, fallback to
	 * 8021q del.
	 */
	err = br_switchdev_port_vlan_del(dev, v->vid);
	if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
		vlan_vid_del(dev, br->vlan_proto, v->vid);
	return err == -EOPNOTSUPP ? 0 : err;
}

/* Returns a master vlan, if it didn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *
br_vlan_get_master(struct net_bridge *br, u16 vid,
		   struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		bool changed;

		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0, &changed, extack))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
		refcount_set(&masterv->refcnt, 1);
		return masterv;
	}
	refcount_inc(&masterv->refcnt);

	return masterv;
}

static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(!br_vlan_is_master(v));
	free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (refcount_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		br_multicast_toggle_one_vlan(masterv, false);
		br_multicast_ctx_deinit(&masterv->br_mcast_ctx);
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}

static void nbp_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(br_vlan_is_master(v));
	/* if we had per-port stats configured then free them here */
	if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
		free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

static void br_vlan_init_state(struct net_bridge_vlan *v)
{
	struct net_bridge *br;

	if (br_vlan_is_master(v))
		br = v->br;
	else
		br = v->port->br;

	if (br_opt_get(br, BROPT_MST_ENABLED)) {
		br_mst_vlan_init_state(v);
		return;
	}

	v->state = BR_STATE_FORWARDING;
	v->msti = 0;
}
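
/* So the initial per-VLAN state is plain "forwarding in MSTI 0" unless MST
 * is enabled, in which case br_mst_vlan_init_state() (br_mst.c) picks the
 * state; the exact MST behaviour is defined there, not in this file.
 */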

/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brentry flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brentry flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brentry flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
		      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
		vg = br_vlan_group(br);
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
		vg = nbp_vlan_group(p);
	}

	if (p) {
		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v, flags, extack);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
			bool changed;

			err = br_vlan_add(br, v->vid,
					  flags | BRIDGE_VLAN_INFO_BRENTRY,
					  &changed, extack);
			if (err)
				goto out_filt;

			if (changed)
				br_vlan_notify(br, NULL, v->vid, 0,
					       RTM_NEWVLAN);
		}

		masterv = br_vlan_get_master(br, v->vid, extack);
		if (!masterv) {
			err = -ENOMEM;
			goto out_filt;
		}
		v->brvlan = masterv;
		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
			v->stats =
			     netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
			if (!v->stats) {
				err = -ENOMEM;
				goto out_filt;
			}
			v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
		} else {
			v->stats = masterv->stats;
		}
		br_multicast_port_ctx_init(p, v, &v->port_mcast_ctx);
	} else {
		if (br_vlan_should_use(v)) {
			err = br_switchdev_port_vlan_add(dev, v->vid, flags,
							 false, extack);
			if (err && err != -EOPNOTSUPP)
				goto out;
		}
		br_multicast_ctx_init(br, v, &v->br_mcast_ctx);
		v->priv_flags |= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	}

	/* Add the dev mac and count the vlan only if it's usable */
	if (br_vlan_should_use(v)) {
		err = br_fdb_add_local(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed insert local address into bridge forwarding table\n");
			goto out_filt;
		}
		vg->num_vlans++;
	}

	/* set the state before publishing */
	br_vlan_init_state(v);

	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
					    br_vlan_rht_params);
	if (err)
		goto out_fdb_insert;

	__vlan_add_list(v);
	__vlan_flags_commit(v, flags);
	br_multicast_toggle_one_vlan(v, true);

	if (p)
		nbp_vlan_set_vlan_dev_state(p, v->vid);
out:
	return err;

out_fdb_insert:
	if (br_vlan_should_use(v)) {
		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
		vg->num_vlans--;
	}

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v);
		if (masterv) {
			if (v->stats && masterv->stats != v->stats)
				free_percpu(v->stats);
			v->stats = NULL;

			br_vlan_put_master(masterv);
			v->brvlan = NULL;
		}
	} else {
		br_switchdev_port_vlan_del(dev, v->vid);
	}

	goto out;
}
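
/* The error unwinding above mirrors the setup order: a failed rhashtable
 * insert first drops the local fdb entry and the vlan count
 * (out_fdb_insert), then falls through to undo the device filter, per-port
 * stats and master refcount (out_filt).
 */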

static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v);
		if (err)
			goto out;
	} else {
		err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
		if (err && err != -EOPNOTSUPP)
			goto out;
		err = 0;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	if (masterv != v) {
		vlan_tunnel_info_del(vg, v);
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		nbp_vlan_set_vlan_dev_state(p, v->vid);
		br_multicast_toggle_one_vlan(v, false);
		br_multicast_port_ctx_deinit(&v->port_mcast_ctx);
		call_rcu(&v->rcu, nbp_vlan_rcu_free);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}

static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	vlan_tunnel_deinit(vg);
	kfree(vg);
}

static void __vlan_flush(const struct net_bridge *br,
			 const struct net_bridge_port *p,
			 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;
	u16 v_start = 0, v_end = 0;
	int err;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
		/* take care of disjoint ranges */
		if (!v_start) {
			v_start = vlan->vid;
		} else if (vlan->vid - v_end != 1) {
			/* found range end, notify and start next one */
			br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
			v_start = vlan->vid;
		}
		v_end = vlan->vid;

		err = __vlan_del(vlan);
		if (err) {
			br_err(br,
			       "port %u(%s) failed to delete vlan %d: %pe\n",
			       (unsigned int) p->port_no, p->dev->name,
			       vlan->vid, ERR_PTR(err));
		}
	}

	/* notify about the last/whole vlan range */
	if (v_start)
		br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
}

struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       const struct net_bridge_port *p,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct pcpu_sw_netstats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id. If the vlan id has untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point. The
	 * only exception is the bridge is set in promisc mode and the
	 * packet is destined for the bridge device. In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		u64_stats_add(&stats->tx_bytes, skb->len);
		u64_stats_inc(&stats->tx_packets);
		u64_stats_update_end(&stats->syncp);
	}

	/* If the skb will be sent using forwarding offload, the assumption is
	 * that the switchdev will inject the packet into hardware together
	 * with the bridge VLAN, so that it can be forwarded according to that
	 * VLAN. The switchdev should deal with popping the VLAN header in
	 * hardware on each egress port as appropriate. So only strip the VLAN
	 * header if forwarding offload is not being used.
	 */
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED &&
	    !br_switchdev_frame_uses_tx_fwd_offload(skb))
		__vlan_hwaccel_clear_tag(skb);

	if (p && (p->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
		kfree_skb(skb);
		return NULL;
	}
out:
	return skb;
}

/* Called under RCU */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid,
			      u8 *state,
			      struct net_bridge_vlan **vlan)
{
	struct pcpu_sw_netstats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port. That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port. Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, we know that skb->vlan_tci VID
			 * field was 0.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if snooping and stats are disabled we can avoid the lookup */
		if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) &&
		    !br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
			if (*state == BR_STATE_FORWARDING) {
				*state = br_vlan_get_pvid_state(vg);
				if (!br_vlan_state_allowed(*state, true))
					goto drop;
			}
			return true;
		}
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (*state == BR_STATE_FORWARDING) {
		*state = br_vlan_get_state(v);
		if (!br_vlan_state_allowed(*state, true))
			goto drop;
	}

	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		u64_stats_add(&stats->rx_bytes, skb->len);
		u64_stats_inc(&stats->rx_packets);
		u64_stats_update_end(&stats->syncp);
	}

	*vlan = v;

	return true;

drop:
	kfree_skb(skb);
	return false;
}
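
/* Example of the VID 0 path above: an 802.1Q frame arriving with PCP 5 and
 * VID 0 on a port whose pvid is 100 leaves this function carrying PCP 5,
 * VID 100; only the VID field is filled in from the pvid, the priority
 * bits are preserved.
 */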

bool br_allowed_ingress(const struct net_bridge *br,
			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
			u16 *vid, u8 *state,
			struct net_bridge_vlan **vlan)
{
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	*vlan = NULL;
	if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

	return __allowed_ingress(br, vg, skb, vid, state, vlan);
}

/* Called under RCU. */
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
		       const struct sk_buff *skb)
{
	const struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		return true;

	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	if (v && br_vlan_should_use(v) &&
	    br_vlan_state_allowed(br_vlan_get_state(v), false))
		return true;

	return false;
}

/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;
	struct net_bridge_vlan *v;

	/* If filtering was disabled at input, let it pass. */
	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return true;

	vg = nbp_vlan_group_rcu(p);
	if (!vg || !vg->num_vlans)
		return false;

	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = br_get_pvid(vg);
		if (!*vid ||
		    !br_vlan_state_allowed(br_vlan_get_pvid_state(vg), true))
			return false;

		return true;
	}

	v = br_vlan_find(vg, *vid);
	if (v && br_vlan_state_allowed(br_vlan_get_state(v), true))
		return true;

	return false;
}

static int br_vlan_add_existing(struct net_bridge *br,
				struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *vlan,
				u16 flags, bool *changed,
				struct netlink_ext_ack *extack)
{
	bool becomes_brentry = false;
	bool would_change = false;
	int err;

	if (!br_vlan_is_brentry(vlan)) {
		/* Trying to change flags of non-existent bridge vlan */
		if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
			return -EINVAL;

		becomes_brentry = true;
	} else {
		would_change = __vlan_flags_would_change(vlan, flags);
	}

	/* Master VLANs that aren't brentries weren't notified before,
	 * time to notify them now.
	 */
	if (becomes_brentry || would_change) {
		err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags,
						 would_change, extack);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	if (becomes_brentry) {
		/* It was only kept for port vlans, now make it real */
		err = br_fdb_add_local(br, NULL, br->dev->dev_addr, vlan->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto err_fdb_insert;
		}

		refcount_inc(&vlan->refcnt);
		vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans++;
		*changed = true;
		br_multicast_toggle_one_vlan(vlan, true);
	}

	__vlan_flags_commit(vlan, flags);
	if (would_change)
		*changed = true;

	return 0;

err_fdb_insert:
	br_switchdev_port_vlan_del(br->dev, vlan->vid);
	return err;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
		struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan)
		return br_vlan_add_existing(br, vg, vlan, flags, changed,
					    extack);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		refcount_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags, extack);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	} else {
		*changed = true;
	}

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	vlan_tunnel_info_del(vg, v);

	return __vlan_del(v);
}

void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(br, NULL, vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_net();
	__vlan_group_free(vg);
}

struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (!vg)
		return NULL;

	return br_vlan_lookup(&vg->vlan_hash, vid);
}

/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
		return;

	spin_lock_bh(&br->lock);
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}

/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}

int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val,
			  struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
		return 0;

	br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);

	err = switchdev_port_attr_set(br->dev, &attr, extack);
	if (err && err != -EOPNOTSUPP) {
		br_opt_toggle(br, BROPT_VLAN_ENABLED, !val);
		return err;
	}

	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);
	if (!val && br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		br_info(br, "vlan filtering disabled, automatically disabling multicast vlan snooping\n");
		br_multicast_toggle_vlan_snooping(br, false, NULL);
	}

	return 0;
}

bool br_vlan_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_opt_get(br, BROPT_VLAN_ENABLED);
}
EXPORT_SYMBOL_GPL(br_vlan_enabled);
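
/* br_vlan_enabled() is exported for drivers; it must be called with a
 * bridge master device, which the helper itself does not verify. A hedged
 * usage sketch (hypothetical driver code, not from this file):
 *
 *	if (netif_is_bridge_master(upper_dev) && br_vlan_enabled(upper_dev))
 *		; // offload VLAN filtering for this bridge
 */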

int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
{
	struct net_bridge *br = netdev_priv(dev);

	*p_proto = ntohs(br->vlan_proto);

	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_proto);

int __br_vlan_set_proto(struct net_bridge *br, __be16 proto,
			struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_protocol = ntohs(proto),
	};
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto = br->vlan_proto;

	if (br->vlan_proto == proto)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
				continue;
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
				continue;
			vlan_vid_del(p->dev, oldproto, vlan->vid);
		}
	}

	return 0;

err_filt:
	attr.u.vlan_protocol = ntohs(oldproto);
	switchdev_port_attr_set(br->dev, &attr, NULL);

	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist) {
		if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
			continue;
		vlan_vid_del(p->dev, proto, vlan->vid);
	}

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
				continue;
			vlan_vid_del(p->dev, proto, vlan->vid);
		}
	}

	return err;
}

int br_vlan_set_proto(struct net_bridge *br, unsigned long val,
		      struct netlink_ext_ack *extack)
{
	if (!eth_type_vlan(htons(val)))
		return -EPROTONOSUPPORT;

	return __br_vlan_set_proto(br, htons(val), extack);
}

int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
{
	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *p;

	/* allow to change the option if there are no port vlans configured */
	list_for_each_entry(p, &br->port_list, list) {
		struct net_bridge_vlan_group *vg = nbp_vlan_group(p);

		if (vg->num_vlans)
			return -EBUSY;
	}

	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	struct net_bridge_vlan *v;

	if (vid != vg->pvid)
		return false;

	v = br_vlan_lookup(&vg->vlan_hash, vid);
	if (v && br_vlan_should_use(v) &&
	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return true;

	return false;
}

static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid)) {
		if (!br_vlan_delete(br, pvid))
			br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
	}

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid) &&
		    !nbp_vlan_delete(p, pvid))
			br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
	}

	br->default_pvid = 0;
}

int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
			       struct netlink_ext_ack *extack)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	unsigned long *changed;
	bool vlchange;
	u16 old_pvid;
	int err = 0;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY,
				  &vlchange, extack);
		if (err)
			goto out;

		if (br_vlan_delete(br, old_pvid))
			br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN);
		br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN);
		__set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &vlchange, extack);
		if (err)
			goto err_port;
		if (nbp_vlan_delete(p, old_pvid))
			br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN);
		br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN);
		__set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	bitmap_free(changed);
	return err;

err_port:
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid) {
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED,
				     &vlchange, NULL);
			br_vlan_notify(p->br, p, old_pvid, 0, RTM_NEWVLAN);
		}
		nbp_vlan_delete(p, pvid);
		br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
	}

	if (test_bit(0, changed)) {
		if (old_pvid) {
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY,
				    &vlchange, NULL);
			br_vlan_notify(br, NULL, old_pvid, 0, RTM_NEWVLAN);
		}
		br_vlan_delete(br, pvid);
		br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
	}
	goto out;
}

int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val,
			     struct netlink_ext_ack *extack)
{
	u16 pvid = val;
	int err = 0;

	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (pvid == br->default_pvid)
		goto out;

	/* Only allow default pvid change when filtering is disabled */
	if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto out;
	}
	err = __br_vlan_set_default_pvid(br, pvid, extack);
out:
	return err;
}

int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	rcu_assign_pointer(br->vlgrp, vg);

out:
	return ret;

err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}

int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr, extack);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		bool changed;

		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &changed, extack);
		if (ret)
			goto err_vlan_add;
		br_vlan_notify(p->br, p, p->br->default_pvid, 0, RTM_NEWVLAN);
	}
out:
	return ret;

err_vlan_add:
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
	kfree(vg);

	goto out;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
		 bool *changed, struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		bool would_change = __vlan_flags_would_change(vlan, flags);

		if (would_change) {
			/* Pass the flags to the hardware bridge */
			ret = br_switchdev_port_vlan_add(port->dev, vid, flags,
							 true, extack);
			if (ret && ret != -EOPNOTSUPP)
				return ret;
		}

		__vlan_flags_commit(vlan, flags);
		*changed = would_change;

		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags, extack);
	if (ret)
		kfree(vlan);
	else
		*changed = true;

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(nbp_vlan_group(port), vid);
	if (!v)
		return -ENOENT;
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}

void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(port->br, port, vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_net();
	__vlan_group_free(vg);
}

void br_vlan_get_stats(const struct net_bridge_vlan *v,
		       struct pcpu_sw_netstats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		struct pcpu_sw_netstats *cpu_stats;
		unsigned int start;

		cpu_stats = per_cpu_ptr(v->stats, i);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			rxpackets = u64_stats_read(&cpu_stats->rx_packets);
			rxbytes = u64_stats_read(&cpu_stats->rx_bytes);
			txbytes = u64_stats_read(&cpu_stats->tx_bytes);
			txpackets = u64_stats_read(&cpu_stats->tx_packets);
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		u64_stats_add(&stats->rx_packets, rxpackets);
		u64_stats_add(&stats->rx_bytes, rxbytes);
		u64_stats_add(&stats->tx_bytes, txbytes);
		u64_stats_add(&stats->tx_packets, txpackets);
	}
}

int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
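
/* Hedged usage sketch for the exported getters (hypothetical caller, not
 * from this file): with RTNL held, a port's pvid and its flags can be
 * resolved in two steps:
 *
 *	u16 pvid;
 *	struct bridge_vlan_info vinfo;
 *
 *	if (!br_vlan_get_pvid(dev, &pvid) && pvid &&
 *	    !br_vlan_get_info(dev, pvid, &vinfo))
 *		; // vinfo.flags now carries the BRIDGE_VLAN_INFO_* bits
 */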
1426
1427
int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
1428
{
1429
struct net_bridge_vlan_group *vg;
1430
struct net_bridge_port *p;
1431
1432
p = br_port_get_check_rcu(dev);
1433
if (p)
1434
vg = nbp_vlan_group_rcu(p);
1435
else if (netif_is_bridge_master(dev))
1436
vg = br_vlan_group_rcu(netdev_priv(dev));
1437
else
1438
return -EINVAL;
1439
1440
*p_pvid = br_get_pvid(vg);
1441
return 0;
1442
}
1443
EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
1444
1445
void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
1446
struct net_device_path_ctx *ctx,
1447
struct net_device_path *path)
1448
{
1449
struct net_bridge_vlan_group *vg;
1450
int idx = ctx->num_vlans - 1;
1451
u16 vid;
1452
1453
path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
1454
1455
if (!br_opt_get(br, BROPT_VLAN_ENABLED))
1456
return;
1457
1458
vg = br_vlan_group(br);
1459
1460
if (idx >= 0 &&
1461
ctx->vlan[idx].proto == br->vlan_proto) {
1462
vid = ctx->vlan[idx].id;
1463
} else {
1464
path->bridge.vlan_mode = DEV_PATH_BR_VLAN_TAG;
1465
vid = br_get_pvid(vg);
1466
}
1467
1468
path->bridge.vlan_id = vid;
1469
path->bridge.vlan_proto = br->vlan_proto;
1470
}
1471
1472
int br_vlan_fill_forward_path_mode(struct net_bridge *br,
1473
struct net_bridge_port *dst,
1474
struct net_device_path *path)
1475
{
1476
struct net_bridge_vlan_group *vg;
1477
struct net_bridge_vlan *v;
1478
1479
if (!br_opt_get(br, BROPT_VLAN_ENABLED))
1480
return 0;
1481
1482
vg = nbp_vlan_group_rcu(dst);
1483
v = br_vlan_find(vg, path->bridge.vlan_id);
1484
if (!v || !br_vlan_should_use(v))
1485
return -EINVAL;
1486
1487
if (!(v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
1488
return 0;
1489
1490
if (path->bridge.vlan_mode == DEV_PATH_BR_VLAN_TAG)
1491
path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
1492
else if (v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
1493
path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG_HW;
1494
else
1495
path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG;
1496
1497
return 0;
1498
}
1499
1500
int br_vlan_get_info(const struct net_device *dev, u16 vid,
1501
struct bridge_vlan_info *p_vinfo)
1502
{
1503
struct net_bridge_vlan_group *vg;
1504
struct net_bridge_vlan *v;
1505
struct net_bridge_port *p;
1506
1507
ASSERT_RTNL();
1508
p = br_port_get_check_rtnl(dev);
1509
if (p)
1510
vg = nbp_vlan_group(p);
1511
else if (netif_is_bridge_master(dev))
1512
vg = br_vlan_group(netdev_priv(dev));
1513
else
1514
return -EINVAL;
1515
1516
v = br_vlan_find(vg, vid);
1517
if (!v)
1518
return -ENOENT;
1519
1520
p_vinfo->vid = vid;
1521
p_vinfo->flags = v->flags;
1522
if (vid == br_get_pvid(vg))
1523
p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
1524
return 0;
1525
}
1526
EXPORT_SYMBOL_GPL(br_vlan_get_info);
1527
1528
int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
1529
struct bridge_vlan_info *p_vinfo)
1530
{
1531
struct net_bridge_vlan_group *vg;
1532
struct net_bridge_vlan *v;
1533
struct net_bridge_port *p;
1534
1535
p = br_port_get_check_rcu(dev);
1536
if (p)
1537
vg = nbp_vlan_group_rcu(p);
1538
else if (netif_is_bridge_master(dev))
1539
vg = br_vlan_group_rcu(netdev_priv(dev));
1540
else
1541
return -EINVAL;
1542
1543
v = br_vlan_find(vg, vid);
1544
if (!v)
1545
return -ENOENT;
1546
1547
p_vinfo->vid = vid;
1548
p_vinfo->flags = v->flags;
1549
if (vid == br_get_pvid(vg))
1550
p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
1551
return 0;
1552
}
1553
EXPORT_SYMBOL_GPL(br_vlan_get_info_rcu);
1554
1555
static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
1556
{
1557
return is_vlan_dev(dev) &&
1558
!!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
1559
}
1560
1561
static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
1562
__always_unused struct netdev_nested_priv *priv)
1563
{
1564
return br_vlan_is_bind_vlan_dev(dev);
1565
}
1566
1567
static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
1568
{
1569
int found;
1570
1571
rcu_read_lock();
1572
found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
1573
NULL);
1574
rcu_read_unlock();
1575
1576
return !!found;
1577
}
1578
1579
struct br_vlan_bind_walk_data {
1580
u16 vid;
1581
struct net_device *result;
1582
};
1583
1584
static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
1585
struct netdev_nested_priv *priv)
1586
{
1587
struct br_vlan_bind_walk_data *data = priv->data;
1588
int found = 0;
1589
1590
if (br_vlan_is_bind_vlan_dev(dev) &&
1591
vlan_dev_priv(dev)->vlan_id == data->vid) {
1592
data->result = dev;
1593
found = 1;
1594
}
1595
1596
return found;
1597
}
1598
1599
static struct net_device *
1600
br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
1601
{
1602
struct br_vlan_bind_walk_data data = {
1603
.vid = vid,
1604
};
1605
struct netdev_nested_priv priv = {
1606
.data = (void *)&data,
1607
};
1608
1609
rcu_read_lock();
1610
netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
1611
&priv);
1612
rcu_read_unlock();
1613
1614
return data.result;
1615
}
1616
1617
static bool br_vlan_is_dev_up(const struct net_device *dev)
1618
{
1619
return !!(dev->flags & IFF_UP) && netif_oper_up(dev);
1620
}
1621
1622
static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
1623
struct net_device *vlan_dev)
1624
{
1625
u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
1626
struct net_bridge_vlan_group *vg;
1627
struct net_bridge_port *p;
1628
bool has_carrier = false;
1629
1630
if (!netif_carrier_ok(br->dev)) {
1631
netif_carrier_off(vlan_dev);
1632
return;
1633
}
1634
1635
list_for_each_entry(p, &br->port_list, list) {
1636
vg = nbp_vlan_group(p);
1637
if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
1638
has_carrier = true;
1639
break;
1640
}
1641
}
1642
1643
if (has_carrier)
1644
netif_carrier_on(vlan_dev);
1645
else
1646
netif_carrier_off(vlan_dev);
1647
}
1648
1649
static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
1650
{
1651
struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
1652
struct net_bridge_vlan *vlan;
1653
struct net_device *vlan_dev;
1654
1655
list_for_each_entry(vlan, &vg->vlan_list, vlist) {
1656
vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
1657
vlan->vid);
1658
if (vlan_dev) {
1659
if (br_vlan_is_dev_up(p->dev)) {
1660
if (netif_carrier_ok(p->br->dev))
1661
netif_carrier_on(vlan_dev);
1662
} else {
1663
br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1664
}
1665
}
1666
}
1667
}
1668
1669
static void br_vlan_toggle_bridge_binding(struct net_device *br_dev,
1670
bool enable)
1671
{
1672
struct net_bridge *br = netdev_priv(br_dev);
1673
1674
if (enable)
1675
br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
1676
else
1677
br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
1678
br_vlan_has_upper_bind_vlan_dev(br_dev));
1679
}
1680
1681
static void br_vlan_upper_change(struct net_device *dev,
1682
struct net_device *upper_dev,
1683
bool linking)
1684
{
1685
struct net_bridge *br = netdev_priv(dev);
1686
1687
if (!br_vlan_is_bind_vlan_dev(upper_dev))
1688
return;
1689
1690
br_vlan_toggle_bridge_binding(dev, linking);
1691
if (linking)
1692
br_vlan_set_vlan_dev_state(br, upper_dev);
1693
}
1694
1695
struct br_vlan_link_state_walk_data {
1696
struct net_bridge *br;
1697
};
1698
1699
static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
1700
struct netdev_nested_priv *priv)
1701
{
1702
struct br_vlan_link_state_walk_data *data = priv->data;
1703
1704
if (br_vlan_is_bind_vlan_dev(vlan_dev))
1705
br_vlan_set_vlan_dev_state(data->br, vlan_dev);
1706
1707
return 0;
1708
}
1709
1710
static void br_vlan_link_state_change(struct net_device *dev,
1711
struct net_bridge *br)
1712
{
1713
struct br_vlan_link_state_walk_data data = {
1714
.br = br
1715
};
1716
struct netdev_nested_priv priv = {
1717
.data = (void *)&data,
1718
};
1719
1720
rcu_read_lock();
1721
netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
1722
&priv);
1723
rcu_read_unlock();
1724
}
1725
1726
/* Must be protected by RTNL. */
1727
static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
1728
{
1729
struct net_device *vlan_dev;
1730
1731
if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1732
return;
1733
1734
vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
1735
if (vlan_dev)
1736
br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1737
}
1738
1739
/* Must be protected by RTNL. */
1740
int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
1741
{
1742
struct netdev_notifier_changeupper_info *info;
1743
struct net_bridge *br = netdev_priv(dev);
1744
int vlcmd = 0, ret = 0;
1745
bool changed = false;
1746
1747
switch (event) {
1748
case NETDEV_REGISTER:
1749
ret = br_vlan_add(br, br->default_pvid,
1750
BRIDGE_VLAN_INFO_PVID |
1751
BRIDGE_VLAN_INFO_UNTAGGED |
1752
BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
1753
vlcmd = RTM_NEWVLAN;
1754
break;
1755
case NETDEV_UNREGISTER:
1756
changed = !br_vlan_delete(br, br->default_pvid);
1757
vlcmd = RTM_DELVLAN;
1758
break;
1759
case NETDEV_CHANGEUPPER:
1760
info = ptr;
1761
br_vlan_upper_change(dev, info->upper_dev, info->linking);
1762
break;
1763
1764
case NETDEV_CHANGE:
1765
case NETDEV_UP:
1766
if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
1767
break;
1768
br_vlan_link_state_change(dev, br);
1769
break;
1770
}
1771
if (changed)
1772
br_vlan_notify(br, NULL, br->default_pvid, 0, vlcmd);
1773
1774
return ret;
1775
}
1776
1777
void br_vlan_vlan_upper_event(struct net_device *br_dev,
1778
struct net_device *vlan_dev,
1779
unsigned long event)
1780
{
1781
struct vlan_dev_priv *vlan = vlan_dev_priv(vlan_dev);
1782
struct net_bridge *br = netdev_priv(br_dev);
1783
bool bridge_binding;
1784
1785
switch (event) {
1786
case NETDEV_CHANGE:
1787
case NETDEV_UP:
1788
break;
1789
default:
1790
return;
1791
}
1792
1793
bridge_binding = vlan->flags & VLAN_FLAG_BRIDGE_BINDING;
1794
br_vlan_toggle_bridge_binding(br_dev, bridge_binding);
1795
if (bridge_binding)
1796
br_vlan_set_vlan_dev_state(br, vlan_dev);
1797
else if (!bridge_binding && netif_carrier_ok(br_dev))
1798
netif_carrier_on(vlan_dev);
1799
}
1800
1801
/* Must be protected by RTNL. */
1802
void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
1803
{
1804
if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1805
return;
1806
1807
switch (event) {
1808
case NETDEV_CHANGE:
1809
case NETDEV_DOWN:
1810
case NETDEV_UP:
1811
br_vlan_set_all_vlan_dev_state(p);
1812
break;
1813
}
1814
}
1815
1816
static bool br_vlan_stats_fill(struct sk_buff *skb,
1817
const struct net_bridge_vlan *v)
1818
{
1819
struct pcpu_sw_netstats stats;
1820
struct nlattr *nest;
1821
1822
nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
1823
if (!nest)
1824
return false;
1825
1826
br_vlan_get_stats(v, &stats);
1827
if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES,
1828
u64_stats_read(&stats.rx_bytes),
1829
BRIDGE_VLANDB_STATS_PAD) ||
1830
nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
1831
u64_stats_read(&stats.rx_packets),
1832
BRIDGE_VLANDB_STATS_PAD) ||
1833
nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES,
1834
u64_stats_read(&stats.tx_bytes),
1835
BRIDGE_VLANDB_STATS_PAD) ||
1836
nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
1837
u64_stats_read(&stats.tx_packets),
1838
BRIDGE_VLANDB_STATS_PAD))
1839
goto out_err;
1840
1841
nla_nest_end(skb, nest);
1842
1843
return true;
1844
1845
out_err:
1846
nla_nest_cancel(skb, nest);
1847
return false;
1848
}
1849
1850
/* v_opts is used to dump the options which must be equal in the whole range */
1851
static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
1852
const struct net_bridge_vlan *v_opts,
1853
const struct net_bridge_port *p,
1854
u16 flags,
1855
bool dump_stats)
1856
{
1857
struct bridge_vlan_info info;
1858
struct nlattr *nest;
1859
1860
nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY);
1861
if (!nest)
1862
return false;
1863
1864
memset(&info, 0, sizeof(info));
1865
info.vid = vid;
1866
if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
1867
info.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1868
if (flags & BRIDGE_VLAN_INFO_PVID)
1869
info.flags |= BRIDGE_VLAN_INFO_PVID;
1870
1871
if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info))
1872
goto out_err;
1873
1874
if (vid_range && vid < vid_range &&
1875
!(flags & BRIDGE_VLAN_INFO_PVID) &&
1876
nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
1877
goto out_err;
1878
1879
if (v_opts) {
1880
if (!br_vlan_opts_fill(skb, v_opts, p))
1881
goto out_err;
1882
1883
if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
1884
goto out_err;
1885
}
1886
1887
nla_nest_end(skb, nest);
1888
1889
return true;
1890
1891
out_err:
1892
nla_nest_cancel(skb, nest);
1893
return false;
1894
}
1895
1896
static size_t rtnl_vlan_nlmsg_size(void)
1897
{
1898
return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
1899
+ nla_total_size(0) /* BRIDGE_VLANDB_ENTRY */
1900
+ nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_ENTRY_RANGE */
1901
+ nla_total_size(sizeof(struct bridge_vlan_info)) /* BRIDGE_VLANDB_ENTRY_INFO */
1902
+ br_vlan_opts_nl_size(); /* bridge vlan options */
1903
}
1904
1905
void br_vlan_notify(const struct net_bridge *br,
1906
const struct net_bridge_port *p,
1907
u16 vid, u16 vid_range,
1908
int cmd)
1909
{
1910
struct net_bridge_vlan_group *vg;
1911
struct net_bridge_vlan *v = NULL;
1912
struct br_vlan_msg *bvm;
1913
struct nlmsghdr *nlh;
1914
struct sk_buff *skb;
1915
int err = -ENOBUFS;
1916
struct net *net;
1917
u16 flags = 0;
1918
int ifindex;
1919
1920
/* right now notifications are done only with rtnl held */
1921
ASSERT_RTNL();
1922
1923
if (p) {
1924
ifindex = p->dev->ifindex;
1925
vg = nbp_vlan_group(p);
1926
net = dev_net(p->dev);
1927
} else {
1928
ifindex = br->dev->ifindex;
1929
vg = br_vlan_group(br);
1930
net = dev_net(br->dev);
1931
}
1932
1933
skb = nlmsg_new(rtnl_vlan_nlmsg_size(), GFP_KERNEL);
1934
if (!skb)
1935
goto out_err;
1936
1937
err = -EMSGSIZE;
1938
nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*bvm), 0);
1939
if (!nlh)
1940
goto out_err;
1941
bvm = nlmsg_data(nlh);
1942
memset(bvm, 0, sizeof(*bvm));
1943
bvm->family = AF_BRIDGE;
1944
bvm->ifindex = ifindex;
1945
1946
switch (cmd) {
1947
case RTM_NEWVLAN:
1948
/* need to find the vlan due to flags/options */
1949
v = br_vlan_find(vg, vid);
1950
if (!v || !br_vlan_should_use(v))
1951
goto out_kfree;
1952
1953
flags = v->flags;
1954
if (br_get_pvid(vg) == v->vid)
1955
flags |= BRIDGE_VLAN_INFO_PVID;
1956
break;
1957
case RTM_DELVLAN:
1958
break;
1959
default:
1960
goto out_kfree;
1961
}
1962
1963
if (!br_vlan_fill_vids(skb, vid, vid_range, v, p, flags, false))
1964
goto out_err;
1965
1966
nlmsg_end(skb, nlh);
1967
rtnl_notify(skb, net, 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
1968
return;
1969
1970
out_err:
1971
rtnl_set_sk_err(net, RTNLGRP_BRVLAN, err);
1972
out_kfree:
1973
kfree_skb(skb);
1974
}
1975
1976
/* check if v_curr can enter a range ending in range_end */
1977
bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
1978
const struct net_bridge_vlan *range_end)
1979
{
1980
return v_curr->vid - range_end->vid == 1 &&
1981
range_end->flags == v_curr->flags &&
1982
br_vlan_opts_eq_range(v_curr, range_end);
1983
}
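
/* Example: VIDs 10..19 configured with identical flags and options are
 * dumped below as a single entry (vid 10 plus BRIDGE_VLANDB_ENTRY_RANGE
 * 19); the pvid, a stats dump or any differing flag/option ends the range.
 */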
1984
static int br_vlan_dump_dev(const struct net_device *dev,
                            struct sk_buff *skb,
                            struct netlink_callback *cb,
                            u32 dump_flags)
{
        struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
        bool dump_global = !!(dump_flags & BRIDGE_VLANDB_DUMPF_GLOBAL);
        bool dump_stats = !!(dump_flags & BRIDGE_VLANDB_DUMPF_STATS);
        struct net_bridge_vlan_group *vg;
        int idx = 0, s_idx = cb->args[1];
        struct nlmsghdr *nlh = NULL;
        struct net_bridge_port *p;
        struct br_vlan_msg *bvm;
        struct net_bridge *br;
        int err = 0;
        u16 pvid;

        if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
                return -EINVAL;

        if (netif_is_bridge_master(dev)) {
                br = netdev_priv(dev);
                vg = br_vlan_group_rcu(br);
                p = NULL;
        } else {
                /* global options are dumped only for bridge devices */
                if (dump_global)
                        return 0;

                p = br_port_get_rcu(dev);
                if (WARN_ON(!p))
                        return -EINVAL;
                vg = nbp_vlan_group_rcu(p);
                br = p->br;
        }

        if (!vg)
                return 0;

        nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                        RTM_NEWVLAN, sizeof(*bvm), NLM_F_MULTI);
        if (!nlh)
                return -EMSGSIZE;
        bvm = nlmsg_data(nlh);
        memset(bvm, 0, sizeof(*bvm));
        bvm->family = PF_BRIDGE;
        bvm->ifindex = dev->ifindex;
        pvid = br_get_pvid(vg);

        /* idx must stay at range's beginning until it is filled in */
        list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
                if (!dump_global && !br_vlan_should_use(v))
                        continue;
                if (idx < s_idx) {
                        idx++;
                        continue;
                }

                if (!range_start) {
                        range_start = v;
                        range_end = v;
                        continue;
                }

                if (dump_global) {
                        if (br_vlan_global_opts_can_enter_range(v, range_end))
                                goto update_end;
                        if (!br_vlan_global_opts_fill(skb, range_start->vid,
                                                      range_end->vid,
                                                      range_start)) {
                                err = -EMSGSIZE;
                                break;
                        }
                        /* advance number of filled vlans */
                        idx += range_end->vid - range_start->vid + 1;

                        range_start = v;
                } else if (dump_stats || v->vid == pvid ||
                           !br_vlan_can_enter_range(v, range_end)) {
                        u16 vlan_flags = br_vlan_flags(range_start, pvid);

                        if (!br_vlan_fill_vids(skb, range_start->vid,
                                               range_end->vid, range_start,
                                               p, vlan_flags, dump_stats)) {
                                err = -EMSGSIZE;
                                break;
                        }
                        /* advance number of filled vlans */
                        idx += range_end->vid - range_start->vid + 1;

                        range_start = v;
                }
update_end:
                range_end = v;
        }

        /* err will be 0 and range_start will be set in 3 cases here:
         * - first vlan (range_start == range_end)
         * - last vlan (range_start == range_end, not in range)
         * - last vlan range (range_start != range_end, in range)
         */
        if (!err && range_start) {
                if (dump_global &&
                    !br_vlan_global_opts_fill(skb, range_start->vid,
                                              range_end->vid, range_start))
                        err = -EMSGSIZE;
                else if (!dump_global &&
                         !br_vlan_fill_vids(skb, range_start->vid,
                                            range_end->vid, range_start,
                                            p, br_vlan_flags(range_start, pvid),
                                            dump_stats))
                        err = -EMSGSIZE;
        }

        cb->args[1] = err ? idx : 0;

        nlmsg_end(skb, nlh);

        return err;
}

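/* Dump resume state: cb->args[0] holds the index of the next netdev to visit
 * when walking all devices, cb->args[1] the per-device vlan index to resume
 * from after the skb filled up (-EMSGSIZE).  br_vlan_dump_dev() clears
 * args[1] once a device has been dumped completely, so the next device
 * starts its vlan walk from the beginning.
 */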
static const struct nla_policy br_vlan_db_dump_pol[BRIDGE_VLANDB_DUMP_MAX + 1] = {
        [BRIDGE_VLANDB_DUMP_FLAGS] = { .type = NLA_U32 },
};

static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct nlattr *dtb[BRIDGE_VLANDB_DUMP_MAX + 1];
        int idx = 0, err = 0, s_idx = cb->args[0];
        struct net *net = sock_net(skb->sk);
        struct br_vlan_msg *bvm;
        struct net_device *dev;
        u32 dump_flags = 0;

        err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
                          br_vlan_db_dump_pol, cb->extack);
        if (err < 0)
                return err;

        bvm = nlmsg_data(cb->nlh);
        if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
                dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);

        rcu_read_lock();
        if (bvm->ifindex) {
                dev = dev_get_by_index_rcu(net, bvm->ifindex);
                if (!dev) {
                        err = -ENODEV;
                        goto out_err;
                }
                err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
                /* if the dump completed without an error we return 0 here */
                if (err != -EMSGSIZE)
                        goto out_err;
        } else {
                for_each_netdev_rcu(net, dev) {
                        if (idx < s_idx)
                                goto skip;

                        err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
                        if (err == -EMSGSIZE)
                                break;
skip:
                        idx++;
                }
        }
        cb->args[0] = idx;
        rcu_read_unlock();

        return skb->len;

out_err:
        rcu_read_unlock();

        return err;
}

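/* Attribute policy for single vlan db entries.  Note that
 * BRIDGE_VLANDB_ENTRY_MCAST_N_GROUPS is NLA_REJECT: the current group count
 * is a read-only value reported in dumps, so attempts to set it from user
 * space fail at parse time, while the configurable MCAST_MAX_GROUPS limit
 * is accepted as a u32.
 */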
static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
        [BRIDGE_VLANDB_ENTRY_INFO] =
                NLA_POLICY_EXACT_LEN(sizeof(struct bridge_vlan_info)),
        [BRIDGE_VLANDB_ENTRY_RANGE] = { .type = NLA_U16 },
        [BRIDGE_VLANDB_ENTRY_STATE] = { .type = NLA_U8 },
        [BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
        [BRIDGE_VLANDB_ENTRY_MCAST_ROUTER] = { .type = NLA_U8 },
        [BRIDGE_VLANDB_ENTRY_MCAST_N_GROUPS] = { .type = NLA_REJECT },
        [BRIDGE_VLANDB_ENTRY_MCAST_MAX_GROUPS] = { .type = NLA_U32 },
        [BRIDGE_VLANDB_ENTRY_NEIGH_SUPPRESS] = NLA_POLICY_MAX(NLA_U8, 1),
};

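/* New-style vlan ranges: an entry carries the range start in
 * BRIDGE_VLANDB_ENTRY_INFO and, optionally, the range end vid in
 * BRIDGE_VLANDB_ENTRY_RANGE; the old BRIDGE_VLAN_INFO_RANGE_BEGIN/END flag
 * pairs are rejected and synthesized internally instead.  E.g. INFO with
 * vid 10 plus RANGE 20 is processed as the vlan range 10-20.
 */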
static int br_vlan_rtm_process_one(struct net_device *dev,
                                   const struct nlattr *attr,
                                   int cmd, struct netlink_ext_ack *extack)
{
        struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL;
        struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1];
        bool changed = false, skip_processing = false;
        struct net_bridge_vlan_group *vg;
        struct net_bridge_port *p = NULL;
        int err = 0, cmdmap = 0;
        struct net_bridge *br;

        if (netif_is_bridge_master(dev)) {
                br = netdev_priv(dev);
                vg = br_vlan_group(br);
        } else {
                p = br_port_get_rtnl(dev);
                if (WARN_ON(!p))
                        return -ENODEV;
                br = p->br;
                vg = nbp_vlan_group(p);
        }

        if (WARN_ON(!vg))
                return -ENODEV;

        err = nla_parse_nested(tb, BRIDGE_VLANDB_ENTRY_MAX, attr,
                               br_vlan_db_policy, extack);
        if (err)
                return err;

        if (!tb[BRIDGE_VLANDB_ENTRY_INFO]) {
                NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry info");
                return -EINVAL;
        }
        memset(&vrange_end, 0, sizeof(vrange_end));

        vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
        if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
                            BRIDGE_VLAN_INFO_RANGE_END)) {
                NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls");
                return -EINVAL;
        }
        if (!br_vlan_valid_id(vinfo->vid, extack))
                return -EINVAL;

        if (tb[BRIDGE_VLANDB_ENTRY_RANGE]) {
                vrange_end.vid = nla_get_u16(tb[BRIDGE_VLANDB_ENTRY_RANGE]);
                /* validate user-provided flags without RANGE_BEGIN */
                vrange_end.flags = BRIDGE_VLAN_INFO_RANGE_END | vinfo->flags;
                vinfo->flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;

                /* vinfo_last is the range start, vinfo the range end */
                vinfo_last = vinfo;
                vinfo = &vrange_end;

                if (!br_vlan_valid_id(vinfo->vid, extack) ||
                    !br_vlan_valid_range(vinfo, vinfo_last, extack))
                        return -EINVAL;
        }

        switch (cmd) {
        case RTM_NEWVLAN:
                cmdmap = RTM_SETLINK;
                skip_processing = !!(vinfo->flags & BRIDGE_VLAN_INFO_ONLY_OPTS);
                break;
        case RTM_DELVLAN:
                cmdmap = RTM_DELLINK;
                break;
        }

        if (!skip_processing) {
                struct bridge_vlan_info *tmp_last = vinfo_last;

                /* br_process_vlan_info may overwrite vinfo_last */
                err = br_process_vlan_info(br, p, cmdmap, vinfo, &tmp_last,
                                           &changed, extack);

                /* notify first if anything changed */
                if (changed)
                        br_ifinfo_notify(cmdmap, br, p);

                if (err)
                        return err;
        }

        /* deal with options */
        if (cmd == RTM_NEWVLAN) {
                struct net_bridge_vlan *range_start, *range_end;

                if (vinfo_last) {
                        range_start = br_vlan_find(vg, vinfo_last->vid);
                        range_end = br_vlan_find(vg, vinfo->vid);
                } else {
                        range_start = br_vlan_find(vg, vinfo->vid);
                        range_end = range_start;
                }

                err = br_vlan_process_options(br, p, range_start, range_end,
                                              tb, extack);
        }

        return err;
}

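/* A single RTM_NEWVLAN/RTM_DELVLAN message may carry several
 * BRIDGE_VLANDB_ENTRY and BRIDGE_VLANDB_GLOBAL_OPTIONS attributes.  They
 * are applied in order and processing stops at the first failure, so
 * earlier attributes of the same message may already have taken effect
 * when an error is returned.
 */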
static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
                               struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct br_vlan_msg *bvm;
        struct net_device *dev;
        struct nlattr *attr;
        int err, vlans = 0;
        int rem;

        /* this should validate the header and check for remaining bytes */
        err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
                          extack);
        if (err < 0)
                return err;

        bvm = nlmsg_data(nlh);
        dev = __dev_get_by_index(net, bvm->ifindex);
        if (!dev)
                return -ENODEV;

        if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
                NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port");
                return -EINVAL;
        }

        nlmsg_for_each_attr(attr, nlh, sizeof(*bvm), rem) {
                switch (nla_type(attr)) {
                case BRIDGE_VLANDB_ENTRY:
                        err = br_vlan_rtm_process_one(dev, attr,
                                                      nlh->nlmsg_type,
                                                      extack);
                        break;
                case BRIDGE_VLANDB_GLOBAL_OPTIONS:
                        err = br_vlan_rtm_process_global_options(dev, attr,
                                                                 nlh->nlmsg_type,
                                                                 extack);
                        break;
                default:
                        continue;
                }

                vlans++;
                if (err)
                        break;
        }
        if (!vlans) {
                NL_SET_ERR_MSG_MOD(extack, "No vlans found to process");
                err = -EINVAL;
        }

        return err;
}

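/* RTM_NEWVLAN and RTM_DELVLAN share the same doit, with nlh->nlmsg_type
 * selecting add vs. delete in br_vlan_rtm_process_one(); RTM_GETVLAN is
 * dump-only.  These are the messages spoken by recent iproute2 for e.g.
 * the "bridge vlan set" and "bridge vlan global set" commands.
 */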
static const struct rtnl_msg_handler br_vlan_rtnl_msg_handlers[] = {
        {THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN, br_vlan_rtm_process, NULL, 0},
        {THIS_MODULE, PF_BRIDGE, RTM_DELVLAN, br_vlan_rtm_process, NULL, 0},
        {THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL, br_vlan_rtm_dump, 0},
};

int br_vlan_rtnl_init(void)
{
        return rtnl_register_many(br_vlan_rtnl_msg_handlers);
}

void br_vlan_rtnl_uninit(void)
{
        rtnl_unregister_many(br_vlan_rtnl_msg_handlers);
}
