GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/net/if_bridge.c

/* $NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $ */

/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1999, 2000 Jason L. Wright ([email protected])
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
 */

/*
 * Network interface bridge support.
 *
 * TODO:
 *
 * - Currently only supports Ethernet-like interfaces (Ethernet,
 *   802.11, VLANs on Ethernet, etc.)  Figure out a nice way
 *   to bridge other types of interfaces (maybe consider
 *   heterogeneous bridges).
 */

#include "opt_inet.h"
80
#include "opt_inet6.h"
81
82
#define EXTERR_CATEGORY EXTERR_CAT_BRIDGE
83
84
#include <sys/param.h>
85
#include <sys/ctype.h> /* string functions */
86
#include <sys/eventhandler.h>
87
#include <sys/exterrvar.h>
88
#include <sys/jail.h>
89
#include <sys/kernel.h>
90
#include <sys/lock.h>
91
#include <sys/malloc.h>
92
#include <sys/mbuf.h>
93
#include <sys/module.h>
94
#include <sys/mutex.h>
95
#include <sys/priv.h>
96
#include <sys/proc.h>
97
#include <sys/protosw.h>
98
#include <sys/random.h>
99
#include <sys/systm.h>
100
#include <sys/socket.h> /* for net/if.h */
101
#include <sys/sockio.h>
102
#include <sys/syslog.h>
103
#include <sys/sysctl.h>
104
#include <sys/time.h>
105
106
#include <vm/uma.h>
107
108
#include <net/bpf.h>
109
#include <net/if.h>
110
#include <net/if_clone.h>
111
#include <net/if_dl.h>
112
#include <net/if_types.h>
113
#include <net/if_var.h>
114
#include <net/if_private.h>
115
#include <net/pfil.h>
116
#include <net/vnet.h>
117
118
#include <netinet/in.h>
119
#include <netinet/in_systm.h>
120
#include <netinet/in_var.h>
121
#include <netinet/ip.h>
122
#include <netinet/ip_var.h>
123
#ifdef INET6
124
#include <netinet/ip6.h>
125
#include <netinet6/ip6_var.h>
126
#include <netinet6/in6_ifattach.h>
127
#endif
128
#if defined(INET) || defined(INET6)
129
#include <netinet/ip_carp.h>
130
#endif
131
#include <machine/in_cksum.h>
132
#include <netinet/if_ether.h>
133
#include <net/bridgestp.h>
134
#include <net/if_bridgevar.h>
135
#include <net/if_llc.h>
136
#include <net/if_vlan_var.h>
137
138
#include <net/route.h>
139
140
/*
 * At various points in the code we need to know if we're hooked into the INET
 * and/or INET6 pfil.  Define some macros to do that based on which IP versions
 * are enabled in the kernel.  This avoids littering the rest of the code with
 * #ifdef INET6 to avoid referencing V_inet6_pfil_head.
 */
146
#ifdef INET6
147
#define PFIL_HOOKED_IN_INET6 PFIL_HOOKED_IN(V_inet6_pfil_head)
148
#define PFIL_HOOKED_OUT_INET6 PFIL_HOOKED_OUT(V_inet6_pfil_head)
149
#else
150
#define PFIL_HOOKED_IN_INET6 false
151
#define PFIL_HOOKED_OUT_INET6 false
152
#endif
153
154
#ifdef INET
155
#define PFIL_HOOKED_IN_INET PFIL_HOOKED_IN(V_inet_pfil_head)
156
#define PFIL_HOOKED_OUT_INET PFIL_HOOKED_OUT(V_inet_pfil_head)
157
#else
158
#define PFIL_HOOKED_IN_INET false
159
#define PFIL_HOOKED_OUT_INET false
160
#endif
161
162
#define PFIL_HOOKED_IN_46 (PFIL_HOOKED_IN_INET6 || PFIL_HOOKED_IN_INET)
163
#define PFIL_HOOKED_OUT_46 (PFIL_HOOKED_OUT_INET6 || PFIL_HOOKED_OUT_INET)
164
165
/*
166
* Size of the route hash table. Must be a power of two.
167
*/
168
#ifndef BRIDGE_RTHASH_SIZE
169
#define BRIDGE_RTHASH_SIZE 1024
170
#endif
171
172
#define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1)
173
174
/*
175
* Default maximum number of addresses to cache.
176
*/
177
#ifndef BRIDGE_RTABLE_MAX
178
#define BRIDGE_RTABLE_MAX 2000
179
#endif
180
181
/*
182
* Timeout (in seconds) for entries learned dynamically.
183
*/
184
#ifndef BRIDGE_RTABLE_TIMEOUT
185
#define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */
186
#endif
187
188
/*
189
* Number of seconds between walks of the route list.
190
*/
191
#ifndef BRIDGE_RTABLE_PRUNE_PERIOD
192
#define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60)
193
#endif
194
195
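/*
 * Illustrative note (not part of the original source): the BRIDGE_RTHASH_*
 * and BRIDGE_RTABLE_* defaults above are wrapped in #ifndef, so they can be
 * overridden at build time, e.g. (assumed build mechanism) by adding
 *
 *	-DBRIDGE_RTABLE_MAX=8192 -DBRIDGE_RTABLE_TIMEOUT=600
 *
 * to the kernel compile flags.  BRIDGE_RTHASH_SIZE must remain a power of
 * two, since lookups mask the hash with BRIDGE_RTHASH_MASK
 * (BRIDGE_RTHASH_SIZE - 1).
 */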
/*
196
* List of capabilities to possibly mask on the member interface.
197
*/
198
#define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM|\
199
IFCAP_TXCSUM_IPV6|IFCAP_MEXTPG)
200
201
/*
202
* List of capabilities to strip
203
*/
204
#define BRIDGE_IFCAPS_STRIP IFCAP_LRO
205
206
/*
 * Bridge locking
 *
 * The bridge relies heavily on the epoch(9) system to protect its data
 * structures. This means we can safely use CK_LISTs while in NET_EPOCH, but we
 * must ensure there is only one writer at a time.
 *
 * That is: for read accesses we only need to be in NET_EPOCH, but for write
 * accesses we must hold:
 *
 *  - BRIDGE_RT_LOCK, for any change to bridge_rtnodes
 *  - BRIDGE_LOCK, for any other change
 *
 * The BRIDGE_LOCK is a sleepable lock, because it is held across ioctl()
 * calls to bridge member interfaces and these ioctl()s can sleep.
 * The BRIDGE_RT_LOCK is a non-sleepable mutex, because it is sometimes
 * required while we're in NET_EPOCH and then we're not allowed to sleep.
 */
224
#define BRIDGE_LOCK_INIT(_sc) do { \
225
sx_init(&(_sc)->sc_sx, "if_bridge"); \
226
mtx_init(&(_sc)->sc_rt_mtx, "if_bridge rt", NULL, MTX_DEF); \
227
} while (0)
228
#define BRIDGE_LOCK_DESTROY(_sc) do { \
229
sx_destroy(&(_sc)->sc_sx); \
230
mtx_destroy(&(_sc)->sc_rt_mtx); \
231
} while (0)
232
#define BRIDGE_LOCK(_sc) sx_xlock(&(_sc)->sc_sx)
233
#define BRIDGE_UNLOCK(_sc) sx_xunlock(&(_sc)->sc_sx)
234
#define BRIDGE_LOCK_ASSERT(_sc) sx_assert(&(_sc)->sc_sx, SX_XLOCKED)
235
#define BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(_sc) \
236
MPASS(in_epoch(net_epoch_preempt) || sx_xlocked(&(_sc)->sc_sx))
237
#define BRIDGE_UNLOCK_ASSERT(_sc) sx_assert(&(_sc)->sc_sx, SX_UNLOCKED)
238
#define BRIDGE_RT_LOCK(_sc) mtx_lock(&(_sc)->sc_rt_mtx)
239
#define BRIDGE_RT_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_rt_mtx)
240
#define BRIDGE_RT_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_rt_mtx, MA_OWNED)
241
#define BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(_sc) \
242
MPASS(in_epoch(net_epoch_preempt) || mtx_owned(&(_sc)->sc_rt_mtx))
243
244
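/*
 * Illustrative sketch (not part of the original source): the typical
 * read-side vs. write-side pattern implied by the locking rules and the
 * macros above.  inspect() is a hypothetical placeholder.
 *
 *	struct epoch_tracker et;
 *
 *	NET_EPOCH_ENTER(et);			// read side: no sleeping
 *	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
 *		inspect(bif);
 *	NET_EPOCH_EXIT(et);
 *
 *	BRIDGE_LOCK(sc);			// write side: sleepable sx lock
 *	CK_LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
 *	BRIDGE_UNLOCK(sc);
 *
 *	BRIDGE_RT_LOCK(sc);			// forwarding-table writes use the mutex
 *	bridge_rtnode_insert(sc, brt);
 *	BRIDGE_RT_UNLOCK(sc);
 */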
struct bridge_softc;
245
246
/*
247
* Bridge interface list entry.
248
*/
249
struct bridge_iflist {
250
CK_LIST_ENTRY(bridge_iflist) bif_next;
251
struct ifnet *bif_ifp; /* member if */
252
struct bridge_softc *bif_sc; /* parent bridge */
253
struct bstp_port bif_stp; /* STP state */
254
uint32_t bif_flags; /* member if flags */
255
int bif_savedcaps; /* saved capabilities */
256
uint32_t bif_addrmax; /* max # of addresses */
257
uint32_t bif_addrcnt; /* cur. # of addresses */
258
uint32_t bif_addrexceeded;/* # of address violations */
259
struct epoch_context bif_epoch_ctx;
260
ether_vlanid_t bif_pvid; /* port vlan id */
261
ifbvlan_set_t bif_vlan_set; /* if allowed tagged vlans */
262
uint16_t bif_vlanproto; /* vlan protocol */
263
};
264
265
/*
266
* Bridge route node.
267
*/
268
struct bridge_rtnode {
269
CK_LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */
270
CK_LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */
271
struct bridge_iflist *brt_dst; /* destination if */
272
unsigned long brt_expire; /* expiration time */
273
uint8_t brt_flags; /* address flags */
274
uint8_t brt_addr[ETHER_ADDR_LEN];
275
ether_vlanid_t brt_vlan; /* vlan id */
276
struct vnet *brt_vnet;
277
struct epoch_context brt_epoch_ctx;
278
};
279
#define brt_ifp brt_dst->bif_ifp
280
281
/*
282
* Software state for each bridge.
283
*/
284
struct bridge_softc {
285
struct ifnet *sc_ifp; /* make this an interface */
286
LIST_ENTRY(bridge_softc) sc_list;
287
struct sx sc_sx;
288
struct mtx sc_rt_mtx;
289
uint32_t sc_brtmax; /* max # of addresses */
290
uint32_t sc_brtcnt; /* cur. # of addresses */
291
uint32_t sc_brttimeout; /* rt timeout in seconds */
292
struct callout sc_brcallout; /* bridge callout */
293
CK_LIST_HEAD(, bridge_iflist) sc_iflist; /* member interface list */
294
CK_LIST_HEAD(, bridge_rtnode) *sc_rthash; /* our forwarding table */
295
CK_LIST_HEAD(, bridge_rtnode) sc_rtlist; /* list version of above */
296
uint32_t sc_rthash_key; /* key for hash */
297
CK_LIST_HEAD(, bridge_iflist) sc_spanlist; /* span ports list */
298
struct bstp_state sc_stp; /* STP state */
299
uint32_t sc_brtexceeded; /* # of cache drops */
300
struct ifnet *sc_ifaddr; /* member mac copied from */
301
struct ether_addr sc_defaddr; /* Default MAC address */
302
if_input_fn_t sc_if_input; /* Saved copy of if_input */
303
struct epoch_context sc_epoch_ctx;
304
ifbr_flags_t sc_flags; /* bridge flags */
305
ether_vlanid_t sc_defpvid; /* default PVID */
306
};
307
308
VNET_DEFINE_STATIC(struct sx, bridge_list_sx);
309
#define V_bridge_list_sx VNET(bridge_list_sx)
310
static eventhandler_tag bridge_detach_cookie;
311
312
int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
313
314
VNET_DEFINE_STATIC(uma_zone_t, bridge_rtnode_zone);
315
#define V_bridge_rtnode_zone VNET(bridge_rtnode_zone)
316
317
static int bridge_clone_create(struct if_clone *, char *, size_t,
318
struct ifc_data *, struct ifnet **);
319
static int bridge_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
320
321
static int bridge_ioctl(struct ifnet *, u_long, caddr_t);
322
static void bridge_mutecaps(struct bridge_softc *);
323
static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
324
int);
325
static void bridge_ifdetach(void *arg __unused, struct ifnet *);
326
static void bridge_init(void *);
327
static void bridge_dummynet(struct mbuf *, struct ifnet *);
328
static bool bridge_same(const void *, const void *);
329
static void *bridge_get_softc(struct ifnet *);
330
static void bridge_stop(struct ifnet *, int);
331
static int bridge_transmit(struct ifnet *, struct mbuf *);
332
#ifdef ALTQ
333
static void bridge_altq_start(if_t);
334
static int bridge_altq_transmit(if_t, struct mbuf *);
335
#endif
336
static void bridge_qflush(struct ifnet *);
337
static struct mbuf *bridge_input(struct ifnet *, struct mbuf *);
338
static void bridge_inject(struct ifnet *, struct mbuf *);
339
static int bridge_output(struct ifnet *, struct mbuf *, struct sockaddr *,
340
struct rtentry *);
341
static int bridge_enqueue(struct bridge_softc *, struct ifnet *,
342
struct mbuf *, struct bridge_iflist *);
343
static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
344
345
static void bridge_forward(struct bridge_softc *, struct bridge_iflist *,
346
struct mbuf *m);
347
static bool bridge_member_ifaddrs(void);
348
static void bridge_timer(void *);
349
350
static void bridge_broadcast(struct bridge_softc *, struct ifnet *,
351
struct mbuf *, int);
352
static void bridge_span(struct bridge_softc *, struct mbuf *);
353
354
static int bridge_rtupdate(struct bridge_softc *, const uint8_t *,
355
ether_vlanid_t, struct bridge_iflist *, int, uint8_t);
356
static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
357
ether_vlanid_t);
358
static void bridge_rttrim(struct bridge_softc *);
359
static void bridge_rtage(struct bridge_softc *);
360
static void bridge_rtflush(struct bridge_softc *, int);
361
static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
362
ether_vlanid_t);
363
static bool bridge_vfilter_in(const struct bridge_iflist *, struct mbuf *);
364
static bool bridge_vfilter_out(const struct bridge_iflist *,
365
const struct mbuf *);
366
367
static void bridge_rtable_init(struct bridge_softc *);
368
static void bridge_rtable_fini(struct bridge_softc *);
369
370
static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
371
static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
372
const uint8_t *, ether_vlanid_t);
373
static int bridge_rtnode_insert(struct bridge_softc *,
374
struct bridge_rtnode *);
375
static void bridge_rtnode_destroy(struct bridge_softc *,
376
struct bridge_rtnode *);
377
static void bridge_rtable_expire(struct ifnet *, int);
378
static void bridge_state_change(struct ifnet *, int);
379
380
static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
381
const char *name);
382
static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
383
struct ifnet *ifp);
384
static void bridge_delete_member(struct bridge_softc *,
385
struct bridge_iflist *, int);
386
static void bridge_delete_span(struct bridge_softc *,
387
struct bridge_iflist *);
388
389
static int bridge_ioctl_add(struct bridge_softc *, void *);
390
static int bridge_ioctl_del(struct bridge_softc *, void *);
391
static int bridge_ioctl_gifflags(struct bridge_softc *, void *);
392
static int bridge_ioctl_sifflags(struct bridge_softc *, void *);
393
static int bridge_ioctl_scache(struct bridge_softc *, void *);
394
static int bridge_ioctl_gcache(struct bridge_softc *, void *);
395
static int bridge_ioctl_gifs(struct bridge_softc *, void *);
396
static int bridge_ioctl_rts(struct bridge_softc *, void *);
397
static int bridge_ioctl_saddr(struct bridge_softc *, void *);
398
static int bridge_ioctl_sto(struct bridge_softc *, void *);
399
static int bridge_ioctl_gto(struct bridge_softc *, void *);
400
static int bridge_ioctl_daddr(struct bridge_softc *, void *);
401
static int bridge_ioctl_flush(struct bridge_softc *, void *);
402
static int bridge_ioctl_gpri(struct bridge_softc *, void *);
403
static int bridge_ioctl_spri(struct bridge_softc *, void *);
404
static int bridge_ioctl_ght(struct bridge_softc *, void *);
405
static int bridge_ioctl_sht(struct bridge_softc *, void *);
406
static int bridge_ioctl_gfd(struct bridge_softc *, void *);
407
static int bridge_ioctl_sfd(struct bridge_softc *, void *);
408
static int bridge_ioctl_gma(struct bridge_softc *, void *);
409
static int bridge_ioctl_sma(struct bridge_softc *, void *);
410
static int bridge_ioctl_sifprio(struct bridge_softc *, void *);
411
static int bridge_ioctl_sifcost(struct bridge_softc *, void *);
412
static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
413
static int bridge_ioctl_sifpvid(struct bridge_softc *, void *);
414
static int bridge_ioctl_sifvlanset(struct bridge_softc *, void *);
415
static int bridge_ioctl_gifvlanset(struct bridge_softc *, void *);
416
static int bridge_ioctl_addspan(struct bridge_softc *, void *);
417
static int bridge_ioctl_delspan(struct bridge_softc *, void *);
418
static int bridge_ioctl_gbparam(struct bridge_softc *, void *);
419
static int bridge_ioctl_grte(struct bridge_softc *, void *);
420
static int bridge_ioctl_gifsstp(struct bridge_softc *, void *);
421
static int bridge_ioctl_sproto(struct bridge_softc *, void *);
422
static int bridge_ioctl_stxhc(struct bridge_softc *, void *);
423
static int bridge_ioctl_gflags(struct bridge_softc *, void *);
424
static int bridge_ioctl_sflags(struct bridge_softc *, void *);
425
static int bridge_ioctl_gdefpvid(struct bridge_softc *, void *);
426
static int bridge_ioctl_sdefpvid(struct bridge_softc *, void *);
427
static int bridge_ioctl_svlanproto(struct bridge_softc *, void *);
428
static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
429
int);
430
#ifdef INET
431
static int bridge_ip_checkbasic(struct mbuf **mp);
432
static int bridge_fragment(struct ifnet *, struct mbuf **mp,
433
struct ether_header *, int, struct llc *);
434
#endif /* INET */
435
#ifdef INET6
436
static int bridge_ip6_checkbasic(struct mbuf **mp);
437
#endif /* INET6 */
438
static void bridge_linkstate(struct ifnet *ifp);
439
static void bridge_linkcheck(struct bridge_softc *sc);
440
441
/*
 * Use the "null" value from IEEE 802.1Q-2014 Table 9-2
 * to indicate untagged frames.
 */
#define	VLANTAGOF(_m) \
	((_m->m_flags & M_VLANTAG) ? EVL_VLANOFTAG(_m->m_pkthdr.ether_vtag) : DOT1Q_VID_NULL)
447
448
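/*
 * Illustrative sketch (not part of the original source): VLANTAGOF() yields
 * the 802.1Q VLAN ID carried in the mbuf packet header, or DOT1Q_VID_NULL
 * when the frame is untagged, e.g.:
 *
 *	ether_vlanid_t vlan = VLANTAGOF(m);
 *
 *	if (vlan == DOT1Q_VID_NULL)
 *		vlan = bif->bif_pvid;	// untagged: fall back to the port PVID
 */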
static struct bstp_cb_ops bridge_ops = {
449
.bcb_state = bridge_state_change,
450
.bcb_rtage = bridge_rtable_expire
451
};
452
453
SYSCTL_DECL(_net_link);
454
static SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
455
"Bridge");
456
457
/* only pass IP[46] packets when pfil is enabled */
458
VNET_DEFINE_STATIC(int, pfil_onlyip) = 1;
459
#define V_pfil_onlyip VNET(pfil_onlyip)
460
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip,
461
CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_onlyip), 0,
462
"Only pass IP packets when pfil is enabled");
463
464
/* run pfil hooks on the bridge interface */
465
VNET_DEFINE_STATIC(int, pfil_bridge) = 0;
466
#define V_pfil_bridge VNET(pfil_bridge)
467
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge,
468
CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_bridge), 0,
469
"Packet filter on the bridge interface");
470
471
/* layer2 filter with ipfw */
472
VNET_DEFINE_STATIC(int, pfil_ipfw);
473
#define V_pfil_ipfw VNET(pfil_ipfw)
474
475
/* layer2 ARP filter with ipfw */
476
VNET_DEFINE_STATIC(int, pfil_ipfw_arp);
477
#define V_pfil_ipfw_arp VNET(pfil_ipfw_arp)
478
SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp,
479
CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_ipfw_arp), 0,
480
"Filter ARP packets through IPFW layer2");
481
482
/* run pfil hooks on the member interface */
483
VNET_DEFINE_STATIC(int, pfil_member) = 0;
484
#define V_pfil_member VNET(pfil_member)
485
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member,
486
CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_member), 0,
487
"Packet filter on the member interface");
488
489
/* run pfil hooks on the physical interface for locally destined packets */
490
VNET_DEFINE_STATIC(int, pfil_local_phys);
491
#define V_pfil_local_phys VNET(pfil_local_phys)
492
SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
493
CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(pfil_local_phys), 0,
494
"Packet filter on the physical interface for locally destined packets");
495
496
/* log STP state changes */
497
VNET_DEFINE_STATIC(int, log_stp);
498
#define V_log_stp VNET(log_stp)
499
SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp,
500
CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(log_stp), 0,
501
"Log STP state changes");
502
503
/* share MAC with first bridge member */
504
VNET_DEFINE_STATIC(int, bridge_inherit_mac);
505
#define V_bridge_inherit_mac VNET(bridge_inherit_mac)
506
SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac,
507
CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(bridge_inherit_mac), 0,
508
"Inherit MAC address from the first bridge member");
509
510
VNET_DEFINE_STATIC(int, allow_llz_overlap) = 0;
511
#define V_allow_llz_overlap VNET(allow_llz_overlap)
512
SYSCTL_INT(_net_link_bridge, OID_AUTO, allow_llz_overlap,
513
CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(allow_llz_overlap), 0,
514
"Allow overlap of link-local scope "
515
"zones of a bridge interface and the member interfaces");
516
517
/* log MAC address port flapping */
518
VNET_DEFINE_STATIC(bool, log_mac_flap) = true;
519
#define V_log_mac_flap VNET(log_mac_flap)
520
SYSCTL_BOOL(_net_link_bridge, OID_AUTO, log_mac_flap,
521
CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(log_mac_flap), true,
522
"Log MAC address port flapping");
523
524
/* allow IP addresses on bridge members */
525
VNET_DEFINE_STATIC(bool, member_ifaddrs) = true;
526
#define V_member_ifaddrs VNET(member_ifaddrs)
527
SYSCTL_BOOL(_net_link_bridge, OID_AUTO, member_ifaddrs,
528
CTLFLAG_RW | CTLFLAG_VNET, &VNET_NAME(member_ifaddrs), false,
529
"Allow layer 3 addresses on bridge members (deprecated)");
530
531
static bool
532
bridge_member_ifaddrs(void)
533
{
534
return (V_member_ifaddrs);
535
}
536
537
VNET_DEFINE_STATIC(int, log_interval) = 5;
538
VNET_DEFINE_STATIC(int, log_count) = 0;
539
VNET_DEFINE_STATIC(struct timeval, log_last) = { 0 };
540
541
#define V_log_interval VNET(log_interval)
542
#define V_log_count VNET(log_count)
543
#define V_log_last VNET(log_last)
544
545
struct bridge_control {
546
int (*bc_func)(struct bridge_softc *, void *);
547
int bc_argsize;
548
int bc_flags;
549
};
550
551
#define BC_F_COPYIN 0x01 /* copy arguments in */
552
#define BC_F_COPYOUT 0x02 /* copy arguments out */
553
#define BC_F_SUSER 0x04 /* do super-user check */
554
555
static const struct bridge_control bridge_control_table[] = {
556
{ bridge_ioctl_add, sizeof(struct ifbreq),
557
BC_F_COPYIN|BC_F_SUSER },
558
{ bridge_ioctl_del, sizeof(struct ifbreq),
559
BC_F_COPYIN|BC_F_SUSER },
560
561
{ bridge_ioctl_gifflags, sizeof(struct ifbreq),
562
BC_F_COPYIN|BC_F_COPYOUT },
563
{ bridge_ioctl_sifflags, sizeof(struct ifbreq),
564
BC_F_COPYIN|BC_F_SUSER },
565
566
{ bridge_ioctl_scache, sizeof(struct ifbrparam),
567
BC_F_COPYIN|BC_F_SUSER },
568
{ bridge_ioctl_gcache, sizeof(struct ifbrparam),
569
BC_F_COPYOUT },
570
571
{ bridge_ioctl_gifs, sizeof(struct ifbifconf),
572
BC_F_COPYIN|BC_F_COPYOUT },
573
{ bridge_ioctl_rts, sizeof(struct ifbaconf),
574
BC_F_COPYIN|BC_F_COPYOUT },
575
576
{ bridge_ioctl_saddr, sizeof(struct ifbareq),
577
BC_F_COPYIN|BC_F_SUSER },
578
579
{ bridge_ioctl_sto, sizeof(struct ifbrparam),
580
BC_F_COPYIN|BC_F_SUSER },
581
{ bridge_ioctl_gto, sizeof(struct ifbrparam),
582
BC_F_COPYOUT },
583
584
{ bridge_ioctl_daddr, sizeof(struct ifbareq),
585
BC_F_COPYIN|BC_F_SUSER },
586
587
{ bridge_ioctl_flush, sizeof(struct ifbreq),
588
BC_F_COPYIN|BC_F_SUSER },
589
590
{ bridge_ioctl_gpri, sizeof(struct ifbrparam),
591
BC_F_COPYOUT },
592
{ bridge_ioctl_spri, sizeof(struct ifbrparam),
593
BC_F_COPYIN|BC_F_SUSER },
594
595
{ bridge_ioctl_ght, sizeof(struct ifbrparam),
596
BC_F_COPYOUT },
597
{ bridge_ioctl_sht, sizeof(struct ifbrparam),
598
BC_F_COPYIN|BC_F_SUSER },
599
600
{ bridge_ioctl_gfd, sizeof(struct ifbrparam),
601
BC_F_COPYOUT },
602
{ bridge_ioctl_sfd, sizeof(struct ifbrparam),
603
BC_F_COPYIN|BC_F_SUSER },
604
605
{ bridge_ioctl_gma, sizeof(struct ifbrparam),
606
BC_F_COPYOUT },
607
{ bridge_ioctl_sma, sizeof(struct ifbrparam),
608
BC_F_COPYIN|BC_F_SUSER },
609
610
{ bridge_ioctl_sifprio, sizeof(struct ifbreq),
611
BC_F_COPYIN|BC_F_SUSER },
612
613
{ bridge_ioctl_sifcost, sizeof(struct ifbreq),
614
BC_F_COPYIN|BC_F_SUSER },
615
616
{ bridge_ioctl_addspan, sizeof(struct ifbreq),
617
BC_F_COPYIN|BC_F_SUSER },
618
{ bridge_ioctl_delspan, sizeof(struct ifbreq),
619
BC_F_COPYIN|BC_F_SUSER },
620
621
{ bridge_ioctl_gbparam, sizeof(struct ifbropreq),
622
BC_F_COPYOUT },
623
624
{ bridge_ioctl_grte, sizeof(struct ifbrparam),
625
BC_F_COPYOUT },
626
627
{ bridge_ioctl_gifsstp, sizeof(struct ifbpstpconf),
628
BC_F_COPYIN|BC_F_COPYOUT },
629
630
{ bridge_ioctl_sproto, sizeof(struct ifbrparam),
631
BC_F_COPYIN|BC_F_SUSER },
632
633
{ bridge_ioctl_stxhc, sizeof(struct ifbrparam),
634
BC_F_COPYIN|BC_F_SUSER },
635
636
{ bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq),
637
BC_F_COPYIN|BC_F_SUSER },
638
639
{ bridge_ioctl_sifpvid, sizeof(struct ifbreq),
640
BC_F_COPYIN|BC_F_SUSER },
641
642
{ bridge_ioctl_sifvlanset, sizeof(struct ifbif_vlan_req),
643
BC_F_COPYIN|BC_F_SUSER },
644
645
{ bridge_ioctl_gifvlanset, sizeof(struct ifbif_vlan_req),
646
BC_F_COPYIN|BC_F_COPYOUT },
647
648
{ bridge_ioctl_gflags, sizeof(struct ifbrparam),
649
BC_F_COPYOUT },
650
651
{ bridge_ioctl_sflags, sizeof(struct ifbrparam),
652
BC_F_COPYIN|BC_F_SUSER },
653
654
{ bridge_ioctl_gdefpvid, sizeof(struct ifbrparam),
655
BC_F_COPYOUT },
656
657
{ bridge_ioctl_sdefpvid, sizeof(struct ifbrparam),
658
BC_F_COPYIN|BC_F_SUSER },
659
660
{ bridge_ioctl_svlanproto, sizeof(struct ifbreq),
661
BC_F_COPYIN|BC_F_SUSER },
662
};
663
static const int bridge_control_table_size = nitems(bridge_control_table);
664
665
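/*
 * Illustrative sketch (not part of the original source): the table above is
 * indexed by the ifd_cmd field of a struct ifdrv passed through the
 * SIOCSDRVSPEC/SIOCGDRVSPEC ioctls; ifconfig(8)'s bridge support does
 * essentially this.  Under those assumptions, a userspace caller looks
 * roughly like:
 *
 *	struct ifbreq req = { 0 };
 *	struct ifdrv ifd = { 0 };
 *
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof(ifd.ifd_name));
 *	strlcpy(req.ifbr_ifsname, "em0", sizeof(req.ifbr_ifsname));
 *	ifd.ifd_cmd = BRDGADD;		// index of bridge_ioctl_add above
 *	ifd.ifd_len = sizeof(req);
 *	ifd.ifd_data = &req;
 *	ioctl(s, SIOCSDRVSPEC, &ifd);	// s: an ordinary AF_INET/AF_LOCAL socket
 */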
VNET_DEFINE_STATIC(LIST_HEAD(, bridge_softc), bridge_list) =
666
LIST_HEAD_INITIALIZER();
667
#define V_bridge_list VNET(bridge_list)
668
#define BRIDGE_LIST_LOCK_INIT(x) sx_init(&V_bridge_list_sx, \
669
"if_bridge list")
670
#define BRIDGE_LIST_LOCK_DESTROY(x) sx_destroy(&V_bridge_list_sx)
671
#define BRIDGE_LIST_LOCK(x) sx_xlock(&V_bridge_list_sx)
672
#define BRIDGE_LIST_UNLOCK(x) sx_xunlock(&V_bridge_list_sx)
673
674
VNET_DEFINE_STATIC(struct if_clone *, bridge_cloner);
675
#define V_bridge_cloner VNET(bridge_cloner)
676
677
static const char bridge_name[] = "bridge";
678
679
static void
680
vnet_bridge_init(const void *unused __unused)
681
{
682
683
V_bridge_rtnode_zone = uma_zcreate("bridge_rtnode",
684
sizeof(struct bridge_rtnode), NULL, NULL, NULL, NULL,
685
UMA_ALIGN_PTR, 0);
686
BRIDGE_LIST_LOCK_INIT();
687
688
struct if_clone_addreq req = {
689
.create_f = bridge_clone_create,
690
.destroy_f = bridge_clone_destroy,
691
.flags = IFC_F_AUTOUNIT,
692
};
693
V_bridge_cloner = ifc_attach_cloner(bridge_name, &req);
694
}
695
VNET_SYSINIT(vnet_bridge_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
696
vnet_bridge_init, NULL);
697
698
static void
699
vnet_bridge_uninit(const void *unused __unused)
700
{
701
702
ifc_detach_cloner(V_bridge_cloner);
703
V_bridge_cloner = NULL;
704
BRIDGE_LIST_LOCK_DESTROY();
705
706
/* Callbacks may use the UMA zone. */
707
NET_EPOCH_DRAIN_CALLBACKS();
708
709
uma_zdestroy(V_bridge_rtnode_zone);
710
}
711
VNET_SYSUNINIT(vnet_bridge_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY,
712
vnet_bridge_uninit, NULL);
713
714
static int
715
bridge_modevent(module_t mod, int type, void *data)
716
{
717
718
switch (type) {
719
case MOD_LOAD:
720
bridge_dn_p = bridge_dummynet;
721
bridge_same_p = bridge_same;
722
bridge_get_softc_p = bridge_get_softc;
723
bridge_member_ifaddrs_p = bridge_member_ifaddrs;
724
bridge_detach_cookie = EVENTHANDLER_REGISTER(
725
ifnet_departure_event, bridge_ifdetach, NULL,
726
EVENTHANDLER_PRI_ANY);
727
break;
728
case MOD_UNLOAD:
729
EVENTHANDLER_DEREGISTER(ifnet_departure_event,
730
bridge_detach_cookie);
731
bridge_dn_p = NULL;
732
bridge_same_p = NULL;
733
bridge_get_softc_p = NULL;
734
bridge_member_ifaddrs_p = NULL;
735
break;
736
default:
737
return (EOPNOTSUPP);
738
}
739
return (0);
740
}
741
742
static moduledata_t bridge_mod = {
743
"if_bridge",
744
bridge_modevent,
745
0
746
};
747
748
DECLARE_MODULE(if_bridge, bridge_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
749
MODULE_VERSION(if_bridge, 1);
750
MODULE_DEPEND(if_bridge, bridgestp, 1, 1, 1);
751
752
/*
753
* handler for net.link.bridge.ipfw
754
*/
755
static int
756
sysctl_pfil_ipfw(SYSCTL_HANDLER_ARGS)
757
{
758
int enable = V_pfil_ipfw;
759
int error;
760
761
error = sysctl_handle_int(oidp, &enable, 0, req);
762
enable &= 1;
763
764
if (enable != V_pfil_ipfw) {
765
V_pfil_ipfw = enable;
766
767
/*
* Disable pfil so that ipfw doesn't run twice; if the user
769
* really wants both then they can re-enable pfil_bridge and/or
770
* pfil_member. Also allow non-ip packets as ipfw can filter by
771
* layer2 type.
772
*/
773
if (V_pfil_ipfw) {
774
V_pfil_onlyip = 0;
775
V_pfil_bridge = 0;
776
V_pfil_member = 0;
777
}
778
}
779
780
return (error);
781
}
782
SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw,
783
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_VNET | CTLFLAG_NEEDGIANT,
784
&VNET_NAME(pfil_ipfw), 0, &sysctl_pfil_ipfw, "I",
785
"Layer2 filter with IPFW");
786
787
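/*
 * Illustrative sketch (not part of the original source): enabling the knob
 * registered above turns off the pfil-based filtering sysctls so that
 * ipfw(4) does not see the same packet twice, e.g.:
 *
 *	# sysctl net.link.bridge.ipfw=1
 *	net.link.bridge.ipfw: 0 -> 1
 *	# sysctl net.link.bridge.pfil_bridge net.link.bridge.pfil_member
 *	net.link.bridge.pfil_bridge: 0
 *	net.link.bridge.pfil_member: 0
 */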
#ifdef VIMAGE
788
static void
789
bridge_reassign(struct ifnet *ifp, struct vnet *newvnet, char *arg)
790
{
791
struct bridge_softc *sc = ifp->if_softc;
792
struct bridge_iflist *bif;
793
794
BRIDGE_LOCK(sc);
795
796
while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
797
bridge_delete_member(sc, bif, 0);
798
799
while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
800
bridge_delete_span(sc, bif);
801
}
802
803
BRIDGE_UNLOCK(sc);
804
}
805
#endif
806
807
/*
808
* bridge_get_softc:
809
*
810
* Return the bridge softc for an ifnet.
811
*/
812
static void *
813
bridge_get_softc(struct ifnet *ifp)
814
{
815
struct bridge_iflist *bif;
816
817
NET_EPOCH_ASSERT();
818
819
bif = ifp->if_bridge;
820
if (bif == NULL)
821
return (NULL);
822
return (bif->bif_sc);
823
}
824
825
/*
826
* bridge_same:
827
*
828
* Return true if two interfaces are in the same bridge. This is only used by
829
* bridgestp via bridge_same_p.
830
*/
831
static bool
832
bridge_same(const void *bifap, const void *bifbp)
833
{
834
const struct bridge_iflist *bifa = bifap, *bifb = bifbp;
835
836
NET_EPOCH_ASSERT();
837
838
if (bifa == NULL || bifb == NULL)
839
return (false);
840
841
return (bifa->bif_sc == bifb->bif_sc);
842
}
843
844
/*
845
* bridge_clone_create:
846
*
847
* Create a new bridge instance.
848
*/
849
static int
850
bridge_clone_create(struct if_clone *ifc, char *name, size_t len,
851
struct ifc_data *ifd, struct ifnet **ifpp)
852
{
853
struct bridge_softc *sc;
854
struct ifnet *ifp;
855
856
sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
857
ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
858
859
BRIDGE_LOCK_INIT(sc);
860
sc->sc_brtmax = BRIDGE_RTABLE_MAX;
861
sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
862
863
/* Initialize our routing table. */
864
bridge_rtable_init(sc);
865
866
callout_init_mtx(&sc->sc_brcallout, &sc->sc_rt_mtx, 0);
867
868
CK_LIST_INIT(&sc->sc_iflist);
869
CK_LIST_INIT(&sc->sc_spanlist);
870
871
ifp->if_softc = sc;
872
if_initname(ifp, bridge_name, ifd->unit);
873
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
874
ifp->if_capabilities = ifp->if_capenable = IFCAP_VLAN_HWTAGGING;
875
ifp->if_ioctl = bridge_ioctl;
876
#ifdef ALTQ
877
ifp->if_start = bridge_altq_start;
878
ifp->if_transmit = bridge_altq_transmit;
879
IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
880
ifp->if_snd.ifq_drv_maxlen = 0;
881
IFQ_SET_READY(&ifp->if_snd);
882
#else
883
ifp->if_transmit = bridge_transmit;
884
#endif
885
ifp->if_qflush = bridge_qflush;
886
ifp->if_init = bridge_init;
887
ifp->if_type = IFT_BRIDGE;
888
889
ether_gen_addr(ifp, &sc->sc_defaddr);
890
891
bstp_attach(&sc->sc_stp, &bridge_ops);
892
ether_ifattach(ifp, sc->sc_defaddr.octet);
893
/* Now undo some of the damage... */
894
ifp->if_baudrate = 0;
895
#ifdef VIMAGE
896
ifp->if_reassign = bridge_reassign;
897
#endif
898
sc->sc_if_input = ifp->if_input; /* ether_input */
899
ifp->if_input = bridge_inject;
900
901
/*
902
* Allow BRIDGE_INPUT() to pass in packets originating from the bridge
903
* itself via bridge_inject(). This is required for netmap but
904
* otherwise has no effect.
905
*/
906
ifp->if_bridge_input = bridge_input;
907
908
BRIDGE_LIST_LOCK();
909
LIST_INSERT_HEAD(&V_bridge_list, sc, sc_list);
910
BRIDGE_LIST_UNLOCK();
911
*ifpp = ifp;
912
913
return (0);
914
}
915
916
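/*
 * Illustrative sketch (not part of the original source): the cloner attached
 * in vnet_bridge_init() backs the usual administrative flow, e.g.:
 *
 *	# ifconfig bridge create
 *	bridge0
 *	# ifconfig bridge0 addm em0 addm em1 up
 *
 * Each "ifconfig bridge create" ends up in bridge_clone_create() above.
 */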
static void
917
bridge_clone_destroy_cb(struct epoch_context *ctx)
918
{
919
struct bridge_softc *sc;
920
921
sc = __containerof(ctx, struct bridge_softc, sc_epoch_ctx);
922
923
BRIDGE_LOCK_DESTROY(sc);
924
free(sc, M_DEVBUF);
925
}
926
927
/*
928
* bridge_clone_destroy:
929
*
930
* Destroy a bridge instance.
931
*/
932
static int
933
bridge_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
934
{
935
struct bridge_softc *sc = ifp->if_softc;
936
struct bridge_iflist *bif;
937
struct epoch_tracker et;
938
939
BRIDGE_LOCK(sc);
940
941
bridge_stop(ifp, 1);
942
ifp->if_flags &= ~IFF_UP;
943
944
while ((bif = CK_LIST_FIRST(&sc->sc_iflist)) != NULL)
945
bridge_delete_member(sc, bif, 0);
946
947
while ((bif = CK_LIST_FIRST(&sc->sc_spanlist)) != NULL) {
948
bridge_delete_span(sc, bif);
949
}
950
951
/* Tear down the routing table. */
952
bridge_rtable_fini(sc);
953
954
BRIDGE_UNLOCK(sc);
955
956
NET_EPOCH_ENTER(et);
957
958
callout_drain(&sc->sc_brcallout);
959
960
BRIDGE_LIST_LOCK();
961
LIST_REMOVE(sc, sc_list);
962
BRIDGE_LIST_UNLOCK();
963
964
bstp_detach(&sc->sc_stp);
965
#ifdef ALTQ
966
IFQ_PURGE(&ifp->if_snd);
967
#endif
968
NET_EPOCH_EXIT(et);
969
970
ether_ifdetach(ifp);
971
if_free(ifp);
972
973
NET_EPOCH_CALL(bridge_clone_destroy_cb, &sc->sc_epoch_ctx);
974
975
return (0);
976
}
977
978
/*
979
* bridge_ioctl:
980
*
981
* Handle a control request from the operator.
982
*/
983
static int
984
bridge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
985
{
986
struct bridge_softc *sc = ifp->if_softc;
987
struct ifreq *ifr = (struct ifreq *)data;
988
struct bridge_iflist *bif;
989
struct thread *td = curthread;
990
union {
991
struct ifbreq ifbreq;
992
struct ifbifconf ifbifconf;
993
struct ifbareq ifbareq;
994
struct ifbaconf ifbaconf;
995
struct ifbrparam ifbrparam;
996
struct ifbropreq ifbropreq;
997
struct ifbif_vlan_req ifvlanreq;
998
} args;
999
struct ifdrv *ifd = (struct ifdrv *) data;
1000
const struct bridge_control *bc;
1001
int error = 0, oldmtu;
1002
1003
BRIDGE_LOCK(sc);
1004
1005
switch (cmd) {
1006
case SIOCADDMULTI:
1007
case SIOCDELMULTI:
1008
break;
1009
1010
case SIOCGDRVSPEC:
1011
case SIOCSDRVSPEC:
1012
if (ifd->ifd_cmd >= bridge_control_table_size) {
1013
error = EXTERROR(EINVAL, "Invalid control command");
1014
break;
1015
}
1016
bc = &bridge_control_table[ifd->ifd_cmd];
1017
1018
if (cmd == SIOCGDRVSPEC &&
1019
(bc->bc_flags & BC_F_COPYOUT) == 0) {
1020
error = EXTERROR(EINVAL,
1021
"Inappropriate ioctl for command "
1022
"(expected SIOCSDRVSPEC)");
1023
break;
1024
}
1025
else if (cmd == SIOCSDRVSPEC &&
1026
(bc->bc_flags & BC_F_COPYOUT) != 0) {
1027
error = EXTERROR(EINVAL,
1028
"Inappropriate ioctl for command "
1029
"(expected SIOCGDRVSPEC)");
1030
break;
1031
}
1032
1033
if (bc->bc_flags & BC_F_SUSER) {
1034
error = priv_check(td, PRIV_NET_BRIDGE);
1035
if (error) {
1036
EXTERROR(error, "PRIV_NET_BRIDGE required");
1037
break;
1038
}
1039
}
1040
1041
if (ifd->ifd_len != bc->bc_argsize ||
1042
ifd->ifd_len > sizeof(args)) {
1043
error = EXTERROR(EINVAL, "Invalid argument size");
1044
break;
1045
}
1046
1047
bzero(&args, sizeof(args));
1048
if (bc->bc_flags & BC_F_COPYIN) {
1049
error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
1050
if (error)
1051
break;
1052
}
1053
1054
oldmtu = ifp->if_mtu;
1055
error = (*bc->bc_func)(sc, &args);
1056
if (error)
1057
break;
1058
1059
/*
1060
* Bridge MTU may change during addition of the first port.
1061
* If it did, do network layer specific procedure.
1062
*/
1063
if (ifp->if_mtu != oldmtu)
1064
if_notifymtu(ifp);
1065
1066
if (bc->bc_flags & BC_F_COPYOUT)
1067
error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
1068
1069
break;
1070
1071
case SIOCSIFFLAGS:
1072
if (!(ifp->if_flags & IFF_UP) &&
1073
(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1074
/*
1075
* If interface is marked down and it is running,
1076
* then stop and disable it.
1077
*/
1078
bridge_stop(ifp, 1);
1079
} else if ((ifp->if_flags & IFF_UP) &&
1080
!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1081
/*
1082
* If interface is marked up and it is stopped, then
1083
* start it.
1084
*/
1085
BRIDGE_UNLOCK(sc);
1086
(*ifp->if_init)(sc);
1087
BRIDGE_LOCK(sc);
1088
}
1089
break;
1090
1091
case SIOCSIFMTU:
1092
oldmtu = sc->sc_ifp->if_mtu;
1093
1094
if (ifr->ifr_mtu < IF_MINMTU) {
1095
error = EXTERROR(EINVAL,
1096
"Requested MTU is lower than IF_MINMTU");
1097
break;
1098
}
1099
if (CK_LIST_EMPTY(&sc->sc_iflist)) {
1100
sc->sc_ifp->if_mtu = ifr->ifr_mtu;
1101
break;
1102
}
1103
CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1104
error = (*bif->bif_ifp->if_ioctl)(bif->bif_ifp,
1105
SIOCSIFMTU, (caddr_t)ifr);
1106
if (error != 0) {
1107
log(LOG_NOTICE, "%s: invalid MTU: %u for"
1108
" member %s\n", sc->sc_ifp->if_xname,
1109
ifr->ifr_mtu,
1110
bif->bif_ifp->if_xname);
1111
error = EINVAL;
1112
break;
1113
}
1114
}
1115
if (error) {
1116
/* Restore the previous MTU on all member interfaces. */
1117
ifr->ifr_mtu = oldmtu;
1118
CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1119
(*bif->bif_ifp->if_ioctl)(bif->bif_ifp,
1120
SIOCSIFMTU, (caddr_t)ifr);
1121
}
1122
EXTERROR(error,
1123
"Failed to set MTU on member interface");
1124
} else {
1125
sc->sc_ifp->if_mtu = ifr->ifr_mtu;
1126
}
1127
break;
1128
default:
1129
/*
1130
* drop the lock as ether_ioctl() will call bridge_start() and
1131
* cause the lock to be recursed.
1132
*/
1133
BRIDGE_UNLOCK(sc);
1134
error = ether_ioctl(ifp, cmd, data);
1135
BRIDGE_LOCK(sc);
1136
break;
1137
}
1138
1139
BRIDGE_UNLOCK(sc);
1140
1141
return (error);
1142
}
1143
1144
/*
1145
* bridge_mutecaps:
1146
*
1147
* Clear or restore unwanted capabilities on the member interface
1148
*/
1149
static void
1150
bridge_mutecaps(struct bridge_softc *sc)
1151
{
1152
struct bridge_iflist *bif;
1153
int enabled, mask;
1154
1155
BRIDGE_LOCK_ASSERT(sc);
1156
1157
/* Initial bitmask of capabilities to test */
1158
mask = BRIDGE_IFCAPS_MASK;
1159
1160
CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1161
/* Every member must support it or it's disabled */
1162
mask &= bif->bif_savedcaps;
1163
}
1164
1165
CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1166
enabled = bif->bif_ifp->if_capenable;
1167
enabled &= ~BRIDGE_IFCAPS_STRIP;
1168
/* Strip off mask bits and enable them again if allowed */
1169
enabled &= ~BRIDGE_IFCAPS_MASK;
1170
enabled |= mask;
1171
bridge_set_ifcap(sc, bif, enabled);
1172
}
1173
}
1174
1175
static void
1176
bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
1177
{
1178
struct ifnet *ifp = bif->bif_ifp;
1179
struct ifreq ifr;
1180
int error, mask, stuck;
1181
1182
bzero(&ifr, sizeof(ifr));
1183
ifr.ifr_reqcap = set;
1184
1185
if (ifp->if_capenable != set) {
1186
error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
1187
if (error)
1188
if_printf(sc->sc_ifp,
1189
"error setting capabilities on %s: %d\n",
1190
ifp->if_xname, error);
1191
mask = BRIDGE_IFCAPS_MASK | BRIDGE_IFCAPS_STRIP;
1192
stuck = ifp->if_capenable & mask & ~set;
1193
if (stuck != 0)
1194
if_printf(sc->sc_ifp,
1195
"can't disable some capabilities on %s: 0x%x\n",
1196
ifp->if_xname, stuck);
1197
}
1198
}
1199
1200
/*
1201
* bridge_lookup_member:
1202
*
1203
* Lookup a bridge member interface.
1204
*/
1205
static struct bridge_iflist *
1206
bridge_lookup_member(struct bridge_softc *sc, const char *name)
1207
{
1208
struct bridge_iflist *bif;
1209
struct ifnet *ifp;
1210
1211
BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
1212
1213
CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1214
ifp = bif->bif_ifp;
1215
if (strcmp(ifp->if_xname, name) == 0)
1216
return (bif);
1217
}
1218
1219
return (NULL);
1220
}
1221
1222
/*
1223
* bridge_lookup_member_if:
1224
*
1225
* Lookup a bridge member interface by ifnet*.
1226
*/
1227
static struct bridge_iflist *
1228
bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
1229
{
1230
BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
1231
return (member_ifp->if_bridge);
1232
}
1233
1234
static void
1235
bridge_delete_member_cb(struct epoch_context *ctx)
1236
{
1237
struct bridge_iflist *bif;
1238
1239
bif = __containerof(ctx, struct bridge_iflist, bif_epoch_ctx);
1240
1241
free(bif, M_DEVBUF);
1242
}
1243
1244
/*
1245
* bridge_delete_member:
1246
*
1247
* Delete the specified member interface.
1248
*/
1249
static void
1250
bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
1251
int gone)
1252
{
1253
struct ifnet *ifs = bif->bif_ifp;
1254
struct ifnet *fif = NULL;
1255
struct bridge_iflist *bifl;
1256
1257
BRIDGE_LOCK_ASSERT(sc);
1258
1259
if (bif->bif_flags & IFBIF_STP)
1260
bstp_disable(&bif->bif_stp);
1261
1262
ifs->if_bridge = NULL;
1263
CK_LIST_REMOVE(bif, bif_next);
1264
1265
/*
1266
* If removing the interface that gave the bridge its mac address, set
1267
* the mac address of the bridge to the address of the next member, or
1268
* to its default address if no members are left.
1269
*/
1270
if (V_bridge_inherit_mac && sc->sc_ifaddr == ifs) {
1271
if (CK_LIST_EMPTY(&sc->sc_iflist)) {
1272
bcopy(&sc->sc_defaddr,
1273
IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1274
sc->sc_ifaddr = NULL;
1275
} else {
1276
bifl = CK_LIST_FIRST(&sc->sc_iflist);
1277
fif = bifl->bif_ifp;
1278
bcopy(IF_LLADDR(fif),
1279
IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1280
sc->sc_ifaddr = fif;
1281
}
1282
EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1283
}
1284
1285
bridge_linkcheck(sc);
bridge_mutecaps(sc); /* recalculate now that this interface is removed */
1287
BRIDGE_RT_LOCK(sc);
1288
bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
1289
BRIDGE_RT_UNLOCK(sc);
1290
KASSERT(bif->bif_addrcnt == 0,
1291
("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
1292
1293
ifs->if_bridge_output = NULL;
1294
ifs->if_bridge_input = NULL;
1295
ifs->if_bridge_linkstate = NULL;
1296
if (!gone) {
1297
switch (ifs->if_type) {
1298
case IFT_ETHER:
1299
case IFT_L2VLAN:
1300
/*
1301
* Take the interface out of promiscuous mode, but only
1302
* if it was promiscuous in the first place. It might
1303
* not be if we're in the bridge_ioctl_add() error path.
1304
*/
1305
if (ifs->if_flags & IFF_PROMISC)
1306
(void) ifpromisc(ifs, 0);
1307
break;
1308
1309
case IFT_GIF:
1310
break;
1311
1312
default:
1313
#ifdef DIAGNOSTIC
1314
panic("bridge_delete_member: impossible");
1315
#endif
1316
break;
1317
}
1318
/* Re-enable any interface capabilities */
1319
bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
1320
}
1321
bstp_destroy(&bif->bif_stp); /* prepare to free */
1322
1323
NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1324
}
1325
1326
/*
1327
* bridge_delete_span:
1328
*
1329
* Delete the specified span interface.
1330
*/
1331
static void
1332
bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
1333
{
1334
BRIDGE_LOCK_ASSERT(sc);
1335
1336
KASSERT(bif->bif_ifp->if_bridge == NULL,
1337
("%s: not a span interface", __func__));
1338
1339
CK_LIST_REMOVE(bif, bif_next);
1340
1341
NET_EPOCH_CALL(bridge_delete_member_cb, &bif->bif_epoch_ctx);
1342
}
1343
1344
static int
1345
bridge_ioctl_add(struct bridge_softc *sc, void *arg)
1346
{
1347
struct ifbreq *req = arg;
1348
struct bridge_iflist *bif = NULL;
1349
struct ifnet *ifs;
1350
int error = 0;
1351
1352
ifs = ifunit(req->ifbr_ifsname);
1353
if (ifs == NULL)
1354
return (EXTERROR(ENOENT, "No such interface",
1355
req->ifbr_ifsname));
1356
if (ifs->if_ioctl == NULL) /* must be supported */
1357
return (EXTERROR(EINVAL, "Interface must support ioctl(2)"));
1358
1359
/*
1360
* If the new interface is a vlan(4), it could be a bridge SVI.
1361
* Don't allow such things to be added to bridges.
1362
*/
1363
if (ifs->if_type == IFT_L2VLAN) {
1364
struct ifnet *parent;
1365
struct epoch_tracker et;
1366
bool is_bridge;
1367
1368
/*
1369
* Entering NET_EPOCH with BRIDGE_LOCK held, but this is okay
1370
* since we don't sleep here.
1371
*/
1372
NET_EPOCH_ENTER(et);
1373
parent = VLAN_TRUNKDEV(ifs);
1374
is_bridge = (parent != NULL && parent->if_type == IFT_BRIDGE);
1375
NET_EPOCH_EXIT(et);
1376
1377
if (is_bridge)
1378
return (EXTERROR(EINVAL,
1379
"Bridge SVI cannot be added to a bridge"));
1380
}
1381
1382
/* If it's in the span list, it can't be a member. */
1383
CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1384
if (ifs == bif->bif_ifp)
1385
return (EXTERROR(EBUSY,
1386
"Span interface cannot be a member"));
1387
1388
if (ifs->if_bridge) {
1389
struct bridge_iflist *sbif = ifs->if_bridge;
1390
if (sbif->bif_sc == sc)
1391
return (EXTERROR(EEXIST,
1392
"Interface is already a member of this bridge"));
1393
1394
return (EXTERROR(EBUSY,
1395
"Interface is already a member of another bridge"));
1396
}
1397
1398
switch (ifs->if_type) {
1399
case IFT_ETHER:
1400
case IFT_L2VLAN:
1401
case IFT_GIF:
1402
/* permitted interface types */
1403
break;
1404
default:
1405
return (EXTERROR(EINVAL, "Unsupported interface type"));
1406
}
1407
1408
#ifdef INET6
1409
/*
1410
* Two valid inet6 addresses with link-local scope must not be
1411
* on the parent interface and the member interfaces at the
1412
* same time. This restriction is needed to prevent violation
1413
* of link-local scope zone. Attempts to add a member
1414
* interface which has inet6 addresses when the parent has
1415
* inet6 triggers removal of all inet6 addresses on the member
1416
* interface.
1417
*/
1418
1419
/* Check if the parent interface has a link-local scope addr. */
1420
if (V_allow_llz_overlap == 0 &&
1421
in6ifa_llaonifp(sc->sc_ifp) != NULL) {
1422
/*
1423
* If any, remove all inet6 addresses from the member
1424
* interfaces.
1425
*/
1426
CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1427
if (in6ifa_llaonifp(bif->bif_ifp)) {
1428
in6_ifdetach(bif->bif_ifp);
1429
if_printf(sc->sc_ifp,
1430
"IPv6 addresses on %s have been removed "
1431
"before adding it as a member to prevent "
1432
"IPv6 address scope violation.\n",
1433
bif->bif_ifp->if_xname);
1434
}
1435
}
1436
if (in6ifa_llaonifp(ifs)) {
1437
in6_ifdetach(ifs);
1438
if_printf(sc->sc_ifp,
1439
"IPv6 addresses on %s have been removed "
1440
"before adding it as a member to prevent "
1441
"IPv6 address scope violation.\n",
1442
ifs->if_xname);
1443
}
1444
}
1445
#endif
1446
1447
/*
1448
* If member_ifaddrs is disabled, do not allow an interface with
1449
* assigned IP addresses to be added to a bridge. Skip this check
1450
* for gif interfaces, because the IP address assigned to a gif
1451
* interface is separate from the bridge's Ethernet segment.
1452
*/
1453
if (ifs->if_type != IFT_GIF) {
1454
struct ifaddr *ifa;
1455
1456
CK_STAILQ_FOREACH(ifa, &ifs->if_addrhead, ifa_link) {
1457
if (ifa->ifa_addr->sa_family != AF_INET &&
1458
ifa->ifa_addr->sa_family != AF_INET6)
1459
continue;
1460
1461
if (V_member_ifaddrs) {
1462
if_printf(sc->sc_ifp,
1463
"WARNING: Adding member interface %s which "
1464
"has an IP address assigned is deprecated "
1465
"and will be unsupported in a future "
1466
"release.\n", ifs->if_xname);
1467
break;
1468
} else {
1469
return (EXTERROR(EINVAL,
1470
"Member interface may not have "
1471
"an IP address assigned"));
1472
}
1473
}
1474
}
1475
1476
/* Allow the first Ethernet member to define the MTU */
1477
if (CK_LIST_EMPTY(&sc->sc_iflist))
1478
sc->sc_ifp->if_mtu = ifs->if_mtu;
1479
else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
1480
struct ifreq ifr;
1481
1482
snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s",
1483
ifs->if_xname);
1484
ifr.ifr_mtu = sc->sc_ifp->if_mtu;
1485
1486
error = (*ifs->if_ioctl)(ifs,
1487
SIOCSIFMTU, (caddr_t)&ifr);
1488
if (error != 0) {
1489
log(LOG_NOTICE, "%s: invalid MTU: %u for"
1490
" new member %s\n", sc->sc_ifp->if_xname,
1491
ifr.ifr_mtu,
1492
ifs->if_xname);
1493
return (EXTERROR(EINVAL,
1494
"Failed to set MTU on new member"));
1495
}
1496
}
1497
1498
bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
1499
if (bif == NULL)
1500
return (ENOMEM);
1501
1502
bif->bif_sc = sc;
1503
bif->bif_ifp = ifs;
1504
bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
1505
bif->bif_savedcaps = ifs->if_capenable;
1506
bif->bif_vlanproto = ETHERTYPE_VLAN;
1507
bif->bif_pvid = sc->sc_defpvid;
1508
if (sc->sc_flags & IFBRF_DEFQINQ)
1509
bif->bif_flags |= IFBIF_QINQ;
1510
1511
/*
1512
* Assign the interface's MAC address to the bridge if it's the first
1513
* member and the MAC address of the bridge has not been changed from
1514
* the default randomly generated one.
1515
*/
1516
if (V_bridge_inherit_mac && CK_LIST_EMPTY(&sc->sc_iflist) &&
1517
!memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr.octet, ETHER_ADDR_LEN)) {
1518
bcopy(IF_LLADDR(ifs), IF_LLADDR(sc->sc_ifp), ETHER_ADDR_LEN);
1519
sc->sc_ifaddr = ifs;
1520
EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
1521
}
1522
1523
ifs->if_bridge = bif;
1524
ifs->if_bridge_output = bridge_output;
1525
ifs->if_bridge_input = bridge_input;
1526
ifs->if_bridge_linkstate = bridge_linkstate;
1527
bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
1528
/*
1529
* XXX: XLOCK HERE!?!
1530
*
1531
* NOTE: insert_***HEAD*** should be safe for the traversals.
1532
*/
1533
CK_LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
1534
1535
/* Set interface capabilities to the intersection set of all members */
1536
bridge_mutecaps(sc);
1537
bridge_linkcheck(sc);
1538
1539
/* Place the interface into promiscuous mode */
1540
switch (ifs->if_type) {
1541
case IFT_ETHER:
1542
case IFT_L2VLAN:
1543
error = ifpromisc(ifs, 1);
1544
break;
1545
}
1546
1547
if (error)
1548
bridge_delete_member(sc, bif, 0);
1549
return (error);
1550
}
1551
1552
static int
1553
bridge_ioctl_del(struct bridge_softc *sc, void *arg)
1554
{
1555
struct ifbreq *req = arg;
1556
struct bridge_iflist *bif;
1557
1558
bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1559
if (bif == NULL)
1560
return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1561
1562
bridge_delete_member(sc, bif, 0);
1563
1564
return (0);
1565
}
1566
1567
static int
1568
bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
1569
{
1570
struct ifbreq *req = arg;
1571
struct bridge_iflist *bif;
1572
struct bstp_port *bp;
1573
1574
bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1575
if (bif == NULL)
1576
return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1577
1578
bp = &bif->bif_stp;
1579
req->ifbr_ifsflags = bif->bif_flags;
1580
req->ifbr_state = bp->bp_state;
1581
req->ifbr_priority = bp->bp_priority;
1582
req->ifbr_path_cost = bp->bp_path_cost;
1583
req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1584
req->ifbr_proto = bp->bp_protover;
1585
req->ifbr_role = bp->bp_role;
1586
req->ifbr_stpflags = bp->bp_flags;
1587
req->ifbr_addrcnt = bif->bif_addrcnt;
1588
req->ifbr_addrmax = bif->bif_addrmax;
1589
req->ifbr_addrexceeded = bif->bif_addrexceeded;
1590
req->ifbr_pvid = bif->bif_pvid;
1591
req->ifbr_vlanproto = bif->bif_vlanproto;
1592
1593
/* Copy STP state options as flags */
1594
if (bp->bp_operedge)
1595
req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
1596
if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
1597
req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
1598
if (bp->bp_ptp_link)
1599
req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
1600
if (bp->bp_flags & BSTP_PORT_AUTOPTP)
1601
req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
1602
if (bp->bp_flags & BSTP_PORT_ADMEDGE)
1603
req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
1604
if (bp->bp_flags & BSTP_PORT_ADMCOST)
1605
req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
1606
return (0);
1607
}
1608
1609
static int
1610
bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
1611
{
1612
struct epoch_tracker et;
1613
struct ifbreq *req = arg;
1614
struct bridge_iflist *bif;
1615
struct bstp_port *bp;
1616
int error;
1617
1618
bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1619
if (bif == NULL)
1620
return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1621
bp = &bif->bif_stp;
1622
1623
if (req->ifbr_ifsflags & IFBIF_SPAN)
1624
/* SPAN is readonly */
1625
return (EXTERROR(EINVAL, "Span interface cannot be modified"));
1626
1627
NET_EPOCH_ENTER(et);
1628
1629
if (req->ifbr_ifsflags & IFBIF_STP) {
1630
if ((bif->bif_flags & IFBIF_STP) == 0) {
1631
error = bstp_enable(&bif->bif_stp);
1632
if (error) {
1633
NET_EPOCH_EXIT(et);
1634
return (EXTERROR(error,
1635
"Failed to enable STP"));
1636
}
1637
}
1638
} else {
1639
if ((bif->bif_flags & IFBIF_STP) != 0)
1640
bstp_disable(&bif->bif_stp);
1641
}
1642
1643
/* Pass on STP flags */
1644
bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
1645
bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
1646
bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
1647
bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
1648
1649
/* Save the bits relating to the bridge */
1650
bif->bif_flags = req->ifbr_ifsflags & IFBIFMASK;
1651
1652
NET_EPOCH_EXIT(et);
1653
1654
return (0);
1655
}
1656
1657
static int
1658
bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
1659
{
1660
struct ifbrparam *param = arg;
1661
1662
sc->sc_brtmax = param->ifbrp_csize;
1663
bridge_rttrim(sc);
1664
1665
return (0);
1666
}
1667
1668
static int
1669
bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
1670
{
1671
struct ifbrparam *param = arg;
1672
1673
param->ifbrp_csize = sc->sc_brtmax;
1674
1675
return (0);
1676
}
1677
1678
static int
1679
bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
1680
{
1681
struct ifbifconf *bifc = arg;
1682
struct bridge_iflist *bif;
1683
struct ifbreq breq;
1684
char *buf, *outbuf;
1685
int count, buflen, len, error = 0;
1686
1687
count = 0;
1688
CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
1689
count++;
1690
CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
1691
count++;
1692
1693
buflen = sizeof(breq) * count;
1694
if (bifc->ifbic_len == 0) {
1695
bifc->ifbic_len = buflen;
1696
return (0);
1697
}
1698
outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1699
if (outbuf == NULL)
1700
return (ENOMEM);
1701
1702
count = 0;
1703
buf = outbuf;
1704
len = min(bifc->ifbic_len, buflen);
1705
bzero(&breq, sizeof(breq));
1706
CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
1707
if (len < sizeof(breq))
1708
break;
1709
1710
strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1711
sizeof(breq.ifbr_ifsname));
1712
/* Fill in the ifbreq structure */
1713
error = bridge_ioctl_gifflags(sc, &breq);
1714
if (error)
1715
break;
1716
memcpy(buf, &breq, sizeof(breq));
1717
count++;
1718
buf += sizeof(breq);
1719
len -= sizeof(breq);
1720
}
1721
CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
1722
if (len < sizeof(breq))
1723
break;
1724
1725
strlcpy(breq.ifbr_ifsname, bif->bif_ifp->if_xname,
1726
sizeof(breq.ifbr_ifsname));
1727
breq.ifbr_ifsflags = bif->bif_flags;
1728
breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;
1729
memcpy(buf, &breq, sizeof(breq));
1730
count++;
1731
buf += sizeof(breq);
1732
len -= sizeof(breq);
1733
}
1734
1735
bifc->ifbic_len = sizeof(breq) * count;
1736
error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);
1737
free(outbuf, M_TEMP);
1738
return (error);
1739
}
1740
1741
static int
1742
bridge_ioctl_rts(struct bridge_softc *sc, void *arg)
1743
{
1744
struct ifbaconf *bac = arg;
1745
struct bridge_rtnode *brt;
1746
struct ifbareq bareq;
1747
char *buf, *outbuf;
1748
int count, buflen, len, error = 0;
1749
1750
if (bac->ifbac_len == 0)
1751
return (0);
1752
1753
count = 0;
1754
CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)
1755
count++;
1756
buflen = sizeof(bareq) * count;
1757
1758
outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
1759
if (outbuf == NULL)
1760
return (ENOMEM);
1761
1762
count = 0;
1763
buf = outbuf;
1764
len = min(bac->ifbac_len, buflen);
1765
bzero(&bareq, sizeof(bareq));
1766
CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
1767
if (len < sizeof(bareq))
1768
goto out;
1769
strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname,
1770
sizeof(bareq.ifba_ifsname));
1771
memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr));
1772
bareq.ifba_vlan = brt->brt_vlan;
1773
if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
1774
time_uptime < brt->brt_expire)
1775
bareq.ifba_expire = brt->brt_expire - time_uptime;
1776
else
1777
bareq.ifba_expire = 0;
1778
bareq.ifba_flags = brt->brt_flags;
1779
1780
memcpy(buf, &bareq, sizeof(bareq));
1781
count++;
1782
buf += sizeof(bareq);
1783
len -= sizeof(bareq);
1784
}
1785
out:
1786
bac->ifbac_len = sizeof(bareq) * count;
1787
error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);
1788
free(outbuf, M_TEMP);
1789
return (error);
1790
}
1791
1792
static int
1793
bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
1794
{
1795
struct ifbareq *req = arg;
1796
struct bridge_iflist *bif;
1797
struct epoch_tracker et;
1798
int error;
1799
1800
NET_EPOCH_ENTER(et);
1801
bif = bridge_lookup_member(sc, req->ifba_ifsname);
1802
if (bif == NULL) {
1803
NET_EPOCH_EXIT(et);
1804
return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1805
}
1806
1807
/* bridge_rtupdate() may acquire the lock. */
1808
error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
1809
req->ifba_flags);
1810
NET_EPOCH_EXIT(et);
1811
1812
return (error);
1813
}
1814
1815
static int
1816
bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
1817
{
1818
struct ifbrparam *param = arg;
1819
1820
sc->sc_brttimeout = param->ifbrp_ctime;
1821
return (0);
1822
}
1823
1824
static int
1825
bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
1826
{
1827
struct ifbrparam *param = arg;
1828
1829
param->ifbrp_ctime = sc->sc_brttimeout;
1830
return (0);
1831
}
1832
1833
static int
1834
bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
1835
{
1836
struct ifbareq *req = arg;
1837
int vlan = req->ifba_vlan;
1838
1839
/* Userspace uses '0' to mean 'any vlan' */
1840
if (vlan == 0)
1841
vlan = DOT1Q_VID_RSVD_IMPL;
1842
1843
return (bridge_rtdaddr(sc, req->ifba_dst, vlan));
1844
}
1845
1846
static int
1847
bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
1848
{
1849
struct ifbreq *req = arg;
1850
1851
BRIDGE_RT_LOCK(sc);
1852
bridge_rtflush(sc, req->ifbr_ifsflags);
1853
BRIDGE_RT_UNLOCK(sc);
1854
1855
return (0);
1856
}
1857
1858
static int
1859
bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
1860
{
1861
struct ifbrparam *param = arg;
1862
struct bstp_state *bs = &sc->sc_stp;
1863
1864
param->ifbrp_prio = bs->bs_bridge_priority;
1865
return (0);
1866
}
1867
1868
static int
1869
bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
1870
{
1871
struct ifbrparam *param = arg;
1872
1873
return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
1874
}
1875
1876
static int
1877
bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
1878
{
1879
struct ifbrparam *param = arg;
1880
struct bstp_state *bs = &sc->sc_stp;
1881
1882
param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
1883
return (0);
1884
}
1885
1886
static int
1887
bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
1888
{
1889
struct ifbrparam *param = arg;
1890
1891
return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
1892
}
1893
1894
static int
1895
bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
1896
{
1897
struct ifbrparam *param = arg;
1898
struct bstp_state *bs = &sc->sc_stp;
1899
1900
param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
1901
return (0);
1902
}
1903
1904
static int
1905
bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
1906
{
1907
struct ifbrparam *param = arg;
1908
1909
return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
1910
}
1911
1912
static int
1913
bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
1914
{
1915
struct ifbrparam *param = arg;
1916
struct bstp_state *bs = &sc->sc_stp;
1917
1918
param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
1919
return (0);
1920
}
1921
1922
static int
1923
bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
1924
{
1925
struct ifbrparam *param = arg;
1926
1927
return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
1928
}
1929
1930
static int
1931
bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
1932
{
1933
struct ifbreq *req = arg;
1934
struct bridge_iflist *bif;
1935
1936
bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1937
if (bif == NULL)
1938
return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1939
1940
return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
1941
}
1942
1943
static int
1944
bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
1945
{
1946
struct ifbreq *req = arg;
1947
struct bridge_iflist *bif;
1948
1949
bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1950
if (bif == NULL)
1951
return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1952
1953
return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
1954
}
1955
1956
static int
1957
bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
1958
{
1959
struct ifbreq *req = arg;
1960
struct bridge_iflist *bif;
1961
1962
bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1963
if (bif == NULL)
1964
return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1965
1966
bif->bif_addrmax = req->ifbr_addrmax;
1967
return (0);
1968
}
1969
1970
static int
1971
bridge_ioctl_sifpvid(struct bridge_softc *sc, void *arg)
1972
{
1973
struct ifbreq *req = arg;
1974
struct bridge_iflist *bif;
1975
1976
bif = bridge_lookup_member(sc, req->ifbr_ifsname);
1977
if (bif == NULL)
1978
return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1979
1980
if (req->ifbr_pvid > DOT1Q_VID_MAX)
1981
return (EXTERROR(EINVAL, "Invalid VLAN ID"));
1982
1983
bif->bif_pvid = req->ifbr_pvid;
1984
return (0);
1985
}
1986
1987
static int
1988
bridge_ioctl_sifvlanset(struct bridge_softc *sc, void *arg)
1989
{
1990
struct ifbif_vlan_req *req = arg;
1991
struct bridge_iflist *bif;
1992
1993
bif = bridge_lookup_member(sc, req->bv_ifname);
1994
if (bif == NULL)
1995
return (EXTERROR(ENOENT, "Interface is not a bridge member"));
1996
1997
/* Reject invalid VIDs. */
1998
if (BRVLAN_TEST(&req->bv_set, DOT1Q_VID_NULL) ||
1999
BRVLAN_TEST(&req->bv_set, DOT1Q_VID_RSVD_IMPL))
2000
return (EXTERROR(EINVAL, "Invalid VLAN ID in set"));
2001
2002
switch (req->bv_op) {
2003
/* Replace the existing vlan set with the new set */
2004
case BRDG_VLAN_OP_SET:
2005
BIT_COPY(BRVLAN_SETSIZE, &req->bv_set, &bif->bif_vlan_set);
2006
break;
2007
2008
/* Modify the existing vlan set to add the given vlans */
2009
case BRDG_VLAN_OP_ADD:
2010
BIT_OR(BRVLAN_SETSIZE, &bif->bif_vlan_set, &req->bv_set);
2011
break;
2012
2013
/* Modify the existing vlan set to remove the given vlans */
2014
case BRDG_VLAN_OP_DEL:
2015
BIT_ANDNOT(BRVLAN_SETSIZE, &bif->bif_vlan_set, &req->bv_set);
2016
break;
2017
2018
/* Invalid or unknown operation */
2019
default:
2020
return (EXTERROR(EINVAL,
2021
"Unsupported BRDGSIFVLANSET operation"));
2022
}
2023
2024
return (0);
2025
}
2026
2027
static int
2028
bridge_ioctl_gifvlanset(struct bridge_softc *sc, void *arg)
2029
{
2030
struct ifbif_vlan_req *req = arg;
2031
struct bridge_iflist *bif;
2032
2033
bif = bridge_lookup_member(sc, req->bv_ifname);
2034
if (bif == NULL)
2035
return (EXTERROR(ENOENT, "Interface is not a bridge member"));
2036
2037
BIT_COPY(BRVLAN_SETSIZE, &bif->bif_vlan_set, &req->bv_set);
2038
return (0);
2039
}
2040
2041
static int
2042
bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
2043
{
2044
struct ifbreq *req = arg;
2045
struct bridge_iflist *bif = NULL;
2046
struct ifnet *ifs;
2047
2048
ifs = ifunit(req->ifbr_ifsname);
2049
if (ifs == NULL)
2050
return (EXTERROR(ENOENT, "No such interface"));
2051
2052
CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
2053
if (ifs == bif->bif_ifp)
2054
return (EXTERROR(EBUSY,
2055
"Interface is already a span port"));
2056
2057
if (ifs->if_bridge != NULL)
2058
return (EXTERROR(EEXIST,
2059
"Interface is already a bridge member"));
2060
2061
switch (ifs->if_type) {
2062
case IFT_ETHER:
2063
case IFT_GIF:
2064
case IFT_L2VLAN:
2065
break;
2066
default:
2067
return (EXTERROR(EINVAL, "Unsupported interface type"));
2068
}
2069
2070
bif = malloc(sizeof(*bif), M_DEVBUF, M_NOWAIT|M_ZERO);
2071
if (bif == NULL)
2072
return (ENOMEM);
2073
2074
bif->bif_ifp = ifs;
2075
bif->bif_flags = IFBIF_SPAN;
2076
2077
CK_LIST_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
2078
2079
return (0);
2080
}
2081
2082
static int
2083
bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
2084
{
2085
struct ifbreq *req = arg;
2086
struct bridge_iflist *bif;
2087
struct ifnet *ifs;
2088
2089
ifs = ifunit(req->ifbr_ifsname);
2090
if (ifs == NULL)
2091
return (EXTERROR(ENOENT, "No such interface"));
2092
2093
CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
2094
if (ifs == bif->bif_ifp)
2095
break;
2096
2097
if (bif == NULL)
2098
return (EXTERROR(ENOENT, "Interface is not a span port"));
2099
2100
bridge_delete_span(sc, bif);
2101
2102
return (0);
2103
}
2104
2105
static int
2106
bridge_ioctl_gbparam(struct bridge_softc *sc, void *arg)
2107
{
2108
struct ifbropreq *req = arg;
2109
struct bstp_state *bs = &sc->sc_stp;
2110
struct bstp_port *root_port;
2111
2112
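/*
 * STP timer values are stored internally in the 802.1D wire format
 * (units of 1/256 of a second); shift right by 8 to report seconds.
 */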
req->ifbop_maxage = bs->bs_bridge_max_age >> 8;
2113
req->ifbop_hellotime = bs->bs_bridge_htime >> 8;
2114
req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;
2115
2116
root_port = bs->bs_root_port;
2117
if (root_port == NULL)
2118
req->ifbop_root_port = 0;
2119
else
2120
req->ifbop_root_port = root_port->bp_ifp->if_index;
2121
2122
req->ifbop_holdcount = bs->bs_txholdcount;
2123
req->ifbop_priority = bs->bs_bridge_priority;
2124
req->ifbop_protocol = bs->bs_protover;
2125
req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;
2126
req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;
2127
req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;
2128
req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;
2129
req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;
2130
req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;
2131
2132
return (0);
2133
}
2134
2135
static int
2136
bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
2137
{
2138
struct ifbrparam *param = arg;
2139
2140
param->ifbrp_cexceeded = sc->sc_brtexceeded;
2141
return (0);
2142
}
2143
2144
static int
2145
bridge_ioctl_gifsstp(struct bridge_softc *sc, void *arg)
2146
{
2147
struct ifbpstpconf *bifstp = arg;
2148
struct bridge_iflist *bif;
2149
struct bstp_port *bp;
2150
struct ifbpstpreq bpreq;
2151
char *buf, *outbuf;
2152
int count, buflen, len, error = 0;
2153
2154
count = 0;
2155
CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2156
if ((bif->bif_flags & IFBIF_STP) != 0)
2157
count++;
2158
}
2159
2160
buflen = sizeof(bpreq) * count;
2161
if (bifstp->ifbpstp_len == 0) {
2162
bifstp->ifbpstp_len = buflen;
2163
return (0);
2164
}
2165
2166
outbuf = malloc(buflen, M_TEMP, M_NOWAIT | M_ZERO);
2167
if (outbuf == NULL)
2168
return (ENOMEM);
2169
2170
count = 0;
2171
buf = outbuf;
2172
len = min(bifstp->ifbpstp_len, buflen);
2173
bzero(&bpreq, sizeof(bpreq));
2174
CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2175
if (len < sizeof(bpreq))
2176
break;
2177
2178
if ((bif->bif_flags & IFBIF_STP) == 0)
2179
continue;
2180
2181
bp = &bif->bif_stp;
2182
bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;
2183
bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;
2184
bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;
2185
bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;
2186
bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id;
2187
bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;
2188
2189
memcpy(buf, &bpreq, sizeof(bpreq));
2190
count++;
2191
buf += sizeof(bpreq);
2192
len -= sizeof(bpreq);
2193
}
2194
2195
bifstp->ifbpstp_len = sizeof(bpreq) * count;
2196
error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len);
2197
free(outbuf, M_TEMP);
2198
return (error);
2199
}
2200
2201
static int
2202
bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
2203
{
2204
struct ifbrparam *param = arg;
2205
2206
return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
2207
}
2208
2209
static int
2210
bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
2211
{
2212
struct ifbrparam *param = arg;
2213
2214
return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
2215
}
2216
2217
static int
2218
bridge_ioctl_gflags(struct bridge_softc *sc, void *arg)
2219
{
2220
struct ifbrparam *param = arg;
2221
2222
param->ifbrp_flags = sc->sc_flags;
2223
2224
return (0);
2225
}
2226
2227
static int
2228
bridge_ioctl_sflags(struct bridge_softc *sc, void *arg)
2229
{
2230
struct ifbrparam *param = arg;
2231
2232
sc->sc_flags = param->ifbrp_flags;
2233
2234
return (0);
2235
}
2236
2237
static int
2238
bridge_ioctl_gdefpvid(struct bridge_softc *sc, void *arg)
2239
{
2240
struct ifbrparam *param = arg;
2241
2242
param->ifbrp_defpvid = sc->sc_defpvid;
2243
2244
return (0);
2245
}
2246
2247
static int
2248
bridge_ioctl_sdefpvid(struct bridge_softc *sc, void *arg)
2249
{
2250
struct ifbrparam *param = arg;
2251
2252
/* Reject invalid VIDs, but allow 0 to mean 'none'. */
2253
if (param->ifbrp_defpvid > DOT1Q_VID_MAX)
2254
return (EINVAL);
2255
2256
sc->sc_defpvid = param->ifbrp_defpvid;
2257
2258
return (0);
2259
}
2260
2261
static int
2262
bridge_ioctl_svlanproto(struct bridge_softc *sc, void *arg)
2263
{
2264
struct ifbreq *req = arg;
2265
struct bridge_iflist *bif;
2266
2267
bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2268
if (bif == NULL)
2269
return (EXTERROR(ENOENT, "Interface is not a bridge member"));
2270
2271
if (req->ifbr_vlanproto != ETHERTYPE_VLAN &&
2272
req->ifbr_vlanproto != ETHERTYPE_QINQ)
2273
return (EXTERROR(EINVAL, "Invalid VLAN protocol"));
2274
2275
bif->bif_vlanproto = req->ifbr_vlanproto;
2276
2277
return (0);
2278
}
2279
/*
2280
* bridge_ifdetach:
2281
*
2282
* Detach an interface from a bridge. Called when a member
2283
* interface is detaching.
2284
*/
2285
static void
2286
bridge_ifdetach(void *arg __unused, struct ifnet *ifp)
2287
{
2288
struct bridge_iflist *bif = ifp->if_bridge;
2289
struct bridge_softc *sc = NULL;
2290
2291
if (bif)
2292
sc = bif->bif_sc;
2293
2294
if (V_bridge_cloner == NULL) {
2295
/*
2296
* This detach handler can be called after
2297
* vnet_bridge_uninit(). Just return in that case.
2298
*/
2299
return;
2300
}
2301
/* Check if the interface is a bridge member */
2302
if (sc != NULL) {
2303
BRIDGE_LOCK(sc);
2304
bridge_delete_member(sc, bif, 1);
2305
BRIDGE_UNLOCK(sc);
2306
return;
2307
}
2308
2309
/* Check if the interface is a span port */
2310
BRIDGE_LIST_LOCK();
2311
LIST_FOREACH(sc, &V_bridge_list, sc_list) {
2312
BRIDGE_LOCK(sc);
2313
CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next)
2314
if (ifp == bif->bif_ifp) {
2315
bridge_delete_span(sc, bif);
2316
break;
2317
}
2318
2319
BRIDGE_UNLOCK(sc);
2320
}
2321
BRIDGE_LIST_UNLOCK();
2322
}
2323
2324
/*
2325
* bridge_init:
2326
*
2327
* Initialize a bridge interface.
2328
*/
2329
static void
2330
bridge_init(void *xsc)
2331
{
2332
struct bridge_softc *sc = (struct bridge_softc *)xsc;
2333
struct ifnet *ifp = sc->sc_ifp;
2334
2335
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2336
return;
2337
2338
BRIDGE_LOCK(sc);
2339
callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
2340
bridge_timer, sc);
2341
2342
ifp->if_drv_flags |= IFF_DRV_RUNNING;
2343
bstp_init(&sc->sc_stp); /* Initialize Spanning Tree */
2344
2345
BRIDGE_UNLOCK(sc);
2346
}
2347
2348
/*
2349
* bridge_stop:
2350
*
2351
* Stop the bridge interface.
2352
*/
2353
static void
2354
bridge_stop(struct ifnet *ifp, int disable)
2355
{
2356
struct bridge_softc *sc = ifp->if_softc;
2357
2358
BRIDGE_LOCK_ASSERT(sc);
2359
2360
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2361
return;
2362
2363
BRIDGE_RT_LOCK(sc);
2364
callout_stop(&sc->sc_brcallout);
2365
2366
bstp_stop(&sc->sc_stp);
2367
2368
bridge_rtflush(sc, IFBF_FLUSHDYN);
2369
BRIDGE_RT_UNLOCK(sc);
2370
2371
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
2372
}
2373
2374
/*
2375
* bridge_enqueue:
2376
*
2377
* Enqueue a packet on a bridge member interface.
2378
*
2379
*/
2380
static int
2381
bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
2382
struct bridge_iflist *bif)
2383
{
2384
int len, err = 0;
2385
short mflags;
2386
struct mbuf *m0;
2387
2388
/*
2389
* Find the bridge member port this packet is being sent on, if the
2390
* caller didn't already provide it.
2391
*/
2392
if (bif == NULL)
2393
bif = bridge_lookup_member_if(sc, dst_ifp);
2394
if (bif == NULL) {
2395
/* Perhaps the interface was removed from the bridge */
2396
m_freem(m);
2397
return (EINVAL);
2398
}
2399
2400
/* Do VLAN filtering. */
2401
if (!bridge_vfilter_out(bif, m)) {
2402
m_freem(m);
2403
return (0);
2404
}
2405
2406
/* We may be sending a fragment so traverse the mbuf */
2407
for (; m; m = m0) {
2408
m0 = m->m_nextpkt;
2409
m->m_nextpkt = NULL;
2410
len = m->m_pkthdr.len;
2411
mflags = m->m_flags;
2412
2413
/*
2414
* If the native VLAN ID of the outgoing interface matches the
2415
* VLAN ID of the frame, remove the VLAN tag.
2416
*/
2417
if (bif->bif_pvid != DOT1Q_VID_NULL &&
2418
VLANTAGOF(m) == bif->bif_pvid) {
2419
m->m_flags &= ~M_VLANTAG;
2420
m->m_pkthdr.ether_vtag = 0;
2421
}
2422
2423
/*
2424
* There are two cases where we have to insert our own tag:
2425
* if the member interface doesn't support hardware tagging,
2426
* or if the tag proto is not 802.1q.
2427
*/
2428
if ((m->m_flags & M_VLANTAG) &&
2429
((dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0 ||
2430
bif->bif_vlanproto != ETHERTYPE_VLAN)) {
2431
m = ether_vlanencap_proto(m, m->m_pkthdr.ether_vtag,
2432
bif->bif_vlanproto);
2433
if (m == NULL) {
2434
if_printf(dst_ifp,
2435
"unable to prepend VLAN header\n");
2436
if_inc_counter(dst_ifp, IFCOUNTER_OERRORS, 1);
2437
continue;
2438
}
2439
m->m_flags &= ~M_VLANTAG;
2440
}
2441
2442
M_ASSERTPKTHDR(m); /* We shouldn't transmit mbuf without pkthdr */
2443
/*
2444
* XXXZL: gif(4) requires the af to be saved in csum_data field
2445
* so that gif_transmit() routine can pull it back.
2446
*/
2447
if (dst_ifp->if_type == IFT_GIF)
2448
m->m_pkthdr.csum_data = AF_LINK;
2449
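/*
 * If the transmit fails, free any remaining fragments queued on
 * m_nextpkt and charge the errors to the bridge interface.
 */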
if ((err = dst_ifp->if_transmit(dst_ifp, m))) {
2450
int n;
2451
2452
for (m = m0, n = 1; m != NULL; m = m0, n++) {
2453
m0 = m->m_nextpkt;
2454
m_freem(m);
2455
}
2456
if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, n);
2457
break;
2458
}
2459
2460
if_inc_counter(sc->sc_ifp, IFCOUNTER_OPACKETS, 1);
2461
if_inc_counter(sc->sc_ifp, IFCOUNTER_OBYTES, len);
2462
if (mflags & M_MCAST)
2463
if_inc_counter(sc->sc_ifp, IFCOUNTER_OMCASTS, 1);
2464
}
2465
2466
return (err);
2467
}
2468
2469
/*
2470
* bridge_dummynet:
2471
*
2472
* Receive a queued packet from dummynet and pass it on to the output
2473
* interface.
2474
*
2475
* The mbuf has the Ethernet header already attached.
2476
*/
2477
static void
2478
bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
2479
{
2480
struct bridge_iflist *bif = ifp->if_bridge;
2481
struct bridge_softc *sc = NULL;
2482
2483
if (bif)
2484
sc = bif->bif_sc;
2485
2486
/*
2487
* The packet didn't originate from a member interface. This should only
2488
* ever happen if a member interface is removed while packets are
2489
* queued for it.
2490
*/
2491
if (sc == NULL) {
2492
m_freem(m);
2493
return;
2494
}
2495
2496
if (PFIL_HOOKED_OUT_46) {
2497
if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
2498
return;
2499
if (m == NULL)
2500
return;
2501
}
2502
2503
bridge_enqueue(sc, ifp, m, NULL);
2504
}
2505
2506
/*
2507
* bridge_output:
2508
*
2509
* Send output from a bridge member interface. This
2510
* performs the bridging function for locally originated
2511
* packets.
2512
*
2513
* The mbuf has the Ethernet header already attached. We must
2514
* enqueue or free the mbuf before returning.
2515
*/
2516
static int
2517
bridge_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
2518
struct rtentry *rt)
2519
{
2520
struct ether_header *eh;
2521
struct bridge_iflist *sbif;
2522
struct ifnet *bifp, *dst_if;
2523
struct bridge_softc *sc;
2524
ether_vlanid_t vlan;
2525
2526
NET_EPOCH_ASSERT();
2527
2528
if (m->m_len < ETHER_HDR_LEN) {
2529
m = m_pullup(m, ETHER_HDR_LEN);
2530
if (m == NULL)
2531
return (0);
2532
}
2533
2534
sbif = ifp->if_bridge;
2535
sc = sbif->bif_sc;
2536
bifp = sc->sc_ifp;
2537
2538
eh = mtod(m, struct ether_header *);
2539
vlan = VLANTAGOF(m);
2540
2541
/*
2542
* If bridge is down, but the original output interface is up,
2543
* go ahead and send out that interface. Otherwise, the packet
2544
* is dropped below.
2545
*/
2546
if ((bifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2547
dst_if = ifp;
2548
goto sendunicast;
2549
}
2550
2551
/*
2552
* If the packet is multicast, or we don't know a better way to
2553
* get there, send to all interfaces.
2554
*/
2555
if (ETHER_IS_MULTICAST(eh->ether_dhost))
2556
dst_if = NULL;
2557
else
2558
dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
2559
/* Tap any traffic not passing back out the originating interface */
2560
if (dst_if != ifp)
2561
ETHER_BPF_MTAP(bifp, m);
2562
if (dst_if == NULL) {
2563
struct bridge_iflist *bif;
2564
struct mbuf *mc;
2565
int used = 0;
2566
2567
bridge_span(sc, m);
2568
2569
CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
2570
dst_if = bif->bif_ifp;
2571
2572
if (dst_if->if_type == IFT_GIF)
2573
continue;
2574
if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2575
continue;
2576
2577
/*
2578
* If this is not the original output interface,
2579
* and the interface is participating in spanning
2580
* tree, make sure the port is in a state that
2581
* allows forwarding.
2582
*/
2583
if (dst_if != ifp && (bif->bif_flags & IFBIF_STP) &&
2584
bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2585
continue;
2586
2587
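/*
 * Reuse the original mbuf for the last member in the list and
 * duplicate it for the others, avoiding one unnecessary copy.
 */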
if (CK_LIST_NEXT(bif, bif_next) == NULL) {
2588
used = 1;
2589
mc = m;
2590
} else {
2591
mc = m_dup(m, M_NOWAIT);
2592
if (mc == NULL) {
2593
if_inc_counter(bifp, IFCOUNTER_OERRORS, 1);
2594
continue;
2595
}
2596
}
2597
2598
bridge_enqueue(sc, dst_if, mc, bif);
2599
}
2600
if (used == 0)
2601
m_freem(m);
2602
return (0);
2603
}
2604
2605
sendunicast:
2606
/*
2607
* XXX Spanning tree consideration here?
2608
*/
2609
2610
bridge_span(sc, m);
2611
if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0) {
2612
m_freem(m);
2613
return (0);
2614
}
2615
2616
bridge_enqueue(sc, dst_if, m, NULL);
2617
return (0);
2618
}
2619
2620
/*
2621
* bridge_transmit:
2622
*
2623
* Do output on a bridge.
2624
*
2625
*/
2626
static int
2627
bridge_transmit(struct ifnet *ifp, struct mbuf *m)
2628
{
2629
struct bridge_softc *sc;
2630
struct ether_header *eh;
2631
struct ifnet *dst_if;
2632
int error = 0;
2633
ether_vlanid_t vlan;
2634
2635
sc = ifp->if_softc;
2636
2637
ETHER_BPF_MTAP(ifp, m);
2638
2639
eh = mtod(m, struct ether_header *);
2640
vlan = VLANTAGOF(m);
2641
2642
if (((m->m_flags & (M_BCAST|M_MCAST)) == 0) &&
2643
(dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan)) != NULL) {
2644
error = bridge_enqueue(sc, dst_if, m, NULL);
2645
} else
2646
bridge_broadcast(sc, ifp, m, 0);
2647
2648
return (error);
2649
}
2650
2651
#ifdef ALTQ
2652
static void
2653
bridge_altq_start(if_t ifp)
2654
{
2655
struct ifaltq *ifq = &ifp->if_snd;
2656
struct mbuf *m;
2657
2658
IFQ_LOCK(ifq);
2659
IFQ_DEQUEUE_NOLOCK(ifq, m);
2660
while (m != NULL) {
2661
bridge_transmit(ifp, m);
2662
IFQ_DEQUEUE_NOLOCK(ifq, m);
2663
}
2664
IFQ_UNLOCK(ifq);
2665
}
2666
2667
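/*
 * if_transmit entry point used when ALTQ is compiled in: if ALTQ is
 * enabled on the bridge interface, the frame goes through the ifaltq
 * so queueing disciplines apply; otherwise it is handed straight to
 * bridge_transmit().
 */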
static int
2668
bridge_altq_transmit(if_t ifp, struct mbuf *m)
2669
{
2670
int err;
2671
2672
if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
2673
IFQ_ENQUEUE(&ifp->if_snd, m, err);
2674
if (err == 0)
2675
bridge_altq_start(ifp);
2676
} else
2677
err = bridge_transmit(ifp, m);
2678
2679
return (err);
2680
}
2681
#endif /* ALTQ */
2682
2683
/*
2684
* The ifp->if_qflush entry point for if_bridge(4) is a no-op.
2685
*/
2686
static void
2687
bridge_qflush(struct ifnet *ifp __unused)
2688
{
2689
}
2690
2691
/*
2692
* bridge_forward:
2693
*
2694
* The forwarding function of the bridge.
2695
*
2696
* NOTE: Releases the lock on return.
2697
*/
2698
static void
2699
bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
2700
struct mbuf *m)
2701
{
2702
struct bridge_iflist *dbif;
2703
struct ifnet *src_if, *dst_if, *ifp;
2704
struct ether_header *eh;
2705
uint8_t *dst;
2706
int error;
2707
ether_vlanid_t vlan;
2708
2709
NET_EPOCH_ASSERT();
2710
2711
src_if = m->m_pkthdr.rcvif;
2712
ifp = sc->sc_ifp;
2713
vlan = VLANTAGOF(m);
2714
2715
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
2716
if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2717
2718
if ((sbif->bif_flags & IFBIF_STP) &&
2719
sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2720
goto drop;
2721
2722
eh = mtod(m, struct ether_header *);
2723
dst = eh->ether_dhost;
2724
2725
/* If the interface is learning, record the address. */
2726
if (sbif->bif_flags & IFBIF_LEARNING) {
2727
error = bridge_rtupdate(sc, eh->ether_shost, vlan,
2728
sbif, 0, IFBAF_DYNAMIC);
2729
/*
2730
* If the interface has addresses limits then deny any source
2731
* that is not in the cache.
2732
*/
2733
if (error && sbif->bif_addrmax)
2734
goto drop;
2735
}
2736
2737
if ((sbif->bif_flags & IFBIF_STP) != 0 &&
2738
sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
2739
goto drop;
2740
2741
#ifdef DEV_NETMAP
2742
/*
2743
* Hand the packet to netmap only if it wasn't injected by netmap
2744
* itself.
2745
*/
2746
if ((m->m_flags & M_BRIDGE_INJECT) == 0 &&
2747
(if_getcapenable(ifp) & IFCAP_NETMAP) != 0) {
2748
ifp->if_input(ifp, m);
2749
return;
2750
}
2751
m->m_flags &= ~M_BRIDGE_INJECT;
2752
#endif
2753
2754
/*
2755
* At this point, the port either doesn't participate
2756
* in spanning tree or it is in the forwarding state.
2757
*/
2758
2759
/*
2760
* If the packet is unicast, destined for someone on
2761
* "this" side of the bridge, drop it.
2762
*/
2763
if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
2764
dst_if = bridge_rtlookup(sc, dst, vlan);
2765
if (src_if == dst_if)
2766
goto drop;
2767
} else {
2768
/*
2769
* Check if it's a reserved multicast address; any address
2770
* listed in 802.1D section 7.12.6 may not be forwarded by the
2771
* bridge.
2772
* This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F
2773
*/
2774
if (dst[0] == 0x01 && dst[1] == 0x80 &&
2775
dst[2] == 0xc2 && dst[3] == 0x00 &&
2776
dst[4] == 0x00 && dst[5] <= 0x0f)
2777
goto drop;
2778
2779
/* ...forward it to all interfaces. */
2780
if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
2781
dst_if = NULL;
2782
}
2783
2784
/*
2785
* If we have a destination interface which is a member of our bridge,
2786
* OR this is a unicast packet, push it through the bpf(4) machinery.
2787
* For broadcast or multicast packets, don't bother because it will
2788
* be reinjected into ether_input. We do this before we pass the packets
2789
* through the pfil(9) framework, as it is possible that pfil(9) will
2790
* drop the packet, or possibly modify it, making it difficult to debug
2791
* firewall issues on the bridge.
2792
*/
2793
if (dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0)
2794
ETHER_BPF_MTAP(ifp, m);
2795
2796
/* run the packet filter */
2797
if (PFIL_HOOKED_IN_46) {
2798
if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
2799
return;
2800
if (m == NULL)
2801
return;
2802
}
2803
2804
if (dst_if == NULL) {
2805
bridge_broadcast(sc, src_if, m, 1);
2806
return;
2807
}
2808
2809
/*
2810
* At this point, we're dealing with a unicast frame
2811
* going to a different interface.
2812
*/
2813
if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
2814
goto drop;
2815
2816
dbif = bridge_lookup_member_if(sc, dst_if);
2817
if (dbif == NULL)
2818
/* Not a member of the bridge (anymore?) */
2819
goto drop;
2820
2821
/* Private segments cannot talk to each other */
2822
if (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE)
2823
goto drop;
2824
2825
if ((dbif->bif_flags & IFBIF_STP) &&
2826
dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
2827
goto drop;
2828
2829
if (PFIL_HOOKED_OUT_46) {
2830
if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
2831
return;
2832
if (m == NULL)
2833
return;
2834
}
2835
2836
bridge_enqueue(sc, dst_if, m, dbif);
2837
return;
2838
2839
drop:
2840
m_freem(m);
2841
}
2842
2843
/*
2844
* bridge_input:
2845
*
2846
* Receive input from a member interface. Queue the packet for
2847
* bridging if it is not for us.
2848
*/
2849
static struct mbuf *
2850
bridge_input(struct ifnet *ifp, struct mbuf *m)
2851
{
2852
struct bridge_softc *sc = NULL;
2853
struct bridge_iflist *bif, *bif2;
2854
struct ifnet *bifp;
2855
struct ether_header *eh;
2856
struct mbuf *mc, *mc2;
2857
ether_vlanid_t vlan;
2858
int error;
2859
2860
NET_EPOCH_ASSERT();
2861
2862
/* We need the Ethernet header later, so make sure we have it now. */
2863
if (m->m_len < ETHER_HDR_LEN) {
2864
m = m_pullup(m, ETHER_HDR_LEN);
2865
if (m == NULL) {
2866
/* sc is not yet known here; count the error on the member interface. */
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
2868
return (NULL);
2869
}
2870
}
2871
2872
eh = mtod(m, struct ether_header *);
2873
vlan = VLANTAGOF(m);
2874
2875
/*
2876
* If this frame has a VLAN tag and the receiving interface has a
2877
* vlan(4) trunk, then it is destined for vlan(4), not for us.
2878
* This means if vlan(4) and bridge(4) are configured on the same
2879
* interface, vlan(4) is preferred, which is what users typically
2880
* expect.
2881
*/
2882
if (vlan != DOT1Q_VID_NULL && ifp->if_vlantrunk != NULL)
2883
return (m);
2884
2885
bif = ifp->if_bridge;
2886
if (bif)
2887
sc = bif->bif_sc;
2888
2889
if (sc == NULL) {
2890
/*
2891
* This packet originated from the bridge itself, so it must
2892
* have been transmitted by netmap. Derive the "source"
2893
* interface from the source address and drop the packet if the
2894
* source address isn't known.
2895
*/
2896
KASSERT((m->m_flags & M_BRIDGE_INJECT) != 0,
2897
("%s: ifnet %p missing a bridge softc", __func__, ifp));
2898
sc = if_getsoftc(ifp);
2899
ifp = bridge_rtlookup(sc, eh->ether_shost, vlan);
2900
if (ifp == NULL) {
2901
if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
2902
m_freem(m);
2903
return (NULL);
2904
}
2905
m->m_pkthdr.rcvif = ifp;
2906
}
2907
bifp = sc->sc_ifp;
2908
if ((bifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
2909
return (m);
2910
2911
/*
2912
* Implement support for bridge monitoring. If this flag has been
2913
* set on this interface, discard the packet once we push it through
2914
* the bpf(4) machinery, but before we do, increment the byte and
2915
* packet counters associated with this interface.
2916
*/
2917
if ((bifp->if_flags & IFF_MONITOR) != 0) {
2918
m->m_pkthdr.rcvif = bifp;
2919
ETHER_BPF_MTAP(bifp, m);
2920
if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1);
2921
if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
2922
m_freem(m);
2923
return (NULL);
2924
}
2925
2926
/* Do VLAN filtering. */
2927
if (!bridge_vfilter_in(bif, m)) {
2928
if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1);
2929
m_freem(m);
2930
return (NULL);
2931
}
2932
/* bridge_vfilter_in() may add a tag */
2933
vlan = VLANTAGOF(m);
2934
2935
bridge_span(sc, m);
2936
2937
if (m->m_flags & (M_BCAST|M_MCAST)) {
2938
/* Tap off 802.1D packets; they do not get forwarded. */
2939
if (memcmp(eh->ether_dhost, bstp_etheraddr,
2940
ETHER_ADDR_LEN) == 0) {
2941
bstp_input(&bif->bif_stp, ifp, m); /* consumes mbuf */
2942
return (NULL);
2943
}
2944
2945
if ((bif->bif_flags & IFBIF_STP) &&
2946
bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
2947
return (m);
2948
}
2949
2950
/*
2951
* Make a deep copy of the packet and enqueue the copy
2952
* for bridge processing; return the original packet for
2953
* local processing.
2954
*/
2955
mc = m_dup(m, M_NOWAIT);
2956
if (mc == NULL) {
2957
return (m);
2958
}
2959
2960
/* Perform the bridge forwarding function with the copy. */
2961
bridge_forward(sc, bif, mc);
2962
2963
#ifdef DEV_NETMAP
2964
/*
2965
* If netmap is enabled and has not already seen this packet,
2966
* then it will be consumed by bridge_forward().
2967
*/
2968
if ((if_getcapenable(bifp) & IFCAP_NETMAP) != 0 &&
2969
(m->m_flags & M_BRIDGE_INJECT) == 0) {
2970
m_freem(m);
2971
return (NULL);
2972
}
2973
#endif
2974
2975
/*
2976
* Reinject the mbuf as arriving on the bridge so we have a
2977
* chance at claiming multicast packets. We cannot loop back
2978
* here from ether_input as a bridge is never a member of a
2979
* bridge.
2980
*/
2981
KASSERT(bifp->if_bridge == NULL,
2982
("loop created in bridge_input"));
2983
mc2 = m_dup(m, M_NOWAIT);
2984
if (mc2 != NULL) {
2985
/* Keep the layer3 header aligned */
2986
int i = min(mc2->m_pkthdr.len, max_protohdr);
2987
mc2 = m_copyup(mc2, i, ETHER_ALIGN);
2988
}
2989
if (mc2 != NULL) {
2990
mc2->m_pkthdr.rcvif = bifp;
2991
mc2->m_flags &= ~M_BRIDGE_INJECT;
2992
sc->sc_if_input(bifp, mc2);
2993
}
2994
2995
/* Return the original packet for local processing. */
2996
return (m);
2997
}
2998
2999
if ((bif->bif_flags & IFBIF_STP) &&
3000
bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
3001
return (m);
3002
}
3003
3004
#if defined(INET) || defined(INET6)
3005
#define CARP_CHECK_WE_ARE_DST(iface) \
3006
((iface)->if_carp && (*carp_forus_p)((iface), eh->ether_dhost))
3007
#define CARP_CHECK_WE_ARE_SRC(iface) \
3008
((iface)->if_carp && (*carp_forus_p)((iface), eh->ether_shost))
3009
#else
3010
#define CARP_CHECK_WE_ARE_DST(iface) false
3011
#define CARP_CHECK_WE_ARE_SRC(iface) false
3012
#endif
3013
3014
#ifdef DEV_NETMAP
3015
#define GRAB_FOR_NETMAP(ifp, m) do { \
3016
if ((if_getcapenable(ifp) & IFCAP_NETMAP) != 0 && \
3017
((m)->m_flags & M_BRIDGE_INJECT) == 0) { \
3018
(ifp)->if_input(ifp, m); \
3019
return (NULL); \
3020
} \
3021
} while (0)
3022
#else
3023
#define GRAB_FOR_NETMAP(ifp, m)
3024
#endif
3025
3026
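/*
 * GRAB_OUR_PACKETS(iface) expands the checks for "this frame is for
 * (or from) the given interface": frames addressed to it are delivered
 * locally (via netmap, vlan(4) or the normal input path as appropriate),
 * and frames we sent ourselves that were reflected back are dropped.
 * It must be a macro because it returns from bridge_input() directly.
 */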
#define GRAB_OUR_PACKETS(iface) \
3027
if ((iface)->if_type == IFT_GIF) \
3028
continue; \
3029
/* It is destined for us. */ \
3030
if (memcmp(IF_LLADDR(iface), eh->ether_dhost, ETHER_ADDR_LEN) == 0 || \
3031
CARP_CHECK_WE_ARE_DST(iface)) { \
3032
if (bif->bif_flags & IFBIF_LEARNING) { \
3033
error = bridge_rtupdate(sc, eh->ether_shost, \
3034
vlan, bif, 0, IFBAF_DYNAMIC); \
3035
if (error && bif->bif_addrmax) { \
3036
m_freem(m); \
3037
return (NULL); \
3038
} \
3039
} \
3040
m->m_pkthdr.rcvif = iface; \
3041
if ((iface) == ifp) { \
3042
/* Skip bridge processing... src == dest */ \
3043
return (m); \
3044
} \
3045
/* It's passing over or to the bridge, locally. */ \
3046
ETHER_BPF_MTAP(bifp, m); \
3047
if_inc_counter(bifp, IFCOUNTER_IPACKETS, 1); \
3048
if_inc_counter(bifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);\
3049
/* Hand the packet over to netmap if necessary. */ \
3050
GRAB_FOR_NETMAP(bifp, m); \
3051
/* Filter on the physical interface. */ \
3052
if (V_pfil_local_phys && PFIL_HOOKED_IN_46) { \
3053
if (bridge_pfil(&m, NULL, ifp, \
3054
PFIL_IN) != 0 || m == NULL) { \
3055
return (NULL); \
3056
} \
3057
} \
3058
if ((iface) != bifp) \
3059
ETHER_BPF_MTAP(iface, m); \
3060
/* Pass tagged packets to if_vlan, if it's loaded */ \
3061
if (VLANTAGOF(m) != 0) { \
3062
if (bifp->if_vlantrunk == NULL) { \
3063
m_freem(m); \
3064
return (NULL); \
3065
} \
3066
(*vlan_input_p)(bifp, m); \
3067
return (NULL); \
3068
} \
3069
return (m); \
3070
} \
3071
\
3072
/* We just received a packet that we sent out. */ \
3073
if (memcmp(IF_LLADDR(iface), eh->ether_shost, ETHER_ADDR_LEN) == 0 || \
3074
CARP_CHECK_WE_ARE_SRC(iface)) { \
3075
m_freem(m); \
3076
return (NULL); \
3077
}
3078
3079
/*
3080
* Unicast. Make sure it's not for the bridge.
3081
*/
3082
do { GRAB_OUR_PACKETS(bifp) } while (0);
3083
3084
/*
3085
* If member_ifaddrs is enabled, see if the packet is destined for
3086
* one of the members' addresses.
3087
*/
3088
if (V_member_ifaddrs) {
3089
/* Check the interface the packet arrived on. */
3090
do { GRAB_OUR_PACKETS(ifp) } while (0);
3091
3092
CK_LIST_FOREACH(bif2, &sc->sc_iflist, bif_next) {
3093
GRAB_OUR_PACKETS(bif2->bif_ifp)
3094
}
3095
}
3096
3097
#undef CARP_CHECK_WE_ARE_DST
3098
#undef CARP_CHECK_WE_ARE_SRC
3099
#undef GRAB_FOR_NETMAP
3100
#undef GRAB_OUR_PACKETS
3101
3102
/* Perform the bridge forwarding function. */
3103
bridge_forward(sc, bif, m);
3104
3105
return (NULL);
3106
}
3107
3108
/*
3109
* Inject a packet back into the host ethernet stack. This will generally only
3110
* be used by netmap when an application writes to the host TX ring. The
3111
* M_BRIDGE_INJECT flag ensures that the packet is re-routed to the bridge
3112
* interface after ethernet processing.
3113
*/
3114
static void
3115
bridge_inject(struct ifnet *ifp, struct mbuf *m)
3116
{
3117
struct bridge_softc *sc;
3118
3119
if (ifp->if_type == IFT_L2VLAN) {
3120
/*
3121
* vlan(4) gives us the vlan ifnet, so we need to get the
3122
* bridge softc to get a pointer to ether_input to send the
3123
* packet to.
3124
*/
3125
struct ifnet *bifp = NULL;
3126
3127
if (vlan_trunkdev_p == NULL) {
3128
m_freem(m);
3129
return;
3130
}
3131
3132
bifp = vlan_trunkdev_p(ifp);
3133
if (bifp == NULL) {
3134
m_freem(m);
3135
return;
3136
}
3137
3138
sc = if_getsoftc(bifp);
3139
sc->sc_if_input(ifp, m);
3140
return;
3141
}
3142
3143
KASSERT((if_getcapenable(ifp) & IFCAP_NETMAP) != 0,
3144
("%s: iface %s is not running in netmap mode",
3145
__func__, if_name(ifp)));
3146
KASSERT((m->m_flags & M_BRIDGE_INJECT) == 0,
3147
("%s: mbuf %p has M_BRIDGE_INJECT set", __func__, m));
3148
3149
m->m_flags |= M_BRIDGE_INJECT;
3150
sc = if_getsoftc(ifp);
3151
sc->sc_if_input(ifp, m);
3152
}
3153
3154
/*
3155
* bridge_broadcast:
3156
*
3157
* Send a frame to all interfaces that are members of
3158
* the bridge, except for the one on which the packet
3159
* arrived.
3160
*
3161
* NOTE: Releases the lock on return.
3162
*/
3163
static void
3164
bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
3165
struct mbuf *m, int runfilt)
3166
{
3167
struct bridge_iflist *dbif, *sbif;
3168
struct mbuf *mc;
3169
struct ifnet *dst_if;
3170
int used = 0, i;
3171
3172
NET_EPOCH_ASSERT();
3173
3174
sbif = bridge_lookup_member_if(sc, src_if);
3175
3176
/* Filter on the bridge interface before broadcasting */
3177
if (runfilt && PFIL_HOOKED_OUT_46) {
3178
if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
3179
return;
3180
if (m == NULL)
3181
return;
3182
}
3183
3184
CK_LIST_FOREACH(dbif, &sc->sc_iflist, bif_next) {
3185
dst_if = dbif->bif_ifp;
3186
if (dst_if == src_if)
3187
continue;
3188
3189
/* Private segments cannot talk to each other */
3190
if (sbif && (sbif->bif_flags & dbif->bif_flags & IFBIF_PRIVATE))
3191
continue;
3192
3193
if ((dbif->bif_flags & IFBIF_STP) &&
3194
dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
3195
continue;
3196
3197
if ((dbif->bif_flags & IFBIF_DISCOVER) == 0 &&
3198
(m->m_flags & (M_BCAST|M_MCAST)) == 0)
3199
continue;
3200
3201
if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
3202
continue;
3203
3204
if (CK_LIST_NEXT(dbif, bif_next) == NULL) {
3205
mc = m;
3206
used = 1;
3207
} else {
3208
mc = m_dup(m, M_NOWAIT);
3209
if (mc == NULL) {
3210
if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
3211
continue;
3212
}
3213
}
3214
3215
/*
3216
* Filter on the output interface. Pass a NULL bridge interface
3217
* pointer so we do not redundantly filter on the bridge for
3218
* each interface we broadcast on.
3219
*/
3220
if (runfilt && PFIL_HOOKED_OUT_46) {
3221
if (used == 0) {
3222
/* Keep the layer3 header aligned */
3223
i = min(mc->m_pkthdr.len, max_protohdr);
3224
mc = m_copyup(mc, i, ETHER_ALIGN);
3225
if (mc == NULL) {
3226
if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
3227
continue;
3228
}
3229
}
3230
if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
3231
continue;
3232
if (mc == NULL)
3233
continue;
3234
}
3235
3236
bridge_enqueue(sc, dst_if, mc, dbif);
3237
}
3238
if (used == 0)
3239
m_freem(m);
3240
}
3241
3242
/*
3243
* bridge_span:
3244
*
3245
* Duplicate a packet out one or more interfaces that are in span mode;
* the original mbuf is unmodified.
3247
*/
3248
static void
3249
bridge_span(struct bridge_softc *sc, struct mbuf *m)
3250
{
3251
struct bridge_iflist *bif;
3252
struct ifnet *dst_if;
3253
struct mbuf *mc;
3254
3255
NET_EPOCH_ASSERT();
3256
3257
if (CK_LIST_EMPTY(&sc->sc_spanlist))
3258
return;
3259
3260
CK_LIST_FOREACH(bif, &sc->sc_spanlist, bif_next) {
3261
dst_if = bif->bif_ifp;
3262
3263
if ((dst_if->if_drv_flags & IFF_DRV_RUNNING) == 0)
3264
continue;
3265
3266
mc = m_dup(m, M_NOWAIT);
3267
if (mc == NULL) {
3268
if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
3269
continue;
3270
}
3271
3272
bridge_enqueue(sc, dst_if, mc, bif);
3273
}
3274
}
3275
3276
/*
3277
* Incoming VLAN filtering. Given a frame and the member interface it was
3278
* received on, decide whether the port configuration allows it.
3279
*/
3280
static bool
3281
bridge_vfilter_in(const struct bridge_iflist *sbif, struct mbuf *m)
3282
{
3283
ether_vlanid_t vlan;
3284
3285
vlan = VLANTAGOF(m);
3286
/* Make sure the vlan id is reasonable. */
3287
if (vlan > DOT1Q_VID_MAX)
3288
return (false);
3289
3290
/*
3291
* If VLAN filtering isn't enabled, pass everything, but add a tag
3292
* if the port has a pvid configured.
3293
*/
3294
if ((sbif->bif_sc->sc_flags & IFBRF_VLANFILTER) == 0) {
3295
if (vlan == DOT1Q_VID_NULL &&
3296
sbif->bif_pvid != DOT1Q_VID_NULL) {
3297
m->m_pkthdr.ether_vtag = sbif->bif_pvid;
3298
m->m_flags |= M_VLANTAG;
3299
}
3300
3301
return (true);
3302
}
3303
3304
/* If Q-in-Q is disabled, check for stacked tags. */
3305
if ((sbif->bif_flags & IFBIF_QINQ) == 0) {
3306
struct ether_header *eh;
3307
uint16_t proto;
3308
3309
eh = mtod(m, struct ether_header *);
3310
proto = ntohs(eh->ether_type);
3311
3312
if (proto == ETHERTYPE_VLAN || proto == ETHERTYPE_QINQ)
3313
return (false);
3314
}
3315
3316
if (vlan == DOT1Q_VID_NULL) {
3317
/*
3318
* The frame doesn't have a tag. If the interface does not
3319
* have an untagged vlan configured, drop the frame.
3320
*/
3321
if (sbif->bif_pvid == DOT1Q_VID_NULL)
3322
return (false);
3323
3324
/*
3325
* Otherwise, insert a new tag based on the interface's
3326
* untagged vlan id.
3327
*/
3328
m->m_pkthdr.ether_vtag = sbif->bif_pvid;
3329
m->m_flags |= M_VLANTAG;
3330
} else {
3331
/*
3332
* The frame has a tag, so check it matches the interface's
3333
* vlan access list. We explicitly do not accept tagged
3334
* frames for the untagged vlan id here (unless it's also
3335
* in the access list).
3336
*/
3337
if (!BRVLAN_TEST(&sbif->bif_vlan_set, vlan))
3338
return (false);
3339
}
3340
3341
/* Accept the frame. */
3342
return (true);
3343
}
3344
3345
/*
3346
* Outgoing VLAN filtering. Given a frame, its vlan, and the member interface
3347
* we intend to send it to, decide whether the port configuration allows it to
3348
* be sent.
3349
*/
3350
static bool
3351
bridge_vfilter_out(const struct bridge_iflist *dbif, const struct mbuf *m)
3352
{
3353
struct ether_header *eh;
3354
ether_vlanid_t vlan;
3355
3356
NET_EPOCH_ASSERT();
3357
3358
/*
3359
* If the interface is in span mode, then bif_sc will be NULL.
3360
* Since the purpose of span interfaces is to receive all frames,
3361
* pass everything.
3362
*/
3363
if (dbif->bif_sc == NULL)
3364
return (true);
3365
3366
/* If VLAN filtering isn't enabled, pass everything. */
3367
if ((dbif->bif_sc->sc_flags & IFBRF_VLANFILTER) == 0)
3368
return (true);
3369
3370
vlan = VLANTAGOF(m);
3371
3372
/*
3373
* Always allow untagged 802.1D STP frames, even if they would
3374
* otherwise be dropped. This is required for STP to work on
3375
* a filtering bridge.
3376
*
3377
* Tagged STP (Cisco PVST+) is a non-standard extension, so
3378
* handle those frames via the normal filtering path.
3379
*/
3380
eh = mtod(m, struct ether_header *);
3381
if (vlan == DOT1Q_VID_NULL &&
3382
memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0)
3383
return (true);
3384
3385
/*
3386
* If the frame wasn't assigned to a vlan at ingress, drop it.
3387
* We can't forward these frames to filtering ports because we
3388
* don't know what VLAN they're supposed to be in.
3389
*/
3390
if (vlan == DOT1Q_VID_NULL)
3391
return (false);
3392
3393
/*
3394
* If the frame's vlan matches the interface's untagged vlan,
3395
* allow it.
3396
*/
3397
if (vlan == dbif->bif_pvid)
3398
return (true);
3399
3400
/*
3401
* If the frame's vlan is on the interface's tagged access list,
3402
* allow it.
3403
*/
3404
if (BRVLAN_TEST(&dbif->bif_vlan_set, vlan))
3405
return (true);
3406
3407
/* The frame was not permitted, so drop it. */
3408
return (false);
3409
}
3410
3411
/*
3412
* bridge_rtupdate:
3413
*
3414
* Add a bridge routing entry.
3415
*/
3416
static int
3417
bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
3418
ether_vlanid_t vlan, struct bridge_iflist *bif,
3419
int setflags, uint8_t flags)
3420
{
3421
struct bridge_rtnode *brt;
3422
struct bridge_iflist *obif;
3423
int error;
3424
3425
BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);
3426
3427
/* Check the source address is valid and not multicast. */
3428
if (ETHER_IS_MULTICAST(dst))
3429
return (EXTERROR(EINVAL, "Multicast address not permitted"));
3430
if (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
3431
dst[3] == 0 && dst[4] == 0 && dst[5] == 0)
3432
return (EXTERROR(EINVAL, "Zero address not permitted"));
3433
3434
/*
3435
* A route for this destination might already exist. If so,
3436
* update it, otherwise create a new one.
3437
*/
3438
if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
3439
BRIDGE_RT_LOCK(sc);
3440
3441
/* Check again, now that we have the lock. There could have
3442
* been a race and we only want to insert this once. */
3443
if (bridge_rtnode_lookup(sc, dst, vlan) != NULL) {
3444
BRIDGE_RT_UNLOCK(sc);
3445
return (0);
3446
}
3447
3448
if (sc->sc_brtcnt >= sc->sc_brtmax) {
3449
sc->sc_brtexceeded++;
3450
BRIDGE_RT_UNLOCK(sc);
3451
return (EXTERROR(ENOSPC, "Address table is full"));
3452
}
3453
/* Check per interface address limits (if enabled) */
3454
if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
3455
bif->bif_addrexceeded++;
3456
BRIDGE_RT_UNLOCK(sc);
3457
return (EXTERROR(ENOSPC,
3458
"Interface address limit exceeded"));
3459
}
3460
3461
/*
3462
* Allocate a new bridge forwarding node, and
3463
* initialize the expiration time and Ethernet
3464
* address.
3465
*/
3466
brt = uma_zalloc(V_bridge_rtnode_zone, M_NOWAIT | M_ZERO);
3467
if (brt == NULL) {
3468
BRIDGE_RT_UNLOCK(sc);
3469
return (EXTERROR(ENOMEM,
3470
"Cannot allocate address node"));
3471
}
3472
brt->brt_vnet = curvnet;
3473
3474
if (bif->bif_flags & IFBIF_STICKY)
3475
brt->brt_flags = IFBAF_STICKY;
3476
else
3477
brt->brt_flags = IFBAF_DYNAMIC;
3478
3479
memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
3480
brt->brt_vlan = vlan;
3481
3482
brt->brt_dst = bif;
3483
if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
3484
uma_zfree(V_bridge_rtnode_zone, brt);
3485
BRIDGE_RT_UNLOCK(sc);
3486
return (error);
3487
}
3488
bif->bif_addrcnt++;
3489
3490
BRIDGE_RT_UNLOCK(sc);
3491
}
3492
3493
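/*
 * The address is already known but now appears behind a different
 * member port: repoint the entry at the new port, keep the per-port
 * address counts consistent, and log the MAC move (rate-limited via
 * ppsratecheck()).
 */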
if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
3494
(obif = brt->brt_dst) != bif) {
3495
MPASS(obif != NULL);
3496
3497
BRIDGE_RT_LOCK(sc);
3498
brt->brt_dst->bif_addrcnt--;
3499
brt->brt_dst = bif;
3500
brt->brt_dst->bif_addrcnt++;
3501
BRIDGE_RT_UNLOCK(sc);
3502
3503
if (V_log_mac_flap &&
3504
ppsratecheck(&V_log_last, &V_log_count, V_log_interval)) {
3505
log(LOG_NOTICE,
3506
"%s: mac address %6D vlan %d moved from %s to %s\n",
3507
sc->sc_ifp->if_xname,
3508
&brt->brt_addr[0], ":",
3509
brt->brt_vlan,
3510
obif->bif_ifp->if_xname,
3511
bif->bif_ifp->if_xname);
3512
}
3513
}
3514
3515
if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3516
brt->brt_expire = time_uptime + sc->sc_brttimeout;
3517
if (setflags)
3518
brt->brt_flags = flags;
3519
3520
return (0);
3521
}
3522
3523
/*
3524
* bridge_rtlookup:
3525
*
3526
* Lookup the destination interface for an address.
3527
*/
3528
static struct ifnet *
3529
bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr,
3530
ether_vlanid_t vlan)
3531
{
3532
struct bridge_rtnode *brt;
3533
3534
NET_EPOCH_ASSERT();
3535
3536
if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
3537
return (NULL);
3538
3539
return (brt->brt_ifp);
3540
}
3541
3542
/*
3543
* bridge_rttrim:
3544
*
3545
* Trim the routing table so that we have a number
3546
* of routing entries less than or equal to the
3547
* maximum number.
3548
*/
3549
static void
3550
bridge_rttrim(struct bridge_softc *sc)
3551
{
3552
struct bridge_rtnode *brt, *nbrt;
3553
3554
NET_EPOCH_ASSERT();
3555
BRIDGE_RT_LOCK_ASSERT(sc);
3556
3557
/* Make sure we actually need to do this. */
3558
if (sc->sc_brtcnt <= sc->sc_brtmax)
3559
return;
3560
3561
/* Force an aging cycle; this might trim enough addresses. */
3562
bridge_rtage(sc);
3563
if (sc->sc_brtcnt <= sc->sc_brtmax)
3564
return;
3565
3566
CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3567
if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
3568
bridge_rtnode_destroy(sc, brt);
3569
if (sc->sc_brtcnt <= sc->sc_brtmax)
3570
return;
3571
}
3572
}
3573
}
3574
3575
/*
3576
* bridge_timer:
3577
*
3578
* Aging timer for the bridge.
3579
*/
3580
static void
3581
bridge_timer(void *arg)
3582
{
3583
struct bridge_softc *sc = arg;
3584
3585
BRIDGE_RT_LOCK_ASSERT(sc);
3586
3587
/* Destruction of rtnodes requires a proper vnet context */
3588
CURVNET_SET(sc->sc_ifp->if_vnet);
3589
bridge_rtage(sc);
3590
3591
if (sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)
3592
callout_reset(&sc->sc_brcallout,
3593
bridge_rtable_prune_period * hz, bridge_timer, sc);
3594
CURVNET_RESTORE();
3595
}
3596
3597
/*
3598
* bridge_rtage:
3599
*
3600
* Perform an aging cycle.
3601
*/
3602
static void
3603
bridge_rtage(struct bridge_softc *sc)
3604
{
3605
struct bridge_rtnode *brt, *nbrt;
3606
3607
BRIDGE_RT_LOCK_ASSERT(sc);
3608
3609
CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3610
if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
3611
if (time_uptime >= brt->brt_expire)
3612
bridge_rtnode_destroy(sc, brt);
3613
}
3614
}
3615
}
3616
3617
/*
3618
* bridge_rtflush:
3619
*
3620
* Remove all dynamic addresses from the bridge.
3621
*/
3622
static void
3623
bridge_rtflush(struct bridge_softc *sc, int full)
3624
{
3625
struct bridge_rtnode *brt, *nbrt;
3626
3627
BRIDGE_RT_LOCK_ASSERT(sc);
3628
3629
CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3630
if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3631
bridge_rtnode_destroy(sc, brt);
3632
}
3633
}
3634
3635
/*
3636
* bridge_rtdaddr:
3637
*
3638
* Remove an address from the table.
3639
*/
3640
static int
3641
bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr,
3642
ether_vlanid_t vlan)
3643
{
3644
struct bridge_rtnode *brt;
3645
int found = 0;
3646
3647
BRIDGE_RT_LOCK(sc);
3648
3649
/*
3650
* If vlan is DOT1Q_VID_RSVD_IMPL then we want to delete for all vlans
3651
* so the lookup may return more than one.
3652
*/
3653
while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
3654
bridge_rtnode_destroy(sc, brt);
3655
found = 1;
3656
}
3657
3658
BRIDGE_RT_UNLOCK(sc);
3659
3660
return (found ? 0 : ENOENT);
3661
}
3662
3663
/*
3664
* bridge_rtdelete:
3665
*
3666
* Delete routes to a specific member interface.
3667
*/
3668
static void
3669
bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
3670
{
3671
struct bridge_rtnode *brt, *nbrt;
3672
3673
BRIDGE_RT_LOCK_ASSERT(sc);
3674
3675
CK_LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
3676
if (brt->brt_ifp == ifp && (full ||
3677
(brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
3678
bridge_rtnode_destroy(sc, brt);
3679
}
3680
}
3681
3682
/*
3683
* bridge_rtable_init:
3684
*
3685
* Initialize the route table for this bridge.
3686
*/
3687
static void
3688
bridge_rtable_init(struct bridge_softc *sc)
3689
{
3690
int i;
3691
3692
sc->sc_rthash = malloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
3693
M_DEVBUF, M_WAITOK);
3694
3695
for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
3696
CK_LIST_INIT(&sc->sc_rthash[i]);
3697
3698
sc->sc_rthash_key = arc4random();
3699
CK_LIST_INIT(&sc->sc_rtlist);
3700
}
3701
3702
/*
3703
* bridge_rtable_fini:
3704
*
3705
* Deconstruct the route table for this bridge.
3706
*/
3707
static void
3708
bridge_rtable_fini(struct bridge_softc *sc)
3709
{
3710
3711
KASSERT(sc->sc_brtcnt == 0,
3712
("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
3713
free(sc->sc_rthash, M_DEVBUF);
3714
}
3715
3716
/*
3717
* The following hash function is adapted from "Hash Functions" by Bob Jenkins
3718
* ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
3719
*/
3720
#define mix(a, b, c) \
3721
do { \
3722
a -= b; a -= c; a ^= (c >> 13); \
3723
b -= c; b -= a; b ^= (a << 8); \
3724
c -= a; c -= b; c ^= (b >> 13); \
3725
a -= b; a -= c; a ^= (c >> 12); \
3726
b -= c; b -= a; b ^= (a << 16); \
3727
c -= a; c -= b; c ^= (b >> 5); \
3728
a -= b; a -= c; a ^= (c >> 3); \
3729
b -= c; b -= a; b ^= (a << 10); \
3730
c -= a; c -= b; c ^= (b >> 15); \
3731
} while (/*CONSTCOND*/0)
3732
3733
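/*
 * Hash a MAC address into the routing table: the six address bytes are
 * folded into the 'a' and 'b' words, mixed with the per-bridge random
 * key in 'c', and the low-order bits of the result select a bucket.
 */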
static __inline uint32_t
3734
bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
3735
{
3736
uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
3737
3738
b += addr[5] << 8;
3739
b += addr[4];
3740
a += addr[3] << 24;
3741
a += addr[2] << 16;
3742
a += addr[1] << 8;
3743
a += addr[0];
3744
3745
mix(a, b, c);
3746
3747
return (c & BRIDGE_RTHASH_MASK);
3748
}
3749
3750
#undef mix
3751
3752
static int
3753
bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
3754
{
3755
int i, d;
3756
3757
for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
3758
d = ((int)a[i]) - ((int)b[i]);
3759
}
3760
3761
return (d);
3762
}
3763
3764
/*
3765
* bridge_rtnode_lookup:
3766
*
3767
* Look up a bridge route node for the specified destination.  The vlan id
* must match, unless the caller passes DOT1Q_VID_RSVD_IMPL to request the
* first match for the address regardless of vlan.
3769
*/
3770
static struct bridge_rtnode *
3771
bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr,
3772
ether_vlanid_t vlan)
3773
{
3774
struct bridge_rtnode *brt;
3775
uint32_t hash;
3776
int dir;
3777
3778
BRIDGE_RT_LOCK_OR_NET_EPOCH_ASSERT(sc);
3779
3780
hash = bridge_rthash(sc, addr);
3781
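/*
 * Hash chains are kept sorted by address (see bridge_rtnode_insert()),
 * so the scan can stop as soon as a larger address is found.
 */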
CK_LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
3782
dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
3783
if (dir == 0 && (brt->brt_vlan == vlan || vlan == DOT1Q_VID_RSVD_IMPL))
3784
return (brt);
3785
if (dir > 0)
3786
return (NULL);
3787
}
3788
3789
return (NULL);
3790
}
3791
3792
/*
3793
* bridge_rtnode_insert:
3794
*
3795
* Insert the specified bridge node into the route table. We
3796
* assume the entry is not already in the table.
3797
*/
3798
static int
3799
bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
3800
{
3801
struct bridge_rtnode *lbrt;
3802
uint32_t hash;
3803
int dir;
3804
3805
BRIDGE_RT_LOCK_ASSERT(sc);
3806
3807
hash = bridge_rthash(sc, brt->brt_addr);
3808
3809
lbrt = CK_LIST_FIRST(&sc->sc_rthash[hash]);
3810
if (lbrt == NULL) {
3811
CK_LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
3812
goto out;
3813
}
3814
3815
do {
3816
dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
3817
if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan)
3818
return (EXTERROR(EEXIST, "Address already exists"));
3819
if (dir > 0) {
3820
CK_LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
3821
goto out;
3822
}
3823
if (CK_LIST_NEXT(lbrt, brt_hash) == NULL) {
3824
CK_LIST_INSERT_AFTER(lbrt, brt, brt_hash);
3825
goto out;
3826
}
3827
lbrt = CK_LIST_NEXT(lbrt, brt_hash);
3828
} while (lbrt != NULL);
3829
3830
#ifdef DIAGNOSTIC
3831
panic("bridge_rtnode_insert: impossible");
3832
#endif
3833
3834
out:
3835
CK_LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
3836
sc->sc_brtcnt++;
3837
3838
return (0);
3839
}
3840
3841
static void
3842
bridge_rtnode_destroy_cb(struct epoch_context *ctx)
3843
{
3844
struct bridge_rtnode *brt;
3845
3846
brt = __containerof(ctx, struct bridge_rtnode, brt_epoch_ctx);
3847
3848
CURVNET_SET(brt->brt_vnet);
3849
uma_zfree(V_bridge_rtnode_zone, brt);
3850
CURVNET_RESTORE();
3851
}
3852
3853
/*
3854
* bridge_rtnode_destroy:
3855
*
3856
* Destroy a bridge rtnode.
3857
*/
3858
static void
3859
bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
3860
{
3861
BRIDGE_RT_LOCK_ASSERT(sc);
3862
3863
CK_LIST_REMOVE(brt, brt_hash);
3864
3865
CK_LIST_REMOVE(brt, brt_list);
3866
sc->sc_brtcnt--;
3867
brt->brt_dst->bif_addrcnt--;
3868
3869
NET_EPOCH_CALL(bridge_rtnode_destroy_cb, &brt->brt_epoch_ctx);
3870
}
3871
3872
/*
3873
* bridge_rtable_expire:
3874
*
3875
* Set the expiry time for all routes on an interface.
3876
*/
3877
static void
3878
bridge_rtable_expire(struct ifnet *ifp, int age)
3879
{
3880
struct bridge_iflist *bif = NULL;
3881
struct bridge_softc *sc = NULL;
3882
struct bridge_rtnode *brt;
3883
3884
CURVNET_SET(ifp->if_vnet);
3885
3886
bif = ifp->if_bridge;
3887
if (bif)
3888
sc = bif->bif_sc;
3889
MPASS(sc != NULL);
3890
BRIDGE_RT_LOCK(sc);
3891
3892
/*
3893
* If age is zero, flush all dynamic routes for the interface; otherwise
* cap the expiry time of its dynamic routes at 'age' seconds from now.
3895
*/
3896
if (age == 0)
3897
bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
3898
else {
3899
CK_LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
3900
/* Cap the expiry time to 'age' */
3901
if (brt->brt_ifp == ifp &&
3902
brt->brt_expire > time_uptime + age &&
3903
(brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
3904
brt->brt_expire = time_uptime + age;
3905
}
3906
}
3907
BRIDGE_RT_UNLOCK(sc);
3908
CURVNET_RESTORE();
3909
}
3910
3911
/*
3912
* bridge_state_change:
3913
*
3914
* Callback from the bridgestp code when a port changes states.
3915
*/
3916
static void
3917
bridge_state_change(struct ifnet *ifp, int state)
3918
{
3919
struct bridge_iflist *bif = ifp->if_bridge;
3920
struct bridge_softc *sc = bif->bif_sc;
3921
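	/* Indexed by the BSTP_IFSTATE_* port state values. */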
	static const char *stpstates[] = {
		"disabled",
		"listening",
		"learning",
		"forwarding",
		"blocking",
		"discarding"
	};

	CURVNET_SET(ifp->if_vnet);
	if (V_log_stp)
		log(LOG_NOTICE, "%s: state changed to %s on %s\n",
		    sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname);
	CURVNET_RESTORE();
}

/*
 * Send bridge packets through pfil if they are one of the types pfil can deal
 * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
 * question.)  If *bifp or *ifp is NULL then packet filtering is skipped for
 * that interface.
 */
static int
bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
{
	int snap, error, i;
	struct ether_header *eh1, eh2;
	struct llc llc1;
	u_int16_t ether_type;
	pfil_return_t rv;
#ifdef INET
	struct ip *ip = NULL;
	int hlen = 0;
#endif

	snap = 0;
	error = -1;	/* Default to an error unless we explicitly set it to 0. */

#if 0
	/* We may return with the IP fields swapped; ensure it's not shared. */
	KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
#endif

	if (V_pfil_bridge == 0 && V_pfil_member == 0 && V_pfil_ipfw == 0)
		return (0); /* filtering is disabled */

	i = min((*mp)->m_pkthdr.len, max_protohdr);
	if ((*mp)->m_len < i) {
		*mp = m_pullup(*mp, i);
		if (*mp == NULL) {
			printf("%s: m_pullup failed\n", __func__);
			return (-1);
		}
	}

	eh1 = mtod(*mp, struct ether_header *);
	ether_type = ntohs(eh1->ether_type);

	/*
	 * Check for SNAP/LLC.  A type/length value below ETHERMTU is an
	 * 802.3 length rather than an Ethertype, so look for an 802.2
	 * LLC/SNAP header and, if one is present, use the encapsulated
	 * Ethertype instead.
	 */
	if (ether_type < ETHERMTU) {
		struct llc *llc2 = (struct llc *)(eh1 + 1);

		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
		    llc2->llc_dsap == LLC_SNAP_LSAP &&
		    llc2->llc_ssap == LLC_SNAP_LSAP &&
		    llc2->llc_control == LLC_UI) {
			ether_type = htons(llc2->llc_un.type_snap.ether_type);
			snap = 1;
		}
	}

	/*
	 * If we're trying to filter bridge traffic, only look at traffic for
	 * protocols available in the kernel (IPv4 and/or IPv6) to avoid
	 * passing traffic for an unsupported protocol to the filter.  This is
	 * lame since if we really wanted, say, an AppleTalk filter, we are
	 * hosed, but of course we don't have an AppleTalk filter to begin
	 * with.  (Note that since pfil doesn't understand ARP it will pass
	 * *ALL* ARP traffic.)
	 */
	switch (ether_type) {
#ifdef INET
	case ETHERTYPE_ARP:
	case ETHERTYPE_REVARP:
		if (V_pfil_ipfw_arp == 0)
			return (0); /* Automatically pass */

		/* FALLTHROUGH */
	case ETHERTYPE_IP:
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
#endif /* INET6 */
		break;

	default:
		/*
		 * We get here if the packet isn't from a supported
		 * protocol.  Check to see if the user wants to pass
		 * non-IP packets; these will not be checked by pfil(9)
		 * and are passed unconditionally, so the default is to
		 * drop.
		 */
		if (V_pfil_onlyip)
			goto bad;
	}

	/* Run the packet through pfil before stripping link headers */
	if (PFIL_HOOKED_OUT(V_link_pfil_head) && V_pfil_ipfw != 0 &&
	    dir == PFIL_OUT && ifp != NULL) {
		switch (pfil_mbuf_out(V_link_pfil_head, mp, ifp, NULL)) {
		case PFIL_DROPPED:
			return (EACCES);
		case PFIL_CONSUMED:
			return (0);
		}
	}

	/* Strip off the Ethernet header and keep a copy. */
	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t) &eh2);
	m_adj(*mp, ETHER_HDR_LEN);

	/* Strip off snap header, if present */
	if (snap) {
		m_copydata(*mp, 0, sizeof(struct llc), (caddr_t) &llc1);
		m_adj(*mp, sizeof(struct llc));
	}

	/*
	 * Check the IP header for alignment and errors
	 */
	if (dir == PFIL_IN) {
		switch (ether_type) {
#ifdef INET
		case ETHERTYPE_IP:
			error = bridge_ip_checkbasic(mp);
			break;
#endif
#ifdef INET6
		case ETHERTYPE_IPV6:
			error = bridge_ip6_checkbasic(mp);
			break;
#endif /* INET6 */
		default:
			error = 0;
		}
		if (error)
			goto bad;
	}

	error = 0;

	/*
	 * Run the packet through pfil
	 */
	rv = PFIL_PASS;
	switch (ether_type) {
#ifdef INET
	case ETHERTYPE_IP:
		/*
		 * Run pfil on the member interface and the bridge; either
		 * can be skipped by clearing pfil_member or pfil_bridge.
		 *
		 * Keep the order:
		 *   in_if -> bridge_if -> out_if
		 */
		if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
		    pfil_mbuf_out(V_inet_pfil_head, mp, bifp, NULL)) !=
		    PFIL_PASS)
			break;

		if (V_pfil_member && ifp != NULL) {
			rv = (dir == PFIL_OUT) ?
			    pfil_mbuf_out(V_inet_pfil_head, mp, ifp, NULL) :
			    pfil_mbuf_in(V_inet_pfil_head, mp, ifp, NULL);
			if (rv != PFIL_PASS)
				break;
		}

		if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
		    pfil_mbuf_in(V_inet_pfil_head, mp, bifp, NULL)) !=
		    PFIL_PASS)
			break;

		/*
		 * Check whether we need to fragment the packet;
		 * bridge_fragment() generates an mbuf chain of packets that
		 * already include Ethernet headers.
		 */
		if (V_pfil_member && ifp != NULL && dir == PFIL_OUT) {
			i = (*mp)->m_pkthdr.len;
			if (i > ifp->if_mtu) {
				error = bridge_fragment(ifp, mp, &eh2, snap,
				    &llc1);
				return (error);
			}
		}

		/* Recalculate the ip checksum. */
		ip = mtod(*mp, struct ip *);
		hlen = ip->ip_hl << 2;
		if (hlen < sizeof(struct ip))
			goto bad;
		if (hlen > (*mp)->m_len) {
			if ((*mp = m_pullup(*mp, hlen)) == NULL)
				goto bad;
			ip = mtod(*mp, struct ip *);
			if (ip == NULL)
				goto bad;
		}
		ip->ip_sum = 0;
		if (hlen == sizeof(struct ip))
			ip->ip_sum = in_cksum_hdr(ip);
		else
			ip->ip_sum = in_cksum(*mp, hlen);

		break;
#endif /* INET */
#ifdef INET6
	case ETHERTYPE_IPV6:
		if (V_pfil_bridge && dir == PFIL_OUT && bifp != NULL && (rv =
		    pfil_mbuf_out(V_inet6_pfil_head, mp, bifp, NULL)) !=
		    PFIL_PASS)
			break;

		if (V_pfil_member && ifp != NULL) {
			rv = (dir == PFIL_OUT) ?
			    pfil_mbuf_out(V_inet6_pfil_head, mp, ifp, NULL) :
			    pfil_mbuf_in(V_inet6_pfil_head, mp, ifp, NULL);
			if (rv != PFIL_PASS)
				break;
		}

		if (V_pfil_bridge && dir == PFIL_IN && bifp != NULL && (rv =
		    pfil_mbuf_in(V_inet6_pfil_head, mp, bifp, NULL)) !=
		    PFIL_PASS)
			break;
		break;
#endif
	}

	switch (rv) {
	case PFIL_CONSUMED:
		return (0);
	case PFIL_DROPPED:
		return (EACCES);
	default:
		break;
	}

	error = -1;

	/*
	 * Finally, put everything back the way it was and return
	 */
	if (snap) {
		M_PREPEND(*mp, sizeof(struct llc), M_NOWAIT);
		if (*mp == NULL)
			return (error);
		bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc));
	}

	M_PREPEND(*mp, ETHER_HDR_LEN, M_NOWAIT);
	if (*mp == NULL)
		return (error);
	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);

	return (0);

bad:
	m_freem(*mp);
	*mp = NULL;
	return (error);
}

#ifdef INET
/*
 * Perform basic checks on the header size, since pfil assumes
 * ip_input has already processed the packet for it.  Cut-and-pasted
 * from ip_input.c.  Given how simple the IPv6 version is, does the
 * IPv4 version really need to be this complicated?
 *
 * XXX Should we update ipstat here, or not?
 * XXX Right now we update ipstat but not
 * XXX csum_counter.
 */
static int
bridge_ip_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip *ip;
	int len, hlen;
	u_short sum;

	if (*mp == NULL)
		return (-1);

	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		if ((m = m_copyup(m, sizeof(struct ip),
		    (max_linkhdr + 3) & ~3)) == NULL) {
			/* XXXJRT new stat, please */
			KMOD_IPSTAT_INC(ips_toosmall);
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
			KMOD_IPSTAT_INC(ips_toosmall);
			goto bad;
		}
	}
	ip = mtod(m, struct ip *);
	if (ip == NULL)
		goto bad;

	if (ip->ip_v != IPVERSION) {
		KMOD_IPSTAT_INC(ips_badvers);
		goto bad;
	}
	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) {	/* minimum header length */
		KMOD_IPSTAT_INC(ips_badhlen);
		goto bad;
	}
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			KMOD_IPSTAT_INC(ips_badhlen);
			goto bad;
		}
		ip = mtod(m, struct ip *);
		if (ip == NULL)
			goto bad;
	}

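	/*
	 * Use the header checksum verdict already recorded by the hardware
	 * if there is one; otherwise verify the checksum in software.
	 */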
	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
	} else {
		if (hlen == sizeof(struct ip)) {
			sum = in_cksum_hdr(ip);
		} else {
			sum = in_cksum(m, hlen);
		}
	}
	if (sum) {
		KMOD_IPSTAT_INC(ips_badsum);
		goto bad;
	}

	/* Retrieve the packet length. */
	len = ntohs(ip->ip_len);

	/*
	 * Check for additional length bogosity
	 */
	if (len < hlen) {
		KMOD_IPSTAT_INC(ips_badlen);
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < len) {
		KMOD_IPSTAT_INC(ips_tooshort);
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return (0);

bad:
	*mp = m;
	return (-1);
}
#endif /* INET */

#ifdef INET6
/*
 * Same as above, but for IPv6.
 * Cut-and-pasted from ip6_input.c.
 * XXX Should we update ip6stat, or not?
 */
static int
bridge_ip6_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip6_hdr *ip6;

	/*
	 * If the IPv6 header is not aligned, slurp it up into a new
	 * mbuf with space for link headers, in the event we forward
	 * it.  Otherwise, if it is aligned, make sure the entire base
	 * IPv6 header is in the first mbuf of the chain.
	 */
	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
		struct ifnet *inifp = m->m_pkthdr.rcvif;
		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
		    (max_linkhdr + 3) & ~3)) == NULL) {
			/* XXXJRT new stat, please */
			IP6STAT_INC(ip6s_toosmall);
			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
		struct ifnet *inifp = m->m_pkthdr.rcvif;
		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
			IP6STAT_INC(ip6s_toosmall);
			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
			goto bad;
		}
	}

	ip6 = mtod(m, struct ip6_hdr *);

	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
		IP6STAT_INC(ip6s_badvers);
		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return (0);

bad:
	*mp = m;
	return (-1);
}
#endif /* INET6 */

#ifdef INET
/*
 * bridge_fragment:
 *
 *	Fragment an mbuf chain into multiple packets and prepend the
 *	Ethernet header to each one.
 */
static int
bridge_fragment(struct ifnet *ifp, struct mbuf **mp, struct ether_header *eh,
    int snap, struct llc *llc)
{
	struct mbuf *m = *mp, *nextpkt = NULL, *mprev = NULL, *mcur = NULL;
	struct ip *ip;
	int error = -1;

	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
		goto dropit;
	ip = mtod(m, struct ip *);

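	/*
	 * Mark the header checksum as still to be computed; ip_fragment()
	 * fills it in for each fragment (in software unless the interface
	 * can offload it).
	 */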
	m->m_pkthdr.csum_flags |= CSUM_IP;
	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist);
	if (error)
		goto dropit;

	/*
	 * Walk the chain and re-add the Ethernet header for
	 * each mbuf packet.
	 */
	for (mcur = m; mcur; mcur = mcur->m_nextpkt) {
		nextpkt = mcur->m_nextpkt;
		mcur->m_nextpkt = NULL;
		if (snap) {
			M_PREPEND(mcur, sizeof(struct llc), M_NOWAIT);
			if (mcur == NULL) {
				error = ENOBUFS;
				if (mprev != NULL)
					mprev->m_nextpkt = nextpkt;
				goto dropit;
			}
			bcopy(llc, mtod(mcur, caddr_t), sizeof(struct llc));
		}

		M_PREPEND(mcur, ETHER_HDR_LEN, M_NOWAIT);
		if (mcur == NULL) {
			error = ENOBUFS;
			if (mprev != NULL)
				mprev->m_nextpkt = nextpkt;
			goto dropit;
		}
		bcopy(eh, mtod(mcur, caddr_t), ETHER_HDR_LEN);

		/*
		 * The previous two M_PREPEND could have inserted one or two
		 * mbufs in front so we have to update the previous packet's
		 * m_nextpkt.
		 */
		mcur->m_nextpkt = nextpkt;
		if (mprev != NULL)
			mprev->m_nextpkt = mcur;
		else {
			/*
			 * The first mbuf in the original chain needs to be
			 * updated.
			 */
			*mp = mcur;
		}
		mprev = mcur;
	}

	KMOD_IPSTAT_INC(ips_fragmented);
	return (error);

dropit:
	for (mcur = *mp; mcur; mcur = m) {	/* dropping the full packet chain */
		m = mcur->m_nextpkt;
		m_freem(mcur);
	}
	return (error);
}
#endif /* INET */

static void
bridge_linkstate(struct ifnet *ifp)
{
	struct bridge_softc *sc = NULL;
	struct bridge_iflist *bif;
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);

	bif = ifp->if_bridge;
	if (bif)
		sc = bif->bif_sc;

	if (sc != NULL) {
		bridge_linkcheck(sc);
		bstp_linkstate(&bif->bif_stp);
	}

	NET_EPOCH_EXIT(et);
}

static void
bridge_linkcheck(struct bridge_softc *sc)
{
	struct bridge_iflist *bif;
	int new_link, hasls;

	BRIDGE_LOCK_OR_NET_EPOCH_ASSERT(sc);

	new_link = LINK_STATE_DOWN;
	hasls = 0;
	/* Our link is considered up if at least one of our ports is active */
	CK_LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		if (bif->bif_ifp->if_capabilities & IFCAP_LINKSTATE)
			hasls++;
		if (bif->bif_ifp->if_link_state == LINK_STATE_UP) {
			new_link = LINK_STATE_UP;
			break;
		}
	}
	if (!CK_LIST_EMPTY(&sc->sc_iflist) && !hasls) {
		/* If no interfaces support link-state then we default to up */
		new_link = LINK_STATE_UP;
	}
	if_link_state_change(sc->sc_ifp, new_link);
}