Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
awilliam
GitHub Repository: awilliam/linux-vfio
Path: blob/master/net/bridge/br_forward.c
15109 views
1
/*
2
* Forwarding decision
3
* Linux ethernet bridge
4
*
5
* Authors:
6
* Lennert Buytenhek <[email protected]>
7
*
8
* This program is free software; you can redistribute it and/or
9
* modify it under the terms of the GNU General Public License
10
* as published by the Free Software Foundation; either version
11
* 2 of the License, or (at your option) any later version.
12
*/
13
14
#include <linux/err.h>
15
#include <linux/slab.h>
16
#include <linux/kernel.h>
17
#include <linux/netdevice.h>
18
#include <linux/netpoll.h>
19
#include <linux/skbuff.h>
20
#include <linux/if_vlan.h>
21
#include <linux/netfilter_bridge.h>
22
#include "br_private.h"
23
24
/* Forward declaration: deliver_clone() is defined later in this file but
 * is needed by br_forward() above its definition.
 */
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb));
28
29
/* Don't forward packets to originating port or forwarding diasabled */
30
static inline int should_deliver(const struct net_bridge_port *p,
31
const struct sk_buff *skb)
32
{
33
return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
34
p->state == BR_STATE_FORWARDING);
35
}
36
37
static inline unsigned packet_length(const struct sk_buff *skb)
38
{
39
return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
40
}
41
42
int br_dev_queue_push_xmit(struct sk_buff *skb)
43
{
44
/* ip_fragment doesn't copy the MAC header */
45
if (nf_bridge_maybe_copy_header(skb) ||
46
(packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))) {
47
kfree_skb(skb);
48
} else {
49
skb_push(skb, ETH_HLEN);
50
dev_queue_xmit(skb);
51
}
52
53
return 0;
54
}
55
56
/* Pass the skb through the bridge POST_ROUTING netfilter hook; if it is
 * accepted, br_dev_queue_push_xmit() performs the actual transmit.
 */
int br_forward_finish(struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);

}
62
63
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
64
{
65
skb->dev = to->dev;
66
67
if (unlikely(netpoll_tx_running(to->dev))) {
68
if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
69
kfree_skb(skb);
70
else {
71
skb_push(skb, ETH_HLEN);
72
br_netpoll_send_skb(to, skb);
73
}
74
return;
75
}
76
77
NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
78
br_forward_finish);
79
}
80
81
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
82
{
83
struct net_device *indev;
84
85
if (skb_warn_if_lro(skb)) {
86
kfree_skb(skb);
87
return;
88
}
89
90
indev = skb->dev;
91
skb->dev = to->dev;
92
skb_forward_csum(skb);
93
94
NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
95
br_forward_finish);
96
}
97
98
/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (!should_deliver(to, skb)) {
		kfree_skb(skb);
		return;
	}

	__br_deliver(to, skb);
}
108
109
/* called with rcu_read_lock */
110
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
111
{
112
if (should_deliver(to, skb)) {
113
if (skb0)
114
deliver_clone(to, skb, __br_forward);
115
else
116
__br_forward(to, skb);
117
return;
118
}
119
120
if (!skb0)
121
kfree_skb(skb);
122
}
123
124
static int deliver_clone(const struct net_bridge_port *prev,
125
struct sk_buff *skb,
126
void (*__packet_hook)(const struct net_bridge_port *p,
127
struct sk_buff *skb))
128
{
129
struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
130
131
skb = skb_clone(skb, GFP_ATOMIC);
132
if (!skb) {
133
dev->stats.tx_dropped++;
134
return -ENOMEM;
135
}
136
137
__packet_hook(prev, skb);
138
return 0;
139
}
140
141
/* Flood helper: flush the pending port @prev (if any) with a clone of
 * @skb and make @p the new pending port.  Ports failing
 * should_deliver() are skipped (prev is returned unchanged).  Returns
 * an ERR_PTR on clone failure.
 */
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	if (!should_deliver(p, skb))
		return prev;

	if (prev) {
		int err = deliver_clone(prev, skb, __packet_hook);

		if (err)
			return ERR_PTR(err);
	}

	return p;
}
162
163
/* called under bridge lock */
164
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
165
struct sk_buff *skb0,
166
void (*__packet_hook)(const struct net_bridge_port *p,
167
struct sk_buff *skb))
168
{
169
struct net_bridge_port *p;
170
struct net_bridge_port *prev;
171
172
prev = NULL;
173
174
list_for_each_entry_rcu(p, &br->port_list, list) {
175
prev = maybe_deliver(prev, p, skb, __packet_hook);
176
if (IS_ERR(prev))
177
goto out;
178
}
179
180
if (!prev)
181
goto out;
182
183
if (skb0)
184
deliver_clone(prev, skb, __packet_hook);
185
else
186
__packet_hook(prev, skb);
187
return;
188
189
out:
190
if (!skb0)
191
kfree_skb(skb);
192
}
193
194
195
/* called with rcu_read_lock */
/* Flood a locally-originated frame out of every eligible port. */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
{
	br_flood(br, skb, NULL, __br_deliver);
}
200
201
/* called under bridge lock */
/* Flood a received frame out of every eligible port; @skb2 non-NULL
 * tells br_flood() the caller still needs @skb (it is cloned instead
 * of consumed).
 */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
		      struct sk_buff *skb2)
{
	br_flood(br, skb, skb2, __br_forward);
}
207
208
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
209
/* called with rcu_read_lock */
/* Deliver a multicast frame to the union of the mdb entry's port-group
 * list and the bridge's multicast-router list, without delivering twice
 * to a port present on both.
 */
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			       struct sk_buff *skb, struct sk_buff *skb0,
			       void (*__packet_hook)(
					const struct net_bridge_port *p,
					struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	struct hlist_node *rp;

	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
	p = mdst ? rcu_dereference(mdst->ports) : NULL;
	/* Merge-walk both lists: each step picks the port with the
	 * larger pointer value and then advances whichever list(s)
	 * reference that port, so a port on both lists is visited once.
	 * NOTE(review): this relies on both lists being kept sorted by
	 * port pointer by the code that maintains them — verify there.
	 */
	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		lport = p ? p->port : NULL;
		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
			     NULL;

		port = (unsigned long)lport > (unsigned long)rport ?
		       lport : rport;

		/* clones to every port but the last; see br_flood() */
		prev = maybe_deliver(prev, port, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;

		/* advance the list(s) whose head was just processed */
		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)
		goto out;

	/* last pending port consumes skb itself unless the caller still
	 * needs it (skb0 set), in which case a clone is sent
	 */
	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}
257
258
/* called with rcu_read_lock */
/* Multicast-deliver a locally-originated frame via br_multicast_flood(). */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb)
{
	br_multicast_flood(mdst, skb, NULL, __br_deliver);
}
264
265
/* called with rcu_read_lock */
/* Multicast-forward a received frame; @skb2 non-NULL means the caller
 * still needs @skb, so only clones are sent out.
 */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb, struct sk_buff *skb2)
{
	br_multicast_flood(mdst, skb, skb2, __br_forward);
}
271
#endif
272
273