br_forward.c
/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"

static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb));

/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) &&
		p->state == BR_STATE_FORWARDING;
}

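/* Final transmit step: re-add the Ethernet header and hand the frame to
 * the egress device; frames the device cannot transmit are dropped.
 */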
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
	/* ip_fragment doesn't copy the MAC header */
	if (nf_bridge_maybe_copy_header(skb) ||
	    !is_skb_forwardable(skb->dev, skb)) {
		kfree_skb(skb);
	} else {
		skb_push(skb, ETH_HLEN);
		br_drop_fake_rtable(skb);
		dev_queue_xmit(skb);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);

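/* Run the NF_BR_POST_ROUTING hook before handing the frame to
 * br_dev_queue_push_xmit() for transmission.
 */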
int br_forward_finish(struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}
EXPORT_SYMBOL_GPL(br_forward_finish);

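/* Transmit a locally originated frame out of port 'to', passing it through
 * the NF_BR_LOCAL_OUT hook (or straight to netpoll when that is active).
 */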
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
	if (!skb)
		return;

	skb->dev = to->dev;

	if (unlikely(netpoll_tx_running(to->br->dev))) {
		if (!is_skb_forwardable(skb->dev, skb))
			kfree_skb(skb);
		else {
			skb_push(skb, ETH_HLEN);
			br_netpoll_send_skb(to, skb);
		}
		return;
	}

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
		br_forward_finish);
}

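/* Forward a frame received on another port out of port 'to', passing it
 * through the NF_BR_FORWARD hook.
 */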
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_device *indev;

	if (skb_warn_if_lro(skb)) {
		kfree_skb(skb);
		return;
	}

	skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
	if (!skb)
		return;

	indev = skb->dev;
	skb->dev = to->dev;
	skb_forward_csum(skb);

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
		br_forward_finish);
}

/* called with rcu_read_lock */
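/* Deliver a frame originated by the bridge device itself to a single port. */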
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (to && should_deliver(to, skb)) {
		__br_deliver(to, skb);
		return;
	}

	kfree_skb(skb);
}

/* called with rcu_read_lock */
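/* Forward a received frame to a single port.  A non-NULL skb0 means the
 * caller still needs the original skb, so a clone is forwarded instead.
 */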
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
{
	if (should_deliver(to, skb)) {
		if (skb0)
			deliver_clone(to, skb, __br_forward);
		else
			__br_forward(to, skb);
		return;
	}

	if (!skb0)
		kfree_skb(skb);
}

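/* Clone the frame and hand the clone to __packet_hook, accounting a
 * tx_dropped on the bridge device if the clone cannot be allocated.
 */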
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.tx_dropped++;
		return -ENOMEM;
	}

	__packet_hook(prev, skb);
	return 0;
}

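/* Flooding helper: if 'p' is a valid destination, flush the previously
 * selected port with a clone and remember 'p' as the new candidate, so the
 * last eligible port can be given the original skb without cloning.
 */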
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	int err;

	if (!should_deliver(p, skb))
		return prev;

	if (!prev)
		goto out;

	err = deliver_clone(prev, skb, __packet_hook);
	if (err)
		return ERR_PTR(err);

out:
	return p;
}

/* called under bridge lock */
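/* Flood the frame out of every eligible port; earlier ports receive clones,
 * and the last eligible port receives the original skb (or a clone when the
 * caller passes skb0 and keeps the original).
 */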
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
		     struct sk_buff *skb0,
		     void (*__packet_hook)(const struct net_bridge_port *p,
					   struct sk_buff *skb),
		     bool unicast)
{
	struct net_bridge_port *p;
	struct net_bridge_port *prev;

	prev = NULL;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* Do not flood unicast traffic to ports that turn it off */
		if (unicast && !(p->flags & BR_FLOOD))
			continue;

		/* Do not flood to ports that enable proxy ARP */
		if (p->flags & BR_PROXYARP)
			continue;

		prev = maybe_deliver(prev, p, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}


/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast)
{
	br_flood(br, skb, NULL, __br_deliver, unicast);
}

/* called under bridge lock */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
		      struct sk_buff *skb2, bool unicast)
{
	br_flood(br, skb, skb2, __br_forward, unicast);
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* called with rcu_read_lock */
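/* Flood to the ports in the multicast group and to the multicast router
 * ports.  Both lists are walked in parallel and merged by comparing port
 * pointers, so a port appearing on both lists is only serviced once.
 */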
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			       struct sk_buff *skb, struct sk_buff *skb0,
			       void (*__packet_hook)(
					const struct net_bridge_port *p,
					struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	struct hlist_node *rp;

	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
	p = mdst ? rcu_dereference(mdst->ports) : NULL;
	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		lport = p ? p->port : NULL;
		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
			     NULL;

		port = (unsigned long)lport > (unsigned long)rport ?
		       lport : rport;

		prev = maybe_deliver(prev, port, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;

		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb)
{
	br_multicast_flood(mdst, skb, NULL, __br_deliver);
}

/* called with rcu_read_lock */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb, struct sk_buff *skb2)
{
	br_multicast_flood(mdst, skb, skb2, __br_forward);
}
#endif