2 * Distributed under the terms of the GNU GPL version 2.
3 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
9 /* Functions for executing flow actions. */
11 #include <linux/skbuff.h>
14 #include <linux/tcp.h>
15 #include <linux/udp.h>
16 #include <linux/in6.h>
17 #include <linux/if_arp.h>
18 #include <linux/if_vlan.h>
19 #include <net/inet_ecn.h>
21 #include <net/checksum.h>
26 #include "loop_counter.h"
27 #include "openvswitch/datapath-protocol.h"
31 static int do_execute_actions(struct datapath *, struct sk_buff *,
32 struct sw_flow_actions *acts);
34 static struct sk_buff *make_writable(struct sk_buff *skb, unsigned min_headroom)
/* Make 'skb' safe to modify in place.  If the skb is cloned (its data is
 * shared with another skb), replace it with a private copy that has at
 * least 'min_headroom' bytes of headroom; otherwise pull the transport
 * header into the linear data area so later header writes are valid.
 * NOTE(review): this view is line-sampled; the 'nskb' declaration, the
 * copy-failure path, and the function's returns are not visible here. */
36 if (skb_cloned(skb)) {
/* Keep whatever headroom the original already had, if larger. */
38 unsigned headroom = max(min_headroom, skb_headroom(skb));
40 nskb = skb_copy_expand(skb, headroom, skb_tailroom(skb), GFP_ATOMIC);
/* Carry the checksum state over to the private copy. */
42 set_skb_csum_bits(skb, nskb);
/* Non-cloned path: linearize up to a full TCP header (bounded by the
 * actual packet length, so short packets are not rejected here). */
47 unsigned int hdr_len = (skb_transport_offset(skb)
48 + sizeof(struct tcphdr));
49 if (pskb_may_pull(skb, min(hdr_len, skb->len)))
56 static struct sk_buff *strip_vlan(struct sk_buff *skb)
/* Remove the 802.1Q VLAN tag from 'skb', handling both the
 * hardware-accelerated (out-of-band) tag and an in-band tag embedded in
 * the packet data.  NOTE(review): line-sampled view; the hwaccel-clear
 * statement, 'eh' declaration, error returns, and the final return are
 * not visible here. */
60 if (vlan_tx_tag_present(skb)) {
/* In-band tag: packet must actually be 802.1Q and long enough to hold
 * a full VLAN Ethernet header. */
65 if (unlikely(vlan_eth_hdr(skb)->h_vlan_proto != htons(ETH_P_8021Q) ||
66 skb->len < VLAN_ETH_HLEN))
69 skb = make_writable(skb, 0);
/* CHECKSUM_COMPLETE covers the whole packet; subtract the 4 tag bytes
 * that are about to disappear so skb->csum stays correct. */
73 if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
74 skb->csum = csum_sub(skb->csum, csum_partial(skb->data
75 + ETH_HLEN, VLAN_HLEN, 0));
/* Slide the destination+source MAC addresses over the tag, then pull
 * the now-dead 4 bytes off the front of the packet. */
77 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
79 eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
/* The inner EtherType becomes the packet's protocol; the MAC header
 * offset moved forward by the tag size. */
81 skb->protocol = eh->h_proto;
82 skb->mac_header += VLAN_HLEN;
87 static struct sk_buff *modify_vlan_tci(struct sk_buff *skb, __be16 tci)
/* Set the 802.1Q TCI of 'skb' to 'tci' (network byte order).  If there
 * is no in-band tag in the packet data, attach the tag out-of-band via
 * the hwaccel mechanism; otherwise rewrite the in-band TCI field and
 * patch the CHECKSUM_COMPLETE checksum incrementally.
 * NOTE(review): line-sampled view; the 'old_tci' declaration, error
 * returns, and the final return are not visible here. */
89 struct vlan_ethhdr *vh;
/* Already tagged out-of-band, or not an 802.1Q frame at all: just set
 * (or replace) the hwaccel tag. */
92 if (vlan_tx_tag_present(skb) || skb->protocol != htons(ETH_P_8021Q))
93 return __vlan_hwaccel_put_tag(skb, ntohs(tci));
95 skb = make_writable(skb, 0);
/* Must hold a complete VLAN Ethernet header before touching the TCI. */
99 if (unlikely(skb->len < VLAN_ETH_HLEN))
102 vh = vlan_eth_hdr(skb);
104 old_tci = vh->h_vlan_TCI;
105 vh->h_vlan_TCI = tci;
/* Incremental checksum update: fold out the old TCI and fold in the
 * new one ({~old, new} pair) without re-summing the whole packet. */
107 if (get_ip_summed(skb) == OVS_CSUM_COMPLETE) {
108 __be16 diff[] = { ~old_tci, vh->h_vlan_TCI };
109 skb->csum = ~csum_partial((char *)diff, sizeof(diff), ~skb->csum);
115 static bool is_ip(struct sk_buff *skb)
/* True if the flow key says this is an IPv4 packet and flow extraction
 * found a transport header (transport offset lies past the network
 * header), i.e. the L3/L4 fields below are safe to touch. */
117 return (OVS_CB(skb)->flow->key.eth.type == htons(ETH_P_IP) &&
118 skb->transport_header > skb->network_header);
121 static __sum16 *get_l4_checksum(struct sk_buff *skb)
/* Return a pointer to the L4 checksum field for TCP or UDP packets, or
 * (in the fall-through path, not visible in this view) presumably NULL
 * when the protocol is neither or the header is truncated. */
123 u8 nw_proto = OVS_CB(skb)->flow->key.ip.nw_proto;
124 int transport_len = skb->len - skb_transport_offset(skb);
125 if (nw_proto == IPPROTO_TCP) {
/* Only trust the header pointer if a full header is present. */
126 if (likely(transport_len >= sizeof(struct tcphdr)))
127 return &tcp_hdr(skb)->check;
128 } else if (nw_proto == IPPROTO_UDP) {
129 if (likely(transport_len >= sizeof(struct udphdr)))
130 return &udp_hdr(skb)->check;
135 static struct sk_buff *set_nw_addr(struct sk_buff *skb, const struct nlattr *a)
/* Rewrite the IPv4 source or destination address (chosen by the attr
 * type in 'a'), updating both the L4 pseudo-header checksum and the IP
 * header checksum incrementally.  NOTE(review): line-sampled view; the
 * 'nh'/'nwaddr'/'check' declarations, the ip_hdr() assignment, the NULL
 * check on 'check', and the return are not visible here. */
137 __be32 new_nwaddr = nla_get_be32(a);
/* Non-IPv4 packets are left untouched. */
142 if (unlikely(!is_ip(skb)))
145 skb = make_writable(skb, 0);
150 nwaddr = nla_type(a) == ODP_ACTION_ATTR_SET_NW_SRC ? &nh->saddr : &nh->daddr;
152 check = get_l4_checksum(skb);
/* The address is part of the TCP/UDP pseudo-header (hence the final
 * '1' for pseudohdr), and of the IP header checksum. */
154 inet_proto_csum_replace4(check, skb, *nwaddr, new_nwaddr, 1);
155 csum_replace4(&nh->check, *nwaddr, new_nwaddr);
/* Any cached flow hash is stale once the address changes. */
157 skb_clear_rxhash(skb);
159 *nwaddr = new_nwaddr;
164 static struct sk_buff *set_nw_tos(struct sk_buff *skb, u8 nw_tos)
/* Rewrite the IPv4 TOS field: set the DSCP bits from 'nw_tos' while
 * preserving the packet's ECN bits, then patch the IP header checksum.
 * NOTE(review): line-sampled view; the 'old'/'new' declarations, the
 * assignment of 'old' and of nh->tos, and the return are not visible
 * here. */
166 if (unlikely(!is_ip(skb)))
169 skb = make_writable(skb, 0);
171 struct iphdr *nh = ip_hdr(skb);
176 /* Set the DSCP bits and preserve the ECN bits. */
177 new = nw_tos | (nh->tos & INET_ECN_MASK);
178 csum_replace4(&nh->check, (__force __be32)old,
179 (__force __be32)new);
185 static struct sk_buff *set_tp_port(struct sk_buff *skb, const struct nlattr *a)
/* Rewrite the TCP/UDP source or destination port (chosen by the attr
 * type in 'a') and update the L4 checksum incrementally.
 * NOTE(review): line-sampled view; the 'th'/'check'/'port' declarations
 * and the returns are not visible here. */
191 if (unlikely(!is_ip(skb)))
194 skb = make_writable(skb, 0);
198 /* Must follow make_writable() since that can move the skb data. */
199 check = get_l4_checksum(skb);
/* No recognizable TCP/UDP checksum field: leave the packet alone. */
200 if (unlikely(!check))
204 * Update port and checksum.
206 * This is OK because source and destination port numbers are at the
207 * same offsets in both UDP and TCP headers, and get_l4_checksum() only
208 * supports those protocols.
211 port = nla_type(a) == ODP_ACTION_ATTR_SET_TP_SRC ? &th->source : &th->dest;
/* Ports are not in the pseudo-header, hence the final '0'. */
212 inet_proto_csum_replace2(check, skb, *port, nla_get_be16(a), 0);
213 *port = nla_get_be16(a);
/* Cached flow hash is stale once the port changes. */
214 skb_clear_rxhash(skb);
220 * is_spoofed_arp - check for invalid ARP packet
222 * @skb: skbuff containing an Ethernet packet, with network header pointing
223 * just past the Ethernet and optional 802.1Q header.
225 * Returns true if @skb is an invalid Ethernet+IPv4 ARP packet: one with screwy
226 * or truncated header fields or one whose inner and outer Ethernet source
 * addresses disagree.
229 static bool is_spoofed_arp(struct sk_buff *skb)
231 struct arp_eth_header *arp;
/* Non-ARP packets are never "spoofed ARP"; the visible comparison only
 * filters, the early-return bodies are not visible in this view. */
233 if (OVS_CB(skb)->flow->key.eth.type != htons(ETH_P_ARP))
/* Truncated ARP header cannot be validated field-by-field. */
236 if (skb_network_offset(skb) + sizeof(struct arp_eth_header) > skb->len)
239 arp = (struct arp_eth_header *)skb_network_header(skb);
/* Spoofed if it is not Ethernet/IPv4 ARP with 6-byte hardware
 * addresses, or the ARP sender MAC differs from the Ethernet source. */
240 return (arp->ar_hrd != htons(ARPHRD_ETHER) ||
241 arp->ar_pro != htons(ETH_P_IP) ||
242 arp->ar_hln != ETH_ALEN ||
244 compare_ether_addr(arp->ar_sha, eth_hdr(skb)->h_source));
247 static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
/* Send 'skb' out the datapath port numbered 'out_port'.  The port
 * lookup is RCU-protected; NOTE(review): nearly all of this function's
 * body (the 'p' declaration, NULL-port handling, and the actual send)
 * is not visible in this line-sampled view. */
254 p = rcu_dereference(dp->ports[out_port]);
265 static int output_control(struct datapath *dp, struct sk_buff *skb, u64 arg)
/* Send a clone of 'skb' to userspace as an ODP_PACKET_CMD_ACTION
 * upcall, carrying 'arg' as opaque userdata.  Returns dp_upcall()'s
 * result.  NOTE(review): line-sampled view; the clone-failure check is
 * not visible here. */
267 struct dp_upcall_info upcall;
/* Clone so the caller's skb can continue through later actions. */
269 skb = skb_clone(skb, GFP_ATOMIC);
273 upcall.cmd = ODP_PACKET_CMD_ACTION;
274 upcall.key = &OVS_CB(skb)->flow->key;
275 upcall.userdata = arg;
/* Not a sample upcall: no sampling pool or action list attached. */
276 upcall.sample_pool = 0;
277 upcall.actions = NULL;
278 upcall.actions_len = 0;
279 return dp_upcall(dp, skb, &upcall);
282 /* Execute a list of actions against 'skb'. */
283 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
284 struct sw_flow_actions *acts)
286 /* Every output action needs a separate clone of 'skb', but the common
287 * case is just a single output action, so that doing a clone and
288 * then freeing the original skbuff is wasteful. So the following code
289 * is slightly obscure just to avoid that. */
/* Saved so POP_PRIORITY can restore the skb's original priority.
 * NOTE(review): line-sampled view; 'prev_port', 'err', 'rem', the
 * per-case 'break's, error handling, and the final return are not
 * visible here. */
291 u32 priority = skb->priority;
292 const struct nlattr *a;
/* Walk the netlink-formatted action list. */
295 for (a = acts->actions, rem = acts->actions_len; rem > 0;
296 a = nla_next(a, &rem)) {
/* A pending output from the previous iteration gets a clone, so
 * the last output (handled after the loop) can consume 'skb'. */
297 if (prev_port != -1) {
298 do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
302 switch (nla_type(a)) {
/* Defer the actual send (see clone logic above). */
303 case ODP_ACTION_ATTR_OUTPUT:
304 prev_port = nla_get_u32(a);
/* Punt a copy to userspace with the attribute's userdata. */
307 case ODP_ACTION_ATTR_CONTROLLER:
308 err = output_control(dp, skb, nla_get_u64(a));
/* Tunnel ID used by subsequent tunnel-port outputs. */
315 case ODP_ACTION_ATTR_SET_TUNNEL:
316 OVS_CB(skb)->tun_id = nla_get_be64(a);
319 case ODP_ACTION_ATTR_SET_DL_TCI:
320 skb = modify_vlan_tci(skb, nla_get_be16(a));
323 case ODP_ACTION_ATTR_STRIP_VLAN:
324 skb = strip_vlan(skb);
/* Ethernet address rewrites need a private, writable skb. */
327 case ODP_ACTION_ATTR_SET_DL_SRC:
328 skb = make_writable(skb, 0);
331 memcpy(eth_hdr(skb)->h_source, nla_data(a), ETH_ALEN);
334 case ODP_ACTION_ATTR_SET_DL_DST:
335 skb = make_writable(skb, 0);
338 memcpy(eth_hdr(skb)->h_dest, nla_data(a), ETH_ALEN);
/* IPv4 address rewrite; helper distinguishes src/dst by attr type. */
341 case ODP_ACTION_ATTR_SET_NW_SRC:
342 case ODP_ACTION_ATTR_SET_NW_DST:
343 skb = set_nw_addr(skb, a);
346 case ODP_ACTION_ATTR_SET_NW_TOS:
347 skb = set_nw_tos(skb, nla_get_u8(a));
/* TCP/UDP port rewrite; helper distinguishes src/dst by attr type. */
350 case ODP_ACTION_ATTR_SET_TP_SRC:
351 case ODP_ACTION_ATTR_SET_TP_DST:
352 skb = set_tp_port(skb, a);
355 case ODP_ACTION_ATTR_SET_PRIORITY:
356 skb->priority = nla_get_u32(a);
/* Restore the priority saved on entry. */
359 case ODP_ACTION_ATTR_POP_PRIORITY:
360 skb->priority = priority;
363 case ODP_ACTION_ATTR_DROP_SPOOFED_ARP:
364 if (unlikely(is_spoofed_arp(skb)))
/* Final pending output consumes 'skb' itself (no clone). */
373 do_output(dp, skb, prev_port);
379 static void sflow_sample(struct datapath *dp, struct sk_buff *skb,
380 struct sw_flow_actions *acts)
/* Probabilistically send a clone of 'skb' to userspace as an sFlow
 * sample (ODP_PACKET_CMD_SAMPLE), tagged with the input port's sample
 * pool and the actions about to be executed.  NOTE(review):
 * line-sampled view; the probability early-return, clone-failure check,
 * and upcall.userdata assignment are not visible here. */
382 struct sk_buff *nskb;
383 struct vport *p = OVS_CB(skb)->vport;
384 struct dp_upcall_info upcall;
/* Every candidate packet counts toward the pool, sampled or not. */
389 atomic_inc(&p->sflow_pool);
/* Sample only when the random draw falls under the configured
 * probability threshold. */
390 if (net_random() >= dp->sflow_probability)
393 nskb = skb_clone(skb, GFP_ATOMIC);
397 upcall.cmd = ODP_PACKET_CMD_SAMPLE;
398 upcall.key = &OVS_CB(skb)->flow->key;
400 upcall.sample_pool = atomic_read(&p->sflow_pool);
401 upcall.actions = acts->actions;
402 upcall.actions_len = acts->actions_len;
/* Best-effort: the sample upcall's result is intentionally ignored. */
403 dp_upcall(dp, nskb, &upcall);
406 /* Execute a list of actions against 'skb'. */
407 int execute_actions(struct datapath *dp, struct sk_buff *skb)
/* Top-level entry point: guard against action loops with a per-CPU(?)
 * loop counter, optionally emit an sFlow sample, then run the flow's
 * action list.  NOTE(review): line-sampled view; the goto targets,
 * loop_put_counter()/cleanup, and the final return are not visible
 * here. */
409 struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
410 struct loop_counter *loop;
413 /* Check whether we've looped too much. */
414 loop = loop_get_counter();
/* Mark looping sticky so nested invocations also suppress. */
415 if (unlikely(++loop->count > MAX_LOOPS))
416 loop->looping = true;
417 if (unlikely(loop->looping)) {
418 error = loop_suppress(dp, acts);
423 /* Really execute actions. */
424 if (dp->sflow_probability)
425 sflow_sample(dp, skb, acts);
/* Fresh packet: no tunnel ID until a SET_TUNNEL action sets one. */
426 OVS_CB(skb)->tun_id = 0;
427 error = do_execute_actions(dp, skb, acts);
429 /* Check whether sub-actions looped too much. */
430 if (unlikely(loop->looping))
431 error = loop_suppress(dp, acts);
434 /* Decrement loop counter. */
/* Reset the sticky flag on the way out so the counter is clean for
 * the next top-level invocation. */
436 loop->looping = false;