/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for executing flow actions. */

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/checksum.h>

#include "actions.h"
#include "checksum.h"
#include "datapath.h"
#include "openvswitch/datapath-protocol.h"
#include "vlan.h"
#include "vport.h"

static int do_execute_actions(struct datapath *, struct sk_buff *,
			      const struct sw_flow_key *,
			      const struct nlattr *actions, u32 actions_len);
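
/* Ensures that 'skb' may be modified in place: if it is cloned, replaces it
 * with a private copy that has at least 'min_headroom' bytes of headroom;
 * otherwise pulls the (up to) TCP-sized header region into the linear data
 * area.  On failure the packet is freed and NULL is returned. */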
static struct sk_buff *make_writable(struct sk_buff *skb, unsigned min_headroom)
{
	if (skb_cloned(skb)) {
		struct sk_buff *nskb;
		unsigned headroom = max(min_headroom, skb_headroom(skb));

		nskb = skb_copy_expand(skb, headroom, skb_tailroom(skb), GFP_ATOMIC);
		if (nskb) {
			set_skb_csum_bits(skb, nskb);
			kfree_skb(skb);
			return nskb;
		}
	} else {
		unsigned int hdr_len = (skb_transport_offset(skb)
					+ sizeof(struct tcphdr));
		if (pskb_may_pull(skb, min(hdr_len, skb->len)))
			return skb;
	}
	kfree_skb(skb);
	return NULL;
}
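
/* Removes the outermost 802.1Q header from 'skb', if one is present,
 * adjusting skb->csum for CHECKSUM_COMPLETE packets.  May consume 'skb';
 * returns NULL on failure. */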
static struct sk_buff *strip_vlan(struct sk_buff *skb)
{
	struct ethhdr *eh;

	if (vlan_tx_tag_present(skb)) {
		vlan_set_tci(skb, 0);
		return skb;
	}

	if (unlikely(vlan_eth_hdr(skb)->h_vlan_proto != htons(ETH_P_8021Q) ||
		     skb->len < VLAN_ETH_HLEN))
		return skb;

	skb = make_writable(skb, 0);
	if (unlikely(!skb))
		return NULL;

	if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
					+ ETH_HLEN, VLAN_HLEN, 0));

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);

	eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);

	skb->protocol = eh->h_proto;
	skb->mac_header += VLAN_HLEN;

	return skb;
}
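
/* Sets the 802.1Q TCI of 'skb' to 'tci', either via the hardware-accelerated
 * tag or by rewriting a VLAN header already present in the packet data,
 * updating skb->csum as necessary.  May consume 'skb'; returns NULL on
 * failure. */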
static struct sk_buff *modify_vlan_tci(struct sk_buff *skb, __be16 tci)
{
	struct vlan_ethhdr *vh;
	__be16 old_tci;

	if (vlan_tx_tag_present(skb) || skb->protocol != htons(ETH_P_8021Q))
		return __vlan_hwaccel_put_tag(skb, ntohs(tci));

	skb = make_writable(skb, 0);
	if (unlikely(!skb))
		return NULL;

	if (unlikely(skb->len < VLAN_ETH_HLEN))
		return skb;

	vh = vlan_eth_hdr(skb);

	old_tci = vh->h_vlan_TCI;
	vh->h_vlan_TCI = tci;

	if (get_ip_summed(skb) == OVS_CSUM_COMPLETE) {
		__be16 diff[] = { ~old_tci, vh->h_vlan_TCI };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff), ~skb->csum);
	}

	return skb;
}

static bool is_ip(struct sk_buff *skb, const struct sw_flow_key *key)
{
	return (key->dl_type == htons(ETH_P_IP) &&
		skb->transport_header > skb->network_header);
}
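
/* Returns a pointer to the TCP or UDP checksum field of 'skb', or NULL if
 * the flow is neither TCP nor UDP or if the transport header is truncated. */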
static __sum16 *get_l4_checksum(struct sk_buff *skb, const struct sw_flow_key *key)
{
	int transport_len = skb->len - skb_transport_offset(skb);
	if (key->nw_proto == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			return &tcp_hdr(skb)->check;
	} else if (key->nw_proto == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr)))
			return &udp_hdr(skb)->check;
	}
	return NULL;
}
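
/* Implements ODP_ACTION_ATTR_SET_NW_SRC and ODP_ACTION_ATTR_SET_NW_DST:
 * rewrites the IPv4 source or destination address named by 'a' and adjusts
 * the IP and TCP/UDP checksums to match. */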
static struct sk_buff *set_nw_addr(struct sk_buff *skb,
				   const struct sw_flow_key *key,
				   const struct nlattr *a)
{
	__be32 new_nwaddr = nla_get_be32(a);
	struct iphdr *nh;
	__sum16 *check;
	__be32 *nwaddr;

	if (unlikely(!is_ip(skb, key)))
		return skb;

	skb = make_writable(skb, 0);
	if (unlikely(!skb))
		return NULL;

	nh = ip_hdr(skb);
	nwaddr = nla_type(a) == ODP_ACTION_ATTR_SET_NW_SRC ? &nh->saddr : &nh->daddr;

	check = get_l4_checksum(skb, key);
	if (likely(check))
		inet_proto_csum_replace4(check, skb, *nwaddr, new_nwaddr, 1);
	csum_replace4(&nh->check, *nwaddr, new_nwaddr);

	skb_clear_rxhash(skb);

	*nwaddr = new_nwaddr;

	return skb;
}
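
/* Implements ODP_ACTION_ATTR_SET_NW_TOS: replaces the DSCP bits of the IPv4
 * TOS field with 'nw_tos', preserving the ECN bits, and updates the IP
 * header checksum. */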
static struct sk_buff *set_nw_tos(struct sk_buff *skb,
				  const struct sw_flow_key *key,
				  u8 nw_tos)
{
	if (unlikely(!is_ip(skb, key)))
		return skb;

	skb = make_writable(skb, 0);
	if (skb) {
		struct iphdr *nh = ip_hdr(skb);
		u8 old = nh->tos;
		u8 new;

		/* Set the DSCP bits and preserve the ECN bits. */
		new = nw_tos | (nh->tos & INET_ECN_MASK);
		csum_replace4(&nh->check, (__force __be32)old,
			      (__force __be32)new);
		nh->tos = new;
	}
	return skb;
}
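
/* Implements ODP_ACTION_ATTR_SET_TP_SRC and ODP_ACTION_ATTR_SET_TP_DST:
 * rewrites the TCP or UDP source or destination port named by 'a' and
 * adjusts the transport checksum to match. */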
static struct sk_buff *set_tp_port(struct sk_buff *skb,
				   const struct sw_flow_key *key,
				   const struct nlattr *a)
{
	struct udphdr *th;
	__sum16 *check;
	__be16 *port;

	if (unlikely(!is_ip(skb, key)))
		return skb;

	skb = make_writable(skb, 0);
	if (unlikely(!skb))
		return NULL;

	/* Must follow make_writable() since that can move the skb data. */
	check = get_l4_checksum(skb, key);
	if (unlikely(!check))
		return skb;

	/*
	 * Update port and checksum.
	 *
	 * This is OK because source and destination port numbers are at the
	 * same offsets in both UDP and TCP headers, and get_l4_checksum() only
	 * supports those protocols.
	 */
	th = udp_hdr(skb);
	port = nla_type(a) == ODP_ACTION_ATTR_SET_TP_SRC ? &th->source : &th->dest;
	inet_proto_csum_replace2(check, skb, *port, nla_get_be16(a), 0);
	*port = nla_get_be16(a);
	skb_clear_rxhash(skb);

	return skb;
}

/**
 * is_spoofed_arp - check for invalid ARP packet
 *
 * @skb: skbuff containing an Ethernet packet, with network header pointing
 * just past the Ethernet and optional 802.1Q header.
 * @key: flow key extracted from @skb by flow_extract()
 *
 * Returns true if @skb is an invalid Ethernet+IPv4 ARP packet: one with screwy
 * or truncated header fields or one whose inner and outer Ethernet address
 * differ.
 */
static bool is_spoofed_arp(struct sk_buff *skb, const struct sw_flow_key *key)
{
	struct arp_eth_header *arp;

	if (key->dl_type != htons(ETH_P_ARP))
		return false;

	if (skb_network_offset(skb) + sizeof(struct arp_eth_header) > skb->len)
		return true;

	arp = (struct arp_eth_header *)skb_network_header(skb);
	return (arp->ar_hrd != htons(ARPHRD_ETHER) ||
		arp->ar_pro != htons(ETH_P_IP) ||
		arp->ar_hln != ETH_ALEN ||
		arp->ar_pln != 4 ||
		compare_ether_addr(arp->ar_sha, eth_hdr(skb)->h_source));
}
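
/* Transmits 'skb' on the vport numbered 'out_port', dropping the packet if
 * the port does not exist.  Always consumes 'skb'. */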
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct vport *p;

	if (!skb)
		goto error;

	p = rcu_dereference(dp->ports[out_port]);
	if (!p)
		goto error;

	vport_send(p, skb);
	return;

error:
	kfree_skb(skb);
}
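
/* Sends a clone of 'skb' to userspace as an ODP_PACKET_CMD_ACTION upcall,
 * passing 'arg' through as the userdata attribute. */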
static int output_control(struct datapath *dp, struct sk_buff *skb, u64 arg,
			  const struct sw_flow_key *key)
{
	struct dp_upcall_info upcall;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	upcall.cmd = ODP_PACKET_CMD_ACTION;
	upcall.key = key;
	upcall.userdata = arg;
	upcall.sample_pool = 0;
	upcall.actions = NULL;
	upcall.actions_len = 0;
	return dp_upcall(dp, skb, &upcall);
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      const struct sw_flow_key *key,
			      const struct nlattr *actions, u32 actions_len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so that doing a clone and
	 * then freeing the original skbuff is wasteful.  So the following code
	 * is slightly obscure just to avoid that. */
	int prev_port = -1;
	u32 priority = skb->priority;
	const struct nlattr *a;
	int rem, err;

	for (a = actions, rem = actions_len; rem > 0; a = nla_next(a, &rem)) {
		if (prev_port != -1) {
			do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case ODP_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case ODP_ACTION_ATTR_CONTROLLER:
			err = output_control(dp, skb, nla_get_u64(a), key);
			if (err) {
				kfree_skb(skb);
				return err;
			}
			break;

		case ODP_ACTION_ATTR_SET_TUNNEL:
			OVS_CB(skb)->tun_id = nla_get_be64(a);
			break;

		case ODP_ACTION_ATTR_SET_DL_TCI:
			skb = modify_vlan_tci(skb, nla_get_be16(a));
			break;

		case ODP_ACTION_ATTR_STRIP_VLAN:
			skb = strip_vlan(skb);
			break;

		case ODP_ACTION_ATTR_SET_DL_SRC:
			skb = make_writable(skb, 0);
			if (!skb)
				return -ENOMEM;
			memcpy(eth_hdr(skb)->h_source, nla_data(a), ETH_ALEN);
			break;

		case ODP_ACTION_ATTR_SET_DL_DST:
			skb = make_writable(skb, 0);
			if (!skb)
				return -ENOMEM;
			memcpy(eth_hdr(skb)->h_dest, nla_data(a), ETH_ALEN);
			break;

		case ODP_ACTION_ATTR_SET_NW_SRC:
		case ODP_ACTION_ATTR_SET_NW_DST:
			skb = set_nw_addr(skb, key, a);
			break;

		case ODP_ACTION_ATTR_SET_NW_TOS:
			skb = set_nw_tos(skb, key, nla_get_u8(a));
			break;

		case ODP_ACTION_ATTR_SET_TP_SRC:
		case ODP_ACTION_ATTR_SET_TP_DST:
			skb = set_tp_port(skb, key, a);
			break;

		case ODP_ACTION_ATTR_SET_PRIORITY:
			skb->priority = nla_get_u32(a);
			break;

		case ODP_ACTION_ATTR_POP_PRIORITY:
			skb->priority = priority;
			break;

		case ODP_ACTION_ATTR_DROP_SPOOFED_ARP:
			if (unlikely(is_spoofed_arp(skb, key)))
				goto exit;
			break;
		}

		/* The modification helpers above consume 'skb' and return
		 * NULL on allocation failure. */
		if (unlikely(!skb))
			return -ENOMEM;
	}

exit:
	if (prev_port != -1)
		do_output(dp, skb, prev_port);
	else
		kfree_skb(skb);

	return 0;
}
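
/* With probability dp->sflow_probability, sends an ODP_PACKET_CMD_SAMPLE
 * upcall to userspace containing a clone of 'skb' and the actions being
 * applied to it. */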
static void sflow_sample(struct datapath *dp, struct sk_buff *skb,
			 const struct sw_flow_key *key,
			 const struct nlattr *a, u32 actions_len)
{
	struct sk_buff *nskb;
	struct vport *p = OVS_CB(skb)->vport;
	struct dp_upcall_info upcall;

	if (unlikely(!p))
		return;

	atomic_inc(&p->sflow_pool);
	if (net_random() >= dp->sflow_probability)
		return;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!nskb))
		return;

	upcall.cmd = ODP_PACKET_CMD_SAMPLE;
	upcall.key = key;
	upcall.userdata = 0;
	upcall.sample_pool = atomic_read(&p->sflow_pool);
	upcall.actions = a;
	upcall.actions_len = actions_len;
	dp_upcall(dp, nskb, &upcall);
}

/* Execute a list of actions against 'skb'. */
int execute_actions(struct datapath *dp, struct sk_buff *skb,
		    const struct sw_flow_key *key,
		    const struct nlattr *actions, u32 actions_len)
{
	if (dp->sflow_probability)
		sflow_sample(dp, skb, key, actions, actions_len);

	OVS_CB(skb)->tun_id = 0;

	return do_execute_actions(dp, skb, key, actions, actions_len);
}