/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for executing flow actions. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/checksum.h>

/* Module-local headers: checksum-offload helpers, datapath and vport
 * definitions, and VLAN compatibility wrappers. */
#include "checksum.h"
#include "datapath.h"
#include "vlan.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      const struct nlattr *attr, int len, bool keep_skb);

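/* Make sure the first 'write_len' bytes of 'skb' are safe to modify: a cloned
 * skb whose data is shared gets its header re-allocated first. */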
static int make_writable(struct sk_buff *skb, int write_len)
{
	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

/* Remove VLAN header from packet and update csum accordingly. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
	struct ethhdr *eh;
	struct vlan_ethhdr *veth;
	int err;

	err = make_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		return err;

	if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
					+ ETH_HLEN, VLAN_HLEN, 0));

	veth = (struct vlan_ethhdr *) skb->data;
	*current_tci = veth->h_vlan_TCI;

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);

	eh = (struct ethhdr *)__skb_pull(skb, VLAN_HLEN);

	skb->protocol = eh->h_proto;
	skb->mac_header += VLAN_HLEN;

	return 0;
}

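/* Strip the outermost VLAN tag from 'skb', whether it is carried in the
 * hardware-accelerated field or in the packet data.  If another 802.1Q header
 * remains, it is popped and promoted to the hardware-accelerated tag. */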
static int pop_vlan(struct sk_buff *skb)
{
	__be16 tci;
	int err;

	if (likely(vlan_tx_tag_present(skb))) {
		vlan_set_tci(skb, 0);
	} else {
		if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __pop_vlan_tci(skb, &tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely(skb->protocol != htons(ETH_P_8021Q) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	err = __pop_vlan_tci(skb, &tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, ntohs(tci));
	return 0;
}

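/* Tag 'skb' with the 802.1Q TCI from 'q_key'.  A tag already held in the
 * hardware-accelerated field is first pushed back into the packet data. */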
static int push_vlan(struct sk_buff *skb, const struct ovs_key_8021q *q_key)
{
	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 current_tag;

		/* push down current VLAN tag */
		current_tag = vlan_tx_tag_get(skb);

		if (!__vlan_put_tag(skb, current_tag))
			return -ENOMEM;

		if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ ETH_HLEN, VLAN_HLEN, 0));
	}
	__vlan_hwaccel_put_tag(skb, ntohs(q_key->q_tci));
	return 0;
}

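/* Overwrite the Ethernet source and destination addresses with the values
 * carried in 'eth_key'. */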
static int set_eth_addr(struct sk_buff *skb,
			const struct ovs_key_ethernet *eth_key)
{
	int err;

	err = make_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	memcpy(eth_hdr(skb)->h_source, eth_key->eth_src, ETH_ALEN);
	memcpy(eth_hdr(skb)->h_dest, eth_key->eth_dst, ETH_ALEN);

	return 0;
}

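/* Replace one IPv4 address in the header with 'new_addr', fixing up the IP
 * checksum and, when a complete transport header is present, the TCP or UDP
 * checksum that covers the pseudo-header. */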
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 *addr, new_addr, 1);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr)))
			inet_proto_csum_replace4(&udp_hdr(skb)->check, skb,
						 *addr, new_addr, 1);
	}

	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_rxhash(skb);
	*addr = new_addr;
}

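/* Rewrite the IPv4 TOS field: take the DSCP bits from 'new_tos' but preserve
 * the packet's ECN bits, then patch the header checksum. */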
static void set_ip_tos(struct sk_buff *skb, struct iphdr *nh, u8 new_tos)
{
	u8 old, new;

	/* Set the DSCP bits and preserve the ECN bits. */
	old = nh->tos;
	new = new_tos | (nh->tos & INET_ECN_MASK);
	csum_replace4(&nh->check, (__force __be32)old,
		      (__force __be32)new);
	nh->tos = new;
}

static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
{
	struct iphdr *nh;
	int err;

	err = make_writable(skb, skb_network_offset(skb) +
			    sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	if (ipv4_key->ipv4_src != nh->saddr)
		set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);

	if (ipv4_key->ipv4_dst != nh->daddr)
		set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);

	if (ipv4_key->ipv4_tos != nh->tos)
		set_ip_tos(skb, nh, ipv4_key->ipv4_tos);

	return 0;
}

/* Must follow make_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
	*port = new_port;
	skb_clear_rxhash(skb);
}

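/* Apply the UDP source/destination ports from 'udp_port_key' to 'skb',
 * updating the UDP checksum for each port that changes. */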
static int set_udp_port(struct sk_buff *skb,
			const struct ovs_key_udp *udp_port_key)
{
	struct udphdr *uh;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
			    sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);

	if (udp_port_key->udp_src != uh->source)
		set_tp_port(skb, &uh->source, udp_port_key->udp_src, &uh->check);

	if (udp_port_key->udp_dst != uh->dest)
		set_tp_port(skb, &uh->dest, udp_port_key->udp_dst, &uh->check);

	return 0;
}

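/* Apply the TCP source/destination ports from 'tcp_port_key' to 'skb',
 * updating the TCP checksum for each port that changes. */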
static int set_tcp_port(struct sk_buff *skb,
			const struct ovs_key_tcp *tcp_port_key)
{
	struct tcphdr *th;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
			    sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);

	if (tcp_port_key->tcp_src != th->source)
		set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);

	if (tcp_port_key->tcp_dst != th->dest)
		set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);

	return 0;
}

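/* Send 'skb' out of the vport numbered 'out_port', dropping the packet if the
 * port no longer exists. */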
static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct vport *vport;

	if (unlikely(!skb))
		return -ENOMEM;

	vport = rcu_dereference(dp->ports[out_port]);
	if (unlikely(!vport)) {
		kfree_skb(skb);
		return -ENODEV;
	}

	vport_send(vport, skb);
	return 0;
}

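/* Deliver 'skb' to userspace as an OVS_PACKET_CMD_ACTION upcall, carrying any
 * userdata and the Netlink PID found in the action's nested attributes. */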
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.key = &OVS_CB(skb)->flow->key;
	upcall.userdata = NULL;
	upcall.pid = 0;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.pid = nla_get_u32(a);
			break;
		}
	}

	return dp_upcall(dp, skb, &upcall);
}

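/* Execute the nested OVS_SAMPLE_ATTR_ACTIONS list against 'skb' with the
 * probability carried in OVS_SAMPLE_ATTR_PROBABILITY (a 32-bit threshold
 * compared against a random draw). */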
static int sample(struct datapath *dp, struct sk_buff *skb,
		  const struct nlattr *attr)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			/* Random draw at or above the threshold: skip. */
			if (net_random() >= nla_get_u32(a))
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	return do_execute_actions(dp, skb, nla_data(acts_list),
				  nla_len(acts_list), true);
}

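/* Apply a single OVS_ACTION_ATTR_SET action; the nested attribute identifies
 * the field to rewrite (tunnel ID, Ethernet, IPv4, TCP or UDP) and carries
 * its new value. */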
static int execute_set_action(struct sk_buff *skb,
			      const struct nlattr *nested_attr)
{
	int err = 0;

	switch (nla_type(nested_attr)) {
	case OVS_KEY_ATTR_TUN_ID:
		OVS_CB(skb)->tun_id = nla_get_be64(nested_attr);
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp_port(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp_port(skb, nla_data(nested_attr));
		break;
	}

	return err;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct nlattr *attr, int len, bool keep_skb)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so cloning and then freeing
	 * the original skbuff would be wasteful.  The code below is slightly
	 * obscure just to avoid that. */
	int prev_port = -1;
	u32 priority = skb->priority;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (prev_port != -1) {
			do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, a);
			break;

		case OVS_ACTION_ATTR_PUSH:
			/* Only supported push action is on vlan tag. */
			err = push_vlan(skb, nla_data(nla_data(a)));
			if (unlikely(err)) /* skb already freed. */
				return err;
			break;

		case OVS_ACTION_ATTR_POP:
			/* Only supported pop action is on vlan tag. */
			err = pop_vlan(skb);
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_PRIORITY:
			skb->priority = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_POP_PRIORITY:
			skb->priority = priority;
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, a);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1) {
		if (keep_skb)
			skb = skb_clone(skb, GFP_ATOMIC);

		do_output(dp, skb, prev_port);
	} else if (!keep_skb)
		consume_skb(skb);

	return 0;
}

/* We limit the number of times that we pass into execute_actions()
 * to avoid blowing out the stack in the event that we have a loop. */
#define MAX_LOOPS 5

struct loop_counter {
	u8 count;		/* Count. */
	bool looping;		/* Loop detected? */
};

static DEFINE_PER_CPU(struct loop_counter, loop_counters);

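/* Called when a flow has looped too many times: log a rate-limited warning,
 * clear the flow's action list so it cannot recurse further, and return an
 * error to the caller. */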
static int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
{
	if (net_ratelimit())
		pr_warn("%s: flow looped %d times, dropping\n",
			dp_name(dp), MAX_LOOPS);
	actions->actions_len = 0;
	return -ELOOP;
}

/* Execute a list of actions against 'skb'. */
int execute_actions(struct datapath *dp, struct sk_buff *skb)
{
	struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
	struct loop_counter *loop;
	int error;

	/* Check whether we've looped too much. */
	loop = &__get_cpu_var(loop_counters);
	if (unlikely(++loop->count > MAX_LOOPS))
		loop->looping = true;
	if (unlikely(loop->looping)) {
		error = loop_suppress(dp, acts);
		kfree_skb(skb);
		goto out_loop;
	}

	OVS_CB(skb)->tun_id = 0;
	error = do_execute_actions(dp, skb, acts->actions,
				   acts->actions_len, false);

	/* Check whether sub-actions looped too much. */
	if (unlikely(loop->looping))
		error = loop_suppress(dp, acts);

out_loop:
	/* Decrement loop counter. */
	if (!--loop->count)
		loop->looping = false;

	return error;
}