/*
 * Copyright (c) 2007-2011 Nicira Networks.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <net/dsfield.h>

#include "checksum.h"
#include "datapath.h"
#include "vlan.h"
#include "vport.h"
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct nlattr *attr, int len, bool keep_skb);
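
/* Return 0 once the first 'write_len' bytes of 'skb' may safely be
 * modified, un-sharing a cloned skb first if necessary. */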
static int make_writable(struct sk_buff *skb, int write_len)
{
	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
/* Remove VLAN header from packet and update csum accordingly. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
	struct vlan_hdr *vhdr;
	int err;

	err = make_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		return err;

	if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
					+ ETH_HLEN, VLAN_HLEN, 0));

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*current_tci = vhdr->h_vlan_TCI;

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;
	skb_reset_mac_len(skb);

	return 0;
}
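
/* Pop the outermost VLAN tag.  A tag held in the hardware-accelerated
 * field is simply cleared; otherwise the tag is stripped from the packet
 * data.  If an inner 802.1Q tag then becomes outermost, it is promoted
 * into the hw-accel field. */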
static int pop_vlan(struct sk_buff *skb)
{
	__be16 tci;
	int err;

	if (likely(vlan_tx_tag_present(skb))) {
		vlan_set_tci(skb, 0);
	} else {
		if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __pop_vlan_tci(skb, &tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely(skb->protocol != htons(ETH_P_8021Q) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	err = __pop_vlan_tci(skb, &tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, ntohs(tci));
	return 0;
}
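
/* Push a new outermost VLAN tag.  Only one tag can live in the
 * hw-accel field, so any tag already there is first written back
 * into the packet data. */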
static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
{
	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 current_tag;

		/* push down current VLAN tag */
		current_tag = vlan_tx_tag_get(skb);

		if (!__vlan_put_tag(skb, current_tag))
			return -ENOMEM;

		if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ ETH_HLEN, VLAN_HLEN, 0));
	}
	__vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
	return 0;
}
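
/* Rewrite the Ethernet source and destination addresses from 'eth_key'. */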
static int set_eth_addr(struct sk_buff *skb,
			const struct ovs_key_ethernet *eth_key)
{
	int err;

	err = make_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	memcpy(eth_hdr(skb)->h_source, eth_key->eth_src, ETH_ALEN);
	memcpy(eth_hdr(skb)->h_dest, eth_key->eth_dst, ETH_ALEN);

	return 0;
}
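
/* Rewrite one IPv4 address, updating the IP header checksum and, since
 * the address is part of the TCP/UDP pseudo-header, the transport
 * checksum as well when enough of the transport header is present. */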
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 *addr, new_addr, 1);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr)))
			inet_proto_csum_replace4(&udp_hdr(skb)->check, skb,
						 *addr, new_addr, 1);
	}

	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_rxhash(skb);
	*addr = new_addr;
}
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
{
	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}
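
/* Apply an OVS_KEY_ATTR_IPV4 set action, rewriting only the address,
 * TOS, and TTL fields that differ from the current header. */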
static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
{
	struct iphdr *nh;
	int err;

	err = make_writable(skb, skb_network_offset(skb) +
				 sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	if (ipv4_key->ipv4_src != nh->saddr)
		set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);

	if (ipv4_key->ipv4_dst != nh->daddr)
		set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);

	if (ipv4_key->ipv4_tos != nh->tos)
		ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);

	if (ipv4_key->ipv4_ttl != nh->ttl)
		set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);

	return 0;
}
/* Must follow make_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
	*port = new_port;
	skb_clear_rxhash(skb);
}
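
/* Rewrite the UDP source and destination ports from 'udp_port_key',
 * updating the UDP checksum. */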
static int set_udp_port(struct sk_buff *skb,
			const struct ovs_key_udp *udp_port_key)
{
	struct udphdr *uh;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	if (udp_port_key->udp_src != uh->source)
		set_tp_port(skb, &uh->source, udp_port_key->udp_src, &uh->check);

	if (udp_port_key->udp_dst != uh->dest)
		set_tp_port(skb, &uh->dest, udp_port_key->udp_dst, &uh->check);

	return 0;
}
static int set_tcp_port(struct sk_buff *skb,
			const struct ovs_key_tcp *tcp_port_key)
{
	struct tcphdr *th;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	if (tcp_port_key->tcp_src != th->source)
		set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);

	if (tcp_port_key->tcp_dst != th->dest)
		set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);

	return 0;
}
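
/* Transmit 'skb' on the vport identified by 'out_port', consuming the
 * skb.  The skb is freed if the port no longer exists. */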
static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct vport *vport;

	if (unlikely(!skb))
		return -ENOMEM;

	vport = rcu_dereference(dp->ports[out_port]);
	if (unlikely(!vport)) {
		kfree_skb(skb);
		return -ENODEV;
	}

	ovs_vport_send(vport, skb);
	return 0;
}
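
/* Send 'skb' to userspace as an OVS_PACKET_CMD_ACTION upcall, carrying
 * the flow key plus any userdata and Netlink PID found in 'attr'. */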
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.key = &OVS_CB(skb)->flow->key;
	upcall.userdata = NULL;
	upcall.pid = 0;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.pid = nla_get_u32(a);
			break;
		}
	}

	return ovs_dp_upcall(dp, skb, &upcall);
}
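
/* Execute the nested OVS_SAMPLE_ATTR_ACTIONS list with the probability
 * given by OVS_SAMPLE_ATTR_PROBABILITY, which is expressed as a fraction
 * of UINT32_MAX; 'skb' itself is preserved for the caller. */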
static int sample(struct datapath *dp, struct sk_buff *skb,
		  const struct nlattr *attr)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			if (net_random() >= nla_get_u32(a))
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	return do_execute_actions(dp, skb, nla_data(acts_list),
				  nla_len(acts_list), true);
}
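
/* Dispatch an OVS_ACTION_ATTR_SET action to the handler for the nested
 * key attribute type. */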
static int execute_set_action(struct sk_buff *skb,
			      const struct nlattr *nested_attr)
{
	int err = 0;

	switch (nla_type(nested_attr)) {
	case OVS_KEY_ATTR_PRIORITY:
		skb->priority = nla_get_u32(nested_attr);
		break;

	case OVS_KEY_ATTR_TUN_ID:
		OVS_CB(skb)->tun_id = nla_get_be64(nested_attr);
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp_port(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp_port(skb, nla_data(nested_attr));
		break;
	}

	return err;
}
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct nlattr *attr, int len, bool keep_skb)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so cloning and then freeing
	 * the original skbuff would be wasteful.  The following code is
	 * slightly obscure just to avoid that. */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (prev_port != -1) {
			do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, a);
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, nla_data(a));
			if (unlikely(err)) /* skb already freed. */
				return err;
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb);
			break;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, a);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1) {
		if (keep_skb)
			skb = skb_clone(skb, GFP_ATOMIC);

		do_output(dp, skb, prev_port);
	} else if (!keep_skb)
		consume_skb(skb);

	return 0;
}
/* We limit the number of times that we pass into execute_actions()
 * to avoid blowing out the stack in the event that we have a loop. */
#define MAX_LOOPS 4

struct loop_counter {
	u8 count;		/* Count. */
	bool looping;		/* Loop detected? */
};

static DEFINE_PER_CPU(struct loop_counter, loop_counters);
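
/* Rate-limited warning for a looping flow; zeroing actions_len prevents
 * any further execution of the flow's actions. */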
static int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
{
	if (net_ratelimit())
		pr_warn("%s: flow looped %d times, dropping\n",
			ovs_dp_name(dp), MAX_LOOPS);
	actions->actions_len = 0;
	return -ELOOP;
}
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
{
	struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
	struct loop_counter *loop;
	int error;

	/* Check whether we've looped too much. */
	loop = &__get_cpu_var(loop_counters);
	if (unlikely(++loop->count > MAX_LOOPS))
		loop->looping = true;
	if (unlikely(loop->looping)) {
		error = loop_suppress(dp, acts);
		kfree_skb(skb);
		goto out_loop;
	}

	OVS_CB(skb)->tun_id = 0;
	error = do_execute_actions(dp, skb, acts->actions,
				   acts->actions_len, false);

	/* Check whether sub-actions looped too much. */
	if (unlikely(loop->looping))
		error = loop_suppress(dp, acts);

out_loop:
	/* Decrement loop counter. */
	if (!--loop->count)
		loop->looping = false;

	return error;
}