/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for executing flow actions. */

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/checksum.h>

#include "actions.h"
#include "checksum.h"
#include "datapath.h"
#include "openvswitch/datapath-protocol.h"
#include "vport.h"

static int do_execute_actions(struct datapath *, struct sk_buff *,
			      const struct sw_flow_key *,
			      const struct nlattr *actions, u32 actions_len);
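
/* Makes 'skb' safe to modify: if it is cloned, replaces it with a writable
 * copy that has at least 'min_headroom' bytes of headroom and frees the
 * original; otherwise ensures the headers up through the transport header
 * are in the linear data area.  On failure, frees 'skb' and returns NULL. */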
static struct sk_buff *make_writable(struct sk_buff *skb, unsigned min_headroom)
{
	if (skb_cloned(skb)) {
		struct sk_buff *nskb;
		unsigned headroom = max(min_headroom, skb_headroom(skb));

		nskb = skb_copy_expand(skb, headroom, skb_tailroom(skb), GFP_ATOMIC);
		if (nskb) {
			set_skb_csum_bits(skb, nskb);
			kfree_skb(skb);
			return nskb;
		}
	} else {
		unsigned int hdr_len = (skb_transport_offset(skb)
					+ sizeof(struct tcphdr));
		if (pskb_may_pull(skb, min(hdr_len, skb->len)))
			return skb;
	}

	kfree_skb(skb);
	return NULL;
}
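
/* Removes the outermost 802.1Q header from 'skb', updating skb->protocol,
 * the MAC header offset, and any OVS_CSUM_COMPLETE checksum to match.  A
 * packet without a VLAN tag is returned unchanged. */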
static struct sk_buff *vlan_pull_tag(struct sk_buff *skb)
{
	struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
	struct ethhdr *eh;

	/* Verify we were given a vlan packet */
	if (vh->h_vlan_proto != htons(ETH_P_8021Q) || skb->len < VLAN_ETH_HLEN)
		return skb;

	if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
					+ ETH_HLEN, VLAN_HLEN, 0));

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);

	eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);

	skb->protocol = eh->h_proto;
	skb->mac_header += VLAN_HLEN;

	return skb;
}
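
/* Implements ODPAT_SET_DL_TCI: replaces the 802.1Q TCI with 'tci', adding a
 * VLAN header if the packet does not already have one.  For GSO packets the
 * skb is segmented first; the remaining actions are executed directly on all
 * but the last segment, and the last segment is returned as the new 'skb'.
 * Returns an ERR_PTR on failure. */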
static struct sk_buff *modify_vlan_tci(struct datapath *dp, struct sk_buff *skb,
				       const struct sw_flow_key *key,
				       const struct nlattr *a, u32 actions_len)
{
	__be16 tci = nla_get_be16(a);

	skb = make_writable(skb, VLAN_HLEN);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	if (skb->protocol == htons(ETH_P_8021Q)) {
		/* Modify vlan id, but maintain other TCI values */
		struct vlan_ethhdr *vh;
		__be16 old_tci;

		if (skb->len < VLAN_ETH_HLEN)
			return skb;

		vh = vlan_eth_hdr(skb);
		old_tci = vh->h_vlan_TCI;
		vh->h_vlan_TCI = tci;

		if (get_ip_summed(skb) == OVS_CSUM_COMPLETE) {
			__be16 diff[] = { ~old_tci, vh->h_vlan_TCI };

			skb->csum = ~csum_partial((char *)diff, sizeof(diff),
						  ~skb->csum);
		}
	} else {
		int err;

		/* Add vlan header */

		/* Set up checksumming pointers for checksum-deferred packets
		 * on Xen.  Otherwise, dev_queue_xmit() will try to do this
		 * when we send the packet out on the wire, and it will fail at
		 * that point because skb_checksum_setup() will not look inside
		 * an 802.1Q header. */
		err = vswitch_skb_checksum_setup(skb);
		if (unlikely(err)) {
			kfree_skb(skb);
			return ERR_PTR(err);
		}

		/* GSO is not implemented for packets with an 802.1Q header, so
		 * we have to do segmentation before we add that header.
		 *
		 * GSO does work with hardware-accelerated VLAN tagging, but we
		 * can't use hardware-accelerated VLAN tagging since it
		 * requires the device to have a VLAN group configured (with
		 * e.g. vconfig(8)) and we don't do that.
		 *
		 * Having to do this here may be a performance loss, since we
		 * can't take advantage of TSO hardware support, although it
		 * does not make a measurable network performance difference
		 * for 1G Ethernet.  Fixing that would require patching the
		 * kernel (either to add GSO support to the VLAN protocol or to
		 * support hardware-accelerated VLAN tagging without VLAN
		 * groups configured). */
		if (skb_is_gso(skb)) {
			const struct nlattr *actions_left;
			int actions_len_left;
			struct sk_buff *segs;

			segs = skb_gso_segment(skb, 0);
			kfree_skb(skb);
			if (IS_ERR(segs))
				return ERR_CAST(segs);

			actions_len_left = actions_len;
			actions_left = nla_next(a, &actions_len_left);

			do {
				struct sk_buff *nskb = segs->next;

				segs->next = NULL;

				/* GSO can change the checksum type so update.*/
				compute_ip_summed(segs, true);

				segs = __vlan_put_tag(segs, ntohs(tci));
				err = -ENOMEM;
				if (segs) {
					err = do_execute_actions(
						dp, segs, key, actions_left,
						actions_len_left);
				}

				if (unlikely(err)) {
					while ((segs = nskb)) {
						nskb = segs->next;
						segs->next = NULL;
						kfree_skb(segs);
					}
					return ERR_PTR(err);
				}

				segs = nskb;
			} while (segs->next);

			skb = segs;
			compute_ip_summed(skb, true);
		}

		/* The hardware-accelerated version of vlan_put_tag() works
		 * only for a device that has a VLAN group configured (with
		 * e.g. vconfig(8)), so call the software-only version
		 * __vlan_put_tag() directly instead. */
		skb = __vlan_put_tag(skb, ntohs(tci));
		if (!skb)
			return ERR_PTR(-ENOMEM);

		/* GSO doesn't fix up the hardware computed checksum so this
		 * will only be hit in the non-GSO case. */
		if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ ETH_HLEN, VLAN_HLEN, 0));
	}

	return skb;
}
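
/* Implements ODPAT_STRIP_VLAN: removes the 802.1Q header, if any.  Returns
 * NULL (and frees the packet) if 'skb' could not be made writable. */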
static struct sk_buff *strip_vlan(struct sk_buff *skb)
{
	skb = make_writable(skb, 0);
	if (skb)
		vlan_pull_tag(skb);
	return skb;
}
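
/* Returns true if flow_extract() identified 'skb' as IPv4 and located its
 * transport header, so the IP-rewriting actions below may safely modify it. */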
static bool is_ip(struct sk_buff *skb, const struct sw_flow_key *key)
{
	return (key->dl_type == htons(ETH_P_IP) &&
		skb->transport_header > skb->network_header);
}
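
/* Returns a pointer to the TCP or UDP checksum field of 'skb', or NULL if
 * the flow is neither TCP nor UDP or the transport header is truncated. */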
static __sum16 *get_l4_checksum(struct sk_buff *skb, const struct sw_flow_key *key)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (key->nw_proto == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			return &tcp_hdr(skb)->check;
	} else if (key->nw_proto == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr)))
			return &udp_hdr(skb)->check;
	}
	return NULL;
}
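
/* Implements ODPAT_SET_NW_SRC and ODPAT_SET_NW_DST: rewrites the IPv4 source
 * or destination address, incrementally updating both the IP header checksum
 * and the TCP/UDP checksum (which covers the pseudo-header). */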
static struct sk_buff *set_nw_addr(struct sk_buff *skb,
				   const struct sw_flow_key *key,
				   const struct nlattr *a)
{
	__be32 new_nwaddr = nla_get_be32(a);
	struct iphdr *nh;
	__sum16 *check;
	__be32 *nwaddr;

	if (unlikely(!is_ip(skb, key)))
		return skb;

	skb = make_writable(skb, 0);
	if (unlikely(!skb))
		return NULL;

	nh = ip_hdr(skb);
	nwaddr = nla_type(a) == ODPAT_SET_NW_SRC ? &nh->saddr : &nh->daddr;

	check = get_l4_checksum(skb, key);
	if (likely(check))
		inet_proto_csum_replace4(check, skb, *nwaddr, new_nwaddr, 1);
	csum_replace4(&nh->check, *nwaddr, new_nwaddr);

	*nwaddr = new_nwaddr;

	return skb;
}
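
/* Implements ODPAT_SET_NW_TOS: overwrites the DSCP bits of the IPv4 TOS field
 * while preserving the ECN bits, adjusting the IP header checksum to match.
 * The L4 checksum does not cover the TOS byte, so it needs no update. */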
static struct sk_buff *set_nw_tos(struct sk_buff *skb,
				  const struct sw_flow_key *key,
				  u8 nw_tos)
{
	if (unlikely(!is_ip(skb, key)))
		return skb;

	skb = make_writable(skb, 0);
	if (skb) {
		struct iphdr *nh = ip_hdr(skb);
		u8 old = nh->tos;
		u8 new;

		/* Set the DSCP bits and preserve the ECN bits. */
		new = nw_tos | (nh->tos & INET_ECN_MASK);
		csum_replace4(&nh->check, (__force __be32)old,
					  (__force __be32)new);
		nh->tos = new;
	}
	return skb;
}
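
/* Implements ODPAT_SET_TP_SRC and ODPAT_SET_TP_DST: rewrites the TCP or UDP
 * source or destination port and incrementally updates the L4 checksum. */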
static struct sk_buff *set_tp_port(struct sk_buff *skb,
				   const struct sw_flow_key *key,
				   const struct nlattr *a)
{
	struct udphdr *th;
	__sum16 *check;
	__be16 *port;

	if (unlikely(!is_ip(skb, key)))
		return skb;

	skb = make_writable(skb, 0);
	if (unlikely(!skb))
		return NULL;

	/* Must follow make_writable() since that can move the skb data. */
	check = get_l4_checksum(skb, key);
	if (unlikely(!check))
		return skb;

	/*
	 * Update port and checksum.
	 *
	 * This is OK because source and destination port numbers are at the
	 * same offsets in both UDP and TCP headers, and get_l4_checksum() only
	 * supports those protocols.
	 */
	th = udp_hdr(skb);
	port = nla_type(a) == ODPAT_SET_TP_SRC ? &th->source : &th->dest;
	inet_proto_csum_replace2(check, skb, *port, nla_get_be16(a), 0);
	*port = nla_get_be16(a);

	return skb;
}

/**
 * is_spoofed_arp - check for invalid ARP packet
 *
 * @skb: skbuff containing an Ethernet packet, with network header pointing
 * just past the Ethernet and optional 802.1Q header.
 * @key: flow key extracted from @skb by flow_extract()
 *
 * Returns true if @skb is an invalid Ethernet+IPv4 ARP packet: one with screwy
 * or truncated header fields or one whose inner and outer Ethernet address
 * differ.
 */
static bool is_spoofed_arp(struct sk_buff *skb, const struct sw_flow_key *key)
{
	struct arp_eth_header *arp;

	if (key->dl_type != htons(ETH_P_ARP))
		return false;

	if (skb_network_offset(skb) + sizeof(struct arp_eth_header) > skb->len)
		return true;

	arp = (struct arp_eth_header *)skb_network_header(skb);
	return (arp->ar_hrd != htons(ARPHRD_ETHER) ||
		arp->ar_pro != htons(ETH_P_IP) ||
		arp->ar_hln != ETH_ALEN ||
		arp->ar_pln != 4 ||
		compare_ether_addr(arp->ar_sha, eth_hdr(skb)->h_source));
}
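
/* Transmits 'skb' on the vport numbered 'out_port', consuming 'skb'.  The
 * packet is dropped if 'skb' is NULL or if the port no longer exists. */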
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct vport *p;

	if (!skb)
		goto error;

	p = rcu_dereference(dp->ports[out_port]);
	if (!p)
		goto error;

	vport_send(p, skb);
	return;

error:
	kfree_skb(skb);
}
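
/* Implements ODPAT_CONTROLLER: sends a clone of 'skb' to userspace on the
 * _ODPL_ACTION_NR queue, along with the action's 'arg'.  The original 'skb'
 * is left alone for any remaining actions. */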
static int output_control(struct datapath *dp, struct sk_buff *skb, u64 arg)
{
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	return dp_output_control(dp, skb, _ODPL_ACTION_NR, arg);
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      const struct sw_flow_key *key,
			      const struct nlattr *actions, u32 actions_len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so that doing a clone and
	 * then freeing the original skbuff is wasteful.  So the following code
	 * is slightly obscure just to avoid that. */
	int prev_port = -1;
	u32 priority = skb->priority;
	const struct nlattr *a;
	int rem, err;

	for (a = actions, rem = actions_len; rem > 0; a = nla_next(a, &rem)) {
		if (prev_port != -1) {
			do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case ODPAT_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case ODPAT_CONTROLLER:
			err = output_control(dp, skb, nla_get_u64(a));
			if (err) {
				kfree_skb(skb);
				return err;
			}
			break;

		case ODPAT_SET_TUNNEL:
			OVS_CB(skb)->tun_id = nla_get_be64(a);
			break;

		case ODPAT_SET_DL_TCI:
			skb = modify_vlan_tci(dp, skb, key, a, rem);
			if (IS_ERR(skb))
				return PTR_ERR(skb);
			break;

		case ODPAT_STRIP_VLAN:
			skb = strip_vlan(skb);
			break;

		case ODPAT_SET_DL_SRC:
			skb = make_writable(skb, 0);
			if (!skb)
				return -ENOMEM;
			memcpy(eth_hdr(skb)->h_source, nla_data(a), ETH_ALEN);
			break;

		case ODPAT_SET_DL_DST:
			skb = make_writable(skb, 0);
			if (!skb)
				return -ENOMEM;
			memcpy(eth_hdr(skb)->h_dest, nla_data(a), ETH_ALEN);
			break;

		case ODPAT_SET_NW_SRC:
		case ODPAT_SET_NW_DST:
			skb = set_nw_addr(skb, key, a);
			break;

		case ODPAT_SET_NW_TOS:
			skb = set_nw_tos(skb, key, nla_get_u8(a));
			break;

		case ODPAT_SET_TP_SRC:
		case ODPAT_SET_TP_DST:
			skb = set_tp_port(skb, key, a);
			break;

		case ODPAT_SET_PRIORITY:
			skb->priority = nla_get_u32(a);
			break;

		case ODPAT_POP_PRIORITY:
			skb->priority = priority;
			break;

		case ODPAT_DROP_SPOOFED_ARP:
			if (unlikely(is_spoofed_arp(skb, key)))
				goto exit;
			break;
		}

		if (!skb)
			return -ENOMEM;
	}

exit:
	if (prev_port != -1)
		do_output(dp, skb, prev_port);
	else
		kfree_skb(skb);

	return 0;
}

/* Send a copy of this packet up to the sFlow agent, along with extra
 * information about what happened to it. */
static void sflow_sample(struct datapath *dp, struct sk_buff *skb,
			 const struct nlattr *a, u32 actions_len,
			 struct vport *vport)
{
	struct odp_sflow_sample_header *hdr;
	unsigned int hdrlen = sizeof(struct odp_sflow_sample_header);
	struct sk_buff *nskb;

	nskb = skb_copy_expand(skb, actions_len + hdrlen, 0, GFP_ATOMIC);
	if (unlikely(!nskb))
		return;

	memcpy(__skb_push(nskb, actions_len), a, actions_len);
	hdr = (struct odp_sflow_sample_header *)__skb_push(nskb, hdrlen);
	hdr->actions_len = actions_len;
	hdr->sample_pool = atomic_read(&vport->sflow_pool);
	dp_output_control(dp, nskb, _ODPL_SFLOW_NR, 0);
}
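
/* sFlow sampling in execute_actions() below: 'dp->sflow_probability' is a
 * fixed-point probability out of UINT_MAX, with UINT_MAX meaning "sample
 * every packet".  The per-vport sample pool counts every packet seen,
 * whether or not it is sampled. */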

/* Execute a list of actions against 'skb'. */
int execute_actions(struct datapath *dp, struct sk_buff *skb,
		    const struct sw_flow_key *key,
		    const struct nlattr *actions, u32 actions_len)
{
	if (dp->sflow_probability) {
		struct vport *p = OVS_CB(skb)->vport;
		if (p) {
			atomic_inc(&p->sflow_pool);
			if (dp->sflow_probability == UINT_MAX ||
			    net_random() < dp->sflow_probability)
				sflow_sample(dp, skb, actions, actions_len, p);
		}
	}

	OVS_CB(skb)->tun_id = 0;

	return do_execute_actions(dp, skb, key, actions, actions_len);
}