X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=datapath%2Factions.c;h=a28e98662db314b23486b48d1f321c1b1a7f1aed;hb=cb4ef1ea12789af09fe9a6634012cf0f3797a56a;hp=8a3e8abb549e1302bdbed626bd889728bf2854f5;hpb=00908dc27a4d93bd1c5bda3bcdc84ec351e9a09a;p=openvswitch

diff --git a/datapath/actions.c b/datapath/actions.c
index 8a3e8abb..a28e9866 100644
--- a/datapath/actions.c
+++ b/datapath/actions.c
@@ -1,6 +1,6 @@
 /*
  * Distributed under the terms of the GNU GPL version 2.
- * Copyright (c) 2007, 2008, 2009 Nicira Networks.
+ * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
  *
  * Significant portions of this file may be copied from parts of the Linux
  * kernel, by Linus Torvalds and others.
@@ -8,417 +8,446 @@
 
 /* Functions for executing flow actions. */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include
 #include
 #include
+#include
 #include
 #include
 #include
+#include
 #include
+#include
 #include
 #include
-#include "datapath.h"
-#include "dp_dev.h"
+
 #include "actions.h"
-#include "openvswitch/datapath-protocol.h"
+#include "checksum.h"
+#include "datapath.h"
+#include "vlan.h"
+#include "vport.h"
 
-struct sk_buff *
-make_writable(struct sk_buff *skb, gfp_t gfp)
+static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
+			const struct nlattr *attr, int len, bool keep_skb);
+
+static int make_writable(struct sk_buff *skb, int write_len)
 {
-	if (skb_shared(skb) || skb_cloned(skb)) {
-		struct sk_buff *nskb = skb_copy(skb, gfp);
-		if (nskb) {
-			kfree_skb(skb);
-			return nskb;
-		}
-	} else {
-		unsigned int hdr_len = (skb_transport_offset(skb)
-					+ sizeof(struct tcphdr));
-		if (pskb_may_pull(skb, min(hdr_len, skb->len)))
-			return skb;
-	}
-	kfree_skb(skb);
-	return NULL;
-}
+	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
+		return 0;
 
+	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+}
 
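The new make_writable() is a copy-on-write gate: a shared skb is left untouched until an action needs to edit its first write_len bytes, and pskb_expand_head() unshares it at most once. A minimal userspace sketch of the same discipline, with an invented struct buf standing in for sk_buff (illustration only, not from the patch):

    #include <stdlib.h>
    #include <string.h>

    struct buf {
        int refcnt;             /* owners currently sharing 'data' */
        size_t len;
        unsigned char *data;
    };

    /* Give the caller a private copy of '*bp' before it writes, the same
     * way make_writable() leaves shared skbs alone until someone edits one. */
    static int buf_make_writable(struct buf **bp)
    {
        struct buf *b = *bp, *copy;

        if (b->refcnt == 1)     /* sole owner: already safe to write */
            return 0;

        copy = malloc(sizeof *copy);
        if (!copy)
            return -1;
        copy->data = malloc(b->len);
        if (!copy->data) {
            free(copy);
            return -1;
        }
        memcpy(copy->data, b->data, b->len);
        copy->refcnt = 1;
        copy->len = b->len;

        b->refcnt--;            /* release our share of the old buffer */
        *bp = copy;
        return 0;
    }

    int main(void)
    {
        unsigned char bytes[4] = "abc";
        struct buf shared = { 2, sizeof bytes, bytes };  /* two owners */
        struct buf *mine = &shared;

        if (buf_make_writable(&mine) == 0)
            mine->data[0] = 'X';    /* cannot disturb the other owner */
        return 0;                   /* cleanup elided for brevity */
    }

The payoff is that read-only action chains never pay for a copy; only the first writer does.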
-static struct sk_buff *
-vlan_pull_tag(struct sk_buff *skb)
+/* Remove VLAN header from packet and update csum accordingly. */
+static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
 {
-	struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
 	struct ethhdr *eh;
+	struct vlan_ethhdr *veth;
+	int err;
+
+	err = make_writable(skb, VLAN_ETH_HLEN);
+	if (unlikely(err))
+		return err;
+
+	if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
+		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
+					+ ETH_HLEN, VLAN_HLEN, 0));
 
-	/* Verify we were given a vlan packet */
-	if (vh->h_vlan_proto != htons(ETH_P_8021Q))
-		return skb;
+	veth = (struct vlan_ethhdr *) skb->data;
+	*current_tci = veth->h_vlan_TCI;
 
-	memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);
+	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
 
-	eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
+	eh = (struct ethhdr *)__skb_pull(skb, VLAN_HLEN);
 
 	skb->protocol = eh->h_proto;
 	skb->mac_header += VLAN_HLEN;
 
-	return skb;
+	return 0;
 }
 
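__pop_vlan_tci() can repair a CHECKSUM_COMPLETE value with csum_sub() because the Internet checksum is a position-independent ones'-complement sum: subtracting the partial sum of the four departing tag bytes yields the sum of what remains. A self-contained sketch of that arithmetic (simplified fold/csum_partial stand-ins, not the kernel's implementations):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Fold a 32-bit accumulator down to a 16-bit ones'-complement sum. */
    static uint32_t fold(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return sum;
    }

    static uint32_t csum_partial(const uint8_t *p, size_t n, uint32_t sum)
    {
        size_t i;

        for (i = 0; i + 1 < n; i += 2)
            sum += (uint32_t)p[i] << 8 | p[i + 1];
        if (n & 1)
            sum += (uint32_t)p[n - 1] << 8;
        return fold(sum);
    }

    /* In ones'-complement arithmetic, subtracting is adding the complement. */
    static uint32_t csum_sub(uint32_t a, uint32_t b)
    {
        return fold(a + (~b & 0xffff));
    }

    int main(void)
    {
        uint8_t pkt[64], stripped[60];
        uint32_t full, tag, direct;
        size_t i;

        for (i = 0; i < sizeof pkt; i++)
            pkt[i] = (uint8_t)(i * 7 + 3);

        full = csum_partial(pkt, sizeof pkt, 0);
        tag = csum_partial(pkt + 14, 4, 0);   /* the 4 tag bytes after the MACs */

        memcpy(stripped, pkt, 14);            /* drop bytes 14..17 */
        memcpy(stripped + 14, pkt + 18, sizeof pkt - 18);
        direct = csum_partial(stripped, sizeof stripped, 0);

        printf("adjusted=%04x direct=%04x\n",
               (unsigned)csum_sub(full, tag), (unsigned)direct);
        return csum_sub(full, tag) != direct;
    }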
-static struct sk_buff *
-modify_vlan_tci(struct datapath *dp, struct sk_buff *skb,
-		struct odp_flow_key *key, const union odp_action *a,
-		int n_actions, gfp_t gfp)
+static int pop_vlan(struct sk_buff *skb)
 {
-	u16 tci, mask;
-
-	if (a->type == ODPAT_SET_VLAN_VID) {
-		tci = ntohs(a->vlan_vid.vlan_vid);
-		mask = VLAN_VID_MASK;
-		key->dl_vlan = htons(tci & mask);
-	} else {
-		tci = a->vlan_pcp.vlan_pcp << 13;
-		mask = VLAN_PCP_MASK;
-	}
-
-	skb = make_writable(skb, gfp);
-	if (!skb)
-		return ERR_PTR(-ENOMEM);
+	__be16 tci;
+	int err;
 
-	if (skb->protocol == htons(ETH_P_8021Q)) {
-		/* Modify vlan id, but maintain other TCI values */
-		struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
-		vh->h_vlan_TCI = htons((ntohs(vh->h_vlan_TCI) & ~mask) | tci);
+	if (likely(vlan_tx_tag_present(skb))) {
+		vlan_set_tci(skb, 0);
 	} else {
-		/* Add vlan header */
-
-		/* Set up checksumming pointers for checksum-deferred packets
-		 * on Xen.  Otherwise, dev_queue_xmit() will try to do this
-		 * when we send the packet out on the wire, and it will fail at
-		 * that point because skb_checksum_setup() will not look inside
-		 * an 802.1Q header. */
-		vswitch_skb_checksum_setup(skb);
-
-		/* GSO is not implemented for packets with an 802.1Q header, so
-		 * we have to do segmentation before we add that header.
-		 *
-		 * GSO does work with hardware-accelerated VLAN tagging, but we
-		 * can't use hardware-accelerated VLAN tagging since it
-		 * requires the device to have a VLAN group configured (with
-		 * e.g. vconfig(8)) and we don't do that.
-		 *
-		 * Having to do this here may be a performance loss, since we
-		 * can't take advantage of TSO hardware support, although it
-		 * does not make a measurable network performance difference
-		 * for 1G Ethernet.  Fixing that would require patching the
-		 * kernel (either to add GSO support to the VLAN protocol or to
-		 * support hardware-accelerated VLAN tagging without VLAN
-		 * groups configured). */
-		if (skb_is_gso(skb)) {
-			struct sk_buff *segs;
-
-			segs = skb_gso_segment(skb, 0);
-			kfree_skb(skb);
-			if (unlikely(IS_ERR(segs)))
-				return ERR_CAST(segs);
-
-			do {
-				struct sk_buff *nskb = segs->next;
-				int err;
-
-				segs->next = NULL;
-
-				segs = __vlan_put_tag(segs, tci);
-				err = -ENOMEM;
-				if (segs) {
-					struct odp_flow_key segkey = *key;
-					err = execute_actions(dp, segs,
-							      &segkey, a + 1,
-							      n_actions - 1,
-							      gfp);
-				}
-
-				if (unlikely(err)) {
-					while ((segs = nskb)) {
-						nskb = segs->next;
-						segs->next = NULL;
-						kfree_skb(segs);
-					}
-					return ERR_PTR(err);
-				}
-
-				segs = nskb;
-			} while (segs->next);
-
-			skb = segs;
-		}
+		if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
+			     skb->len < VLAN_ETH_HLEN))
+			return 0;
 
-		/* The hardware-accelerated version of vlan_put_tag() works
-		 * only for a device that has a VLAN group configured (with
-		 * e.g. vconfig(8)), so call the software-only version
-		 * __vlan_put_tag() directly instead.
-		 */
-		skb = __vlan_put_tag(skb, tci);
-		if (!skb)
-			return ERR_PTR(-ENOMEM);
+		err = __pop_vlan_tci(skb, &tci);
+		if (err)
+			return err;
 	}
+	/* move next vlan tag to hw accel tag */
+	if (likely(skb->protocol != htons(ETH_P_8021Q) ||
+		   skb->len < VLAN_ETH_HLEN))
+		return 0;
 
-	return skb;
-}
+	err = __pop_vlan_tci(skb, &tci);
+	if (unlikely(err))
+		return err;
 
-static struct sk_buff *strip_vlan(struct sk_buff *skb,
-				  struct odp_flow_key *key, gfp_t gfp)
-{
-	skb = make_writable(skb, gfp);
-	if (skb) {
-		vlan_pull_tag(skb);
-		key->dl_vlan = htons(ODP_VLAN_NONE);
-	}
-	return skb;
+	__vlan_hwaccel_put_tag(skb, ntohs(tci));
+	return 0;
 }
 
-static struct sk_buff *set_dl_addr(struct sk_buff *skb,
-				   const struct odp_action_dl_addr *a,
-				   gfp_t gfp)
+static int push_vlan(struct sk_buff *skb, __be16 new_tci)
 {
-	skb = make_writable(skb, gfp);
-	if (skb) {
-		struct ethhdr *eh = eth_hdr(skb);
-		memcpy(a->type == ODPAT_SET_DL_SRC ? eh->h_source : eh->h_dest,
-		       a->dl_addr, ETH_ALEN);
+	if (unlikely(vlan_tx_tag_present(skb))) {
+		u16 current_tag;
+
+		/* push down current VLAN tag */
+		current_tag = vlan_tx_tag_get(skb);
+
+		if (!__vlan_put_tag(skb, current_tag))
+			return -ENOMEM;
+
+		if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
+			skb->csum = csum_add(skb->csum, csum_partial(skb->data
+					+ ETH_HLEN, VLAN_HLEN, 0));
 	}
-	return skb;
+	__vlan_hwaccel_put_tag(skb, ntohs(new_tci));
+	return 0;
 }
 
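For reference, the 16-bit TCI that pop_vlan() and push_vlan() shuttle between the 802.1Q header and the hardware-accelerated tag packs three fields: VID in the low 12 bits, CFI in bit 12, PCP in the top 3. A tiny demonstration (the mask macros are redefined locally but mirror the kernel's values):

    #include <stdint.h>
    #include <stdio.h>

    #define VLAN_VID_MASK   0x0fff  /* low 12 bits: VLAN ID */
    #define VLAN_CFI_MASK   0x1000  /* bit 12: canonical format indicator */
    #define VLAN_PCP_SHIFT  13      /* top 3 bits: priority code point */

    int main(void)
    {
        uint16_t tci = (5 << VLAN_PCP_SHIFT) | 100;   /* PCP 5, VID 100 */

        printf("vid=%u pcp=%u cfi=%u\n",
               (unsigned)(tci & VLAN_VID_MASK),
               (unsigned)(tci >> VLAN_PCP_SHIFT),
               (unsigned)!!(tci & VLAN_CFI_MASK));
        return 0;
    }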
-/* Updates 'sum', which is a field in 'skb''s data, given that a 4-byte field
- * covered by the sum has been changed from 'from' to 'to'.  If set,
- * 'pseudohdr' indicates that the field is in the TCP or UDP pseudo-header.
- * Based on nf_proto_csum_replace4. */
-static void update_csum(__sum16 *sum, struct sk_buff *skb,
-			__be32 from, __be32 to, int pseudohdr)
+static bool is_ip(struct sk_buff *skb)
 {
-	__be32 diff[] = { ~from, to };
-	if (skb->ip_summed != CHECKSUM_PARTIAL) {
-		*sum = csum_fold(csum_partial((char *)diff, sizeof(diff),
-				~csum_unfold(*sum)));
-		if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
-			skb->csum = ~csum_partial((char *)diff, sizeof(diff),
-						~skb->csum);
-	} else if (pseudohdr)
-		*sum = ~csum_fold(csum_partial((char *)diff, sizeof(diff),
-				csum_unfold(*sum)));
+	return (OVS_CB(skb)->flow->key.eth.type == htons(ETH_P_IP) &&
+		skb->transport_header > skb->network_header);
 }
 
-static struct sk_buff *set_nw_addr(struct sk_buff *skb,
-				   struct odp_flow_key *key,
-				   const struct odp_action_nw_addr *a,
-				   gfp_t gfp)
+static __sum16 *get_l4_checksum(struct sk_buff *skb)
 {
-	if (key->dl_type != htons(ETH_P_IP))
-		return skb;
-
-	skb = make_writable(skb, gfp);
-	if (skb) {
-		struct iphdr *nh = ip_hdr(skb);
-		u32 *f = a->type == ODPAT_SET_NW_SRC ? &nh->saddr : &nh->daddr;
-		u32 old = *f;
-		u32 new = a->nw_addr;
-
-		if (key->nw_proto == IPPROTO_TCP) {
-			struct tcphdr *th = tcp_hdr(skb);
-			update_csum(&th->check, skb, old, new, 1);
-		} else if (key->nw_proto == IPPROTO_UDP) {
-			struct udphdr *th = udp_hdr(skb);
-			update_csum(&th->check, skb, old, new, 1);
-		}
-		update_csum(&nh->check, skb, old, new, 0);
-		*f = new;
+	u8 nw_proto = OVS_CB(skb)->flow->key.ip.proto;
+	int transport_len = skb->len - skb_transport_offset(skb);
+	if (nw_proto == IPPROTO_TCP) {
+		if (likely(transport_len >= sizeof(struct tcphdr)))
+			return &tcp_hdr(skb)->check;
+	} else if (nw_proto == IPPROTO_UDP) {
+		if (likely(transport_len >= sizeof(struct udphdr)))
+			return &udp_hdr(skb)->check;
 	}
-	return skb;
+	return NULL;
 }
 
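The removed update_csum() and its replacements, csum_replace4() and inet_proto_csum_replace4(), are all forms of the RFC 1624 incremental update HC' = ~(~HC + ~m + m'). A standalone check of the shortcut against a full recompute; csum_replace4() here is a local stand-in, not the kernel helper:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t fold32(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    /* HC' = ~(~HC + ~m + m') from RFC 1624, applied per 16-bit half. */
    static uint16_t csum_replace4(uint16_t check, uint32_t from, uint32_t to)
    {
        uint32_t sum = (uint16_t)~check;

        sum += (uint16_t)~(from >> 16);
        sum += (uint16_t)~(from & 0xffff);
        sum += to >> 16;
        sum += to & 0xffff;
        return (uint16_t)~fold32(sum);
    }

    int main(void)
    {
        uint16_t h[10] = { 0x4500, 0x0054, 0x1c46, 0x4000,
                           0x4006, 0x0000, 0xac10, 0x0a63,
                           0xac10, 0x0a0c };          /* IPv4-style header words */
        uint32_t sum = 0;
        int i;

        for (i = 0; i < 10; i++)
            sum += h[i];
        h[5] = ~fold32(sum);                          /* fill in the checksum */

        uint32_t from = (uint32_t)h[8] << 16 | h[9];
        uint32_t to = 0xc0a80101;                     /* rewrite the address */

        uint16_t fast = csum_replace4(h[5], from, to);

        h[8] = to >> 16;                              /* now recompute from scratch */
        h[9] = to & 0xffff;
        h[5] = 0;
        sum = 0;
        for (i = 0; i < 10; i++)
            sum += h[i];
        uint16_t slow = ~fold32(sum);

        printf("incremental=%04x recomputed=%04x\n", (unsigned)fast, (unsigned)slow);
        return fast != slow;
    }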
-static struct sk_buff *
-set_tp_port(struct sk_buff *skb, struct odp_flow_key *key,
-	    const struct odp_action_tp_port *a,
-	    gfp_t gfp)
+static int set_nw_addr(struct sk_buff *skb, const struct nlattr *a)
 {
-	int check_ofs;
-
-	if (key->dl_type != htons(ETH_P_IP))
-		return skb;
-
-	if (key->nw_proto == IPPROTO_TCP)
-		check_ofs = offsetof(struct tcphdr, check);
-	else if (key->nw_proto == IPPROTO_UDP)
-		check_ofs = offsetof(struct udphdr, check);
-	else
-		return skb;
-
-	skb = make_writable(skb, gfp);
-	if (skb) {
-		struct udphdr *th = udp_hdr(skb);
-		u16 *f = a->type == ODPAT_SET_TP_SRC ? &th->source : &th->dest;
-		u16 old = *f;
-		u16 new = a->tp_port;
-		update_csum((u16*)((u8*)skb->data + check_ofs),
-			    skb, old, new, 1);
-		*f = new;
-	}
-	return skb;
-}
+	__be32 new_nwaddr = nla_get_be32(a);
+	struct iphdr *nh;
+	__sum16 *check;
+	__be32 *nwaddr;
+	int err;
 
-static inline unsigned packet_length(const struct sk_buff *skb)
-{
-	unsigned length = skb->len - ETH_HLEN;
-	if (skb->protocol == htons(ETH_P_8021Q))
-		length -= VLAN_HLEN;
-	return length;
+	if (unlikely(!is_ip(skb)))
+		return 0;
+
+	err = make_writable(skb, skb_network_offset(skb) +
+				 sizeof(struct iphdr));
+	if (unlikely(err))
+		return err;
+
+	nh = ip_hdr(skb);
+	nwaddr = nla_type(a) == OVS_ACTION_ATTR_SET_NW_SRC ?
+					&nh->saddr : &nh->daddr;
+
+	check = get_l4_checksum(skb);
+	if (likely(check))
+		inet_proto_csum_replace4(check, skb, *nwaddr, new_nwaddr, 1);
+	csum_replace4(&nh->check, *nwaddr, new_nwaddr);
+
+	skb_clear_rxhash(skb);
+
+	*nwaddr = new_nwaddr;
+
+	return 0;
 }
 
-int dp_xmit_skb(struct sk_buff *skb)
+static int set_nw_tos(struct sk_buff *skb, u8 nw_tos)
 {
-	struct datapath *dp = skb->dev->br_port->dp;
-	int len = skb->len;
+	struct iphdr *nh = ip_hdr(skb);
+	u8 old, new;
+	int err;
 
-	if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb)) {
-		printk(KERN_WARNING "%s: dropped over-mtu packet: %d > %d\n",
-		       dp_name(dp), packet_length(skb), skb->dev->mtu);
-		kfree_skb(skb);
-		return -E2BIG;
-	}
+	if (unlikely(!is_ip(skb)))
+		return 0;
 
-	dev_queue_xmit(skb);
+	err = make_writable(skb, skb_network_offset(skb) +
+				 sizeof(struct iphdr));
+	if (unlikely(err))
+		return err;
 
-	return len;
+	/* Set the DSCP bits and preserve the ECN bits. */
+	old = nh->tos;
+	new = nw_tos | (nh->tos & INET_ECN_MASK);
+	csum_replace4(&nh->check, (__force __be32)old,
+				  (__force __be32)new);
+	nh->tos = new;
+
+	return 0;
 }
 
-static void
-do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
+static int set_tp_port(struct sk_buff *skb, const struct nlattr *a)
 {
-	struct net_bridge_port *p;
-	struct net_device *dev;
+	struct udphdr *th;
+	__sum16 *check;
+	__be16 *port;
+	int err;
 
-	if (!skb)
-		goto error;
+	if (unlikely(!is_ip(skb)))
+		return 0;
+
+	err = make_writable(skb, skb_transport_offset(skb) +
+				 sizeof(struct tcphdr));
+	if (unlikely(err))
+		return err;
+
+	/* Must follow make_writable() since that can move the skb data. */
+	check = get_l4_checksum(skb);
+	if (unlikely(!check))
+		return 0;
+
+	/*
+	 * Update port and checksum.
+	 *
+	 * This is OK because source and destination port numbers are at the
+	 * same offsets in both UDP and TCP headers, and get_l4_checksum() only
+	 * supports those protocols.
+	 */
+	th = udp_hdr(skb);
+	port = nla_type(a) == OVS_ACTION_ATTR_SET_TP_SRC ? &th->source : &th->dest;
+	inet_proto_csum_replace2(check, skb, *port, nla_get_be16(a), 0);
+	*port = nla_get_be16(a);
+	skb_clear_rxhash(skb);
 
-	p = dp->ports[out_port];
-	if (!p)
-		goto error;
+	return 0;
+}
 
-	dev = skb->dev = p->dev;
-	if (is_dp_dev(dev))
-		dp_dev_recv(dev, skb);
-	else
-		dp_xmit_skb(skb);
-	return;
+static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
+{
+	struct vport *vport;
 
-error:
-	kfree_skb(skb);
+	if (unlikely(!skb))
+		return -ENOMEM;
+
+	vport = rcu_dereference(dp->ports[out_port]);
+	if (unlikely(!vport)) {
+		kfree_skb(skb);
+		return -ENODEV;
+	}
+
+	vport_send(vport, skb);
+	return 0;
 }
 
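set_tp_port() leans on a layout fact stated in its comment: TCP and UDP place the source and destination ports at identical offsets, while their checksum fields differ, which is why the checksum pointer comes from get_l4_checksum() instead of a hardcoded offset. A quick userspace verification with local header definitions per RFC 793 and RFC 768:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Minimal header layouts per RFC 793 (TCP) and RFC 768 (UDP). */
    struct tcp_hdr {
        uint16_t source, dest;
        uint32_t seq, ack_seq;
        uint16_t flags_off;
        uint16_t window;
        uint16_t check;
        uint16_t urg_ptr;
    };

    struct udp_hdr {
        uint16_t source, dest;
        uint16_t len;
        uint16_t check;
    };

    int main(void)
    {
        printf("ports:    tcp %zu/%zu  udp %zu/%zu\n",
               offsetof(struct tcp_hdr, source), offsetof(struct tcp_hdr, dest),
               offsetof(struct udp_hdr, source), offsetof(struct udp_hdr, dest));
        printf("checksum: tcp %zu  udp %zu\n",
               offsetof(struct tcp_hdr, check), offsetof(struct udp_hdr, check));
        return 0;
    }

The ports come out at 0/2 for both protocols; the checksums land at 16 and 6 respectively.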
-/* Never consumes 'skb'.  Returns a port that 'skb' should be sent to, -1 if
- * none. */
-static int output_group(struct datapath *dp, __u16 group,
-			struct sk_buff *skb, gfp_t gfp)
+static int output_userspace(struct datapath *dp, struct sk_buff *skb,
+			    const struct nlattr *attr)
 {
-	struct dp_port_group *g = rcu_dereference(dp->groups[group]);
-	int prev_port = -1;
-	int i;
-
-	if (!g)
-		return -1;
-	for (i = 0; i < g->n_ports; i++) {
-		struct net_bridge_port *p = dp->ports[g->ports[i]];
-		if (!p || skb->dev == p->dev)
-			continue;
-		if (prev_port != -1) {
-			struct sk_buff *clone = skb_clone(skb, gfp);
-			if (!clone)
-				return -1;
-			do_output(dp, clone, prev_port);
+	struct dp_upcall_info upcall;
+	const struct nlattr *a;
+	int rem;
+
+	upcall.cmd = OVS_PACKET_CMD_ACTION;
+	upcall.key = &OVS_CB(skb)->flow->key;
+	upcall.userdata = NULL;
+	upcall.pid = 0;
+
+	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
+	     a = nla_next(a, &rem)) {
+		switch (nla_type(a)) {
+		case OVS_USERSPACE_ATTR_USERDATA:
+			upcall.userdata = a;
+			break;
+
+		case OVS_USERSPACE_ATTR_PID:
+			upcall.pid = nla_get_u32(a);
+			break;
 		}
-		prev_port = p->port_no;
 	}
-	return prev_port;
+
+	return dp_upcall(dp, skb, &upcall);
 }
 
-static int
-output_control(struct datapath *dp, struct sk_buff *skb, u32 arg, gfp_t gfp)
+static int sample(struct datapath *dp, struct sk_buff *skb,
+		  const struct nlattr *attr)
 {
-	skb = skb_clone(skb, gfp);
-	if (!skb)
-		return -ENOMEM;
-	return dp_output_control(dp, skb, _ODPL_ACTION_NR, arg);
+	const struct nlattr *acts_list = NULL;
+	const struct nlattr *a;
+	int rem;
+
+	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
+	     a = nla_next(a, &rem)) {
+		switch (nla_type(a)) {
+		case OVS_SAMPLE_ATTR_PROBABILITY:
+			if (net_random() >= nla_get_u32(a))
+				return 0;
+			break;

+		case OVS_SAMPLE_ATTR_ACTIONS:
+			acts_list = a;
+			break;
+		}
+	}
+
+	return do_execute_actions(dp, skb, nla_data(acts_list),
+				  nla_len(acts_list), true);
 }
 
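Both output_userspace() and sample() walk nested attributes with the nla_data()/nla_len()/nla_next() idiom. A simplified userspace model of that TLV walk, using netlink's 4-byte alignment; the real kernel macros add validation that this sketch omits:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Same shape as the kernel's struct nlattr. */
    struct nlattr {
        uint16_t nla_len;       /* header plus payload, before padding */
        uint16_t nla_type;
    };

    #define NLA_ALIGN(n) (((n) + 3U) & ~3U)

    static const struct nlattr *nla_next(const struct nlattr *a, int *rem)
    {
        unsigned int total = NLA_ALIGN(a->nla_len);

        *rem -= total;
        return (const struct nlattr *)((const char *)a + total);
    }

    int main(void)
    {
        uint32_t storage[16];                        /* 4-byte aligned buffer */
        unsigned char *buf = (unsigned char *)storage;
        struct nlattr hdr;
        uint32_t v = 42;
        int off = 0, rem;
        const struct nlattr *a;

        /* attribute 1: type 1 carrying a u32 */
        hdr.nla_len = sizeof hdr + sizeof v;
        hdr.nla_type = 1;
        memcpy(buf + off, &hdr, sizeof hdr);
        memcpy(buf + off + sizeof hdr, &v, sizeof v);
        off += NLA_ALIGN(hdr.nla_len);

        /* attribute 2: type 2 carrying 3 bytes (padded to 4) */
        hdr.nla_len = sizeof hdr + 3;
        hdr.nla_type = 2;
        memcpy(buf + off, &hdr, sizeof hdr);
        memcpy(buf + off + sizeof hdr, "abc", 3);
        off += NLA_ALIGN(hdr.nla_len);

        for (a = (const struct nlattr *)buf, rem = off; rem > 0;
             a = nla_next(a, &rem))
            printf("type=%u payload=%u bytes\n",
                   (unsigned)a->nla_type, a->nla_len - (unsigned)sizeof hdr);
        return 0;
    }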
 /* Execute a list of actions against 'skb'. */
-int execute_actions(struct datapath *dp, struct sk_buff *skb,
-		    struct odp_flow_key *key,
-		    const union odp_action *a, int n_actions,
-		    gfp_t gfp)
+static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
+			const struct nlattr *attr, int len, bool keep_skb)
 {
 	/* Every output action needs a separate clone of 'skb', but the common
 	 * case is just a single output action, so that doing a clone and
 	 * then freeing the original skbuff is wasteful.  So the following code
 	 * is slightly obscure just to avoid that. */
 	int prev_port = -1;
-	int err;
-	for (; n_actions > 0; a++, n_actions--) {
-		WARN_ON_ONCE(skb_shared(skb));
+	u32 priority = skb->priority;
+	const struct nlattr *a;
+	int rem;
+
+	for (a = attr, rem = len; rem > 0;
+	     a = nla_next(a, &rem)) {
+		int err = 0;
+
 		if (prev_port != -1) {
-			do_output(dp, skb_clone(skb, gfp), prev_port);
+			do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
 			prev_port = -1;
 		}
 
-		switch (a->type) {
-		case ODPAT_OUTPUT:
-			prev_port = a->output.port;
+		switch (nla_type(a)) {
+		case OVS_ACTION_ATTR_OUTPUT:
+			prev_port = nla_get_u32(a);
 			break;
 
-		case ODPAT_OUTPUT_GROUP:
-			prev_port = output_group(dp, a->output_group.group,
-						 skb, gfp);
+		case OVS_ACTION_ATTR_USERSPACE:
+			output_userspace(dp, skb, a);
 			break;
 
-		case ODPAT_CONTROLLER:
-			err = output_control(dp, skb, a->controller.arg, gfp);
-			if (err) {
-				kfree_skb(skb);
+		case OVS_ACTION_ATTR_SET_TUNNEL:
+			OVS_CB(skb)->tun_id = nla_get_be64(a);
+			break;
+
+		case OVS_ACTION_ATTR_PUSH_VLAN:
+			err = push_vlan(skb, nla_get_be16(a));
+			if (unlikely(err)) /* skb already freed */
 				return err;
-			}
 			break;
 
-		case ODPAT_SET_VLAN_VID:
-		case ODPAT_SET_VLAN_PCP:
-			skb = modify_vlan_tci(dp, skb, key, a, n_actions, gfp);
-			if (IS_ERR(skb))
-				return PTR_ERR(skb);
+		case OVS_ACTION_ATTR_POP_VLAN:
+			err = pop_vlan(skb);
+			break;
+
+		case OVS_ACTION_ATTR_SET_DL_SRC:
+			err = make_writable(skb, ETH_HLEN);
+			if (likely(!err))
+				memcpy(eth_hdr(skb)->h_source, nla_data(a), ETH_ALEN);
 			break;
 
-		case ODPAT_STRIP_VLAN:
-			skb = strip_vlan(skb, key, gfp);
+		case OVS_ACTION_ATTR_SET_DL_DST:
+			err = make_writable(skb, ETH_HLEN);
+			if (likely(!err))
+				memcpy(eth_hdr(skb)->h_dest, nla_data(a), ETH_ALEN);
 			break;
 
-		case ODPAT_SET_DL_SRC:
-		case ODPAT_SET_DL_DST:
-			skb = set_dl_addr(skb, &a->dl_addr, gfp);
+		case OVS_ACTION_ATTR_SET_NW_SRC:
+		case OVS_ACTION_ATTR_SET_NW_DST:
+			err = set_nw_addr(skb, a);
 			break;
 
-		case ODPAT_SET_NW_SRC:
-		case ODPAT_SET_NW_DST:
-			skb = set_nw_addr(skb, key, &a->nw_addr, gfp);
+		case OVS_ACTION_ATTR_SET_NW_TOS:
+			err = set_nw_tos(skb, nla_get_u8(a));
 			break;
 
-		case ODPAT_SET_TP_SRC:
-		case ODPAT_SET_TP_DST:
-			skb = set_tp_port(skb, key, &a->tp_port, gfp);
+		case OVS_ACTION_ATTR_SET_TP_SRC:
+		case OVS_ACTION_ATTR_SET_TP_DST:
+			err = set_tp_port(skb, a);
 			break;
+
+		case OVS_ACTION_ATTR_SET_PRIORITY:
+			skb->priority = nla_get_u32(a);
+			break;
+
+		case OVS_ACTION_ATTR_POP_PRIORITY:
+			skb->priority = priority;
+			break;
+
+		case OVS_ACTION_ATTR_SAMPLE:
+			err = sample(dp, skb, a);
+			break;
+
+		}
+		if (unlikely(err)) {
+			kfree_skb(skb);
+			return err;
 		}
-		if (!skb)
-			return -ENOMEM;
 	}
-	if (prev_port != -1)
+
+	if (prev_port != -1) {
+		if (keep_skb)
+			skb = skb_clone(skb, GFP_ATOMIC);
+
 		do_output(dp, skb, prev_port);
-	else
-		kfree_skb(skb);
+	} else if (!keep_skb)
+		consume_skb(skb);
+
 	return 0;
 }
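The prev_port indirection in do_execute_actions() defers every output by one action so that only non-final outputs pay for skb_clone(); the last output consumes the original skb. A toy rendering of the same control flow, with emit() standing in for clone-plus-transmit:

    #include <stdio.h>

    static void emit(const char *what, int port)
    {
        printf("%s -> port %d\n", what, port);
    }

    int main(void)
    {
        int ports[] = { 3, 7, 9 };
        int n = sizeof ports / sizeof ports[0];
        int prev_port = -1;

        for (int i = 0; i < n; i++) {
            if (prev_port != -1) {
                emit("clone", prev_port);     /* skb_clone() path */
                prev_port = -1;
            }
            prev_port = ports[i];             /* defer: might be the last one */
        }
        if (prev_port != -1)
            emit("original", prev_port);      /* final output, no clone */
        return 0;
    }

Ports 3 and 7 go out as clones; port 9 gets the original buffer.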
+
+/* We limit the number of times that we pass into execute_actions()
+ * to avoid blowing out the stack in the event that we have a loop. */
+#define MAX_LOOPS 5
+
+struct loop_counter {
+	u8 count;		/* Count. */
+	bool looping;		/* Loop detected? */
+};
+
+static DEFINE_PER_CPU(struct loop_counter, loop_counters);
+
+static int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
+{
+	if (net_ratelimit())
+		pr_warn("%s: flow looped %d times, dropping\n",
+			dp_name(dp), MAX_LOOPS);
+	actions->actions_len = 0;
+	return -ELOOP;
+}
+
+/* Execute a list of actions against 'skb'. */
+int execute_actions(struct datapath *dp, struct sk_buff *skb)
+{
+	struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
+	struct loop_counter *loop;
+	int error;
+
+	/* Check whether we've looped too much. */
+	loop = &__get_cpu_var(loop_counters);
+	if (unlikely(++loop->count > MAX_LOOPS))
+		loop->looping = true;
+	if (unlikely(loop->looping)) {
+		error = loop_suppress(dp, acts);
+		kfree_skb(skb);
+		goto out_loop;
+	}
+
+	OVS_CB(skb)->tun_id = 0;
+	error = do_execute_actions(dp, skb, acts->actions,
+				   acts->actions_len, false);
+
+	/* Check whether sub-actions looped too much. */
+	if (unlikely(loop->looping))
+		error = loop_suppress(dp, acts);
+
+out_loop:
+	/* Decrement loop counter. */
+	if (!--loop->count)
+		loop->looping = false;
+
+	return error;
+}
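execute_actions() bounds re-entry (sample() can recurse back into do_execute_actions()) with a per-CPU counter instead of threading a depth argument through every call. A userspace approximation that swaps DEFINE_PER_CPU for a GCC thread-local, keeping the patch's MAX_LOOPS and the count/looping unwind logic:

    #include <stdio.h>

    #define MAX_LOOPS 5

    static __thread struct {
        unsigned char count;    /* nesting depth on this thread */
        int looping;            /* depth limit tripped? */
    } loop;

    static int run_actions(void)
    {
        int error = 0;

        if (++loop.count > MAX_LOOPS)
            loop.looping = 1;
        if (loop.looping) {
            error = -1;         /* stand-in for -ELOOP */
            goto out;
        }

        /* A "sample" action re-enters the executor here. */
        error = run_actions();
        if (loop.looping)
            error = -1;
    out:
        if (!--loop.count)
            loop.looping = 0;
        return error;
    }

    int main(void)
    {
        printf("result=%d after capping depth at %d\n", run_actions(), MAX_LOOPS);
        return 0;
    }

Even with unbounded recursion in the action list, the counter converts a would-be stack overflow into a clean error, and the final decrement resets the flag once the outermost call unwinds.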