X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=datapath%2Factions.c;h=de98d990c2dff6f79ade48695983a8324e0ff321;hb=9cb8877cf7d1d86101a8f27829ad47ea0c8b1fe5;hp=3223c65bd66b75680caf790ac470ff9f498e6d58;hpb=7aec165dbc4690c8c2c703d142e2f017bb851d31;p=openvswitch

diff --git a/datapath/actions.c b/datapath/actions.c
index 3223c65b..de98d990 100644
--- a/datapath/actions.c
+++ b/datapath/actions.c
@@ -23,12 +23,13 @@
 #include "actions.h"
 #include "checksum.h"
 #include "datapath.h"
+#include "loop_counter.h"
 #include "openvswitch/datapath-protocol.h"
+#include "vlan.h"
 #include "vport.h"
 
 static int do_execute_actions(struct datapath *, struct sk_buff *,
-			      const struct sw_flow_key *,
-			      const struct nlattr *actions, u32 actions_len);
+			      struct sw_flow_actions *acts);
 
 static struct sk_buff *make_writable(struct sk_buff *skb, unsigned min_headroom)
 {
@@ -52,20 +53,28 @@ static struct sk_buff *make_writable(struct sk_buff *skb, unsigned min_headroom)
 	return NULL;
 }
 
-static struct sk_buff *vlan_pull_tag(struct sk_buff *skb)
+static struct sk_buff *strip_vlan(struct sk_buff *skb)
 {
-	struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
 	struct ethhdr *eh;
 
-	/* Verify we were given a vlan packet */
-	if (vh->h_vlan_proto != htons(ETH_P_8021Q) || skb->len < VLAN_ETH_HLEN)
+	if (vlan_tx_tag_present(skb)) {
+		vlan_set_tci(skb, 0);
 		return skb;
+	}
+
+	if (unlikely(vlan_eth_hdr(skb)->h_vlan_proto != htons(ETH_P_8021Q) ||
+		     skb->len < VLAN_ETH_HLEN))
+		return skb;
+
+	skb = make_writable(skb, 0);
+	if (unlikely(!skb))
+		return NULL;
 
 	if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
 		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
 					+ ETH_HLEN, VLAN_HLEN, 0));
 
-	memmove(skb->data + VLAN_HLEN, skb->data, 2 * VLAN_ETH_ALEN);
+	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
 
 	eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
 
@@ -75,167 +84,62 @@ static struct sk_buff *vlan_pull_tag(struct sk_buff *skb)
 	return skb;
 }
 
-static struct sk_buff *modify_vlan_tci(struct datapath *dp, struct sk_buff *skb,
-				       const struct sw_flow_key *key,
-				       const struct nlattr *a, u32 actions_len)
+static struct sk_buff *modify_vlan_tci(struct sk_buff *skb, __be16 tci)
 {
-	__be16 tci = nla_get_be16(a);
-
-	skb = make_writable(skb, VLAN_HLEN);
-	if (!skb)
-		return ERR_PTR(-ENOMEM);
+	struct vlan_ethhdr *vh;
+	__be16 old_tci;
 
-	if (skb->protocol == htons(ETH_P_8021Q)) {
-		/* Modify vlan id, but maintain other TCI values */
-		struct vlan_ethhdr *vh;
-		__be16 old_tci;
+	if (vlan_tx_tag_present(skb) || skb->protocol != htons(ETH_P_8021Q))
+		return __vlan_hwaccel_put_tag(skb, ntohs(tci));
 
-		if (skb->len < VLAN_ETH_HLEN)
-			return skb;
-
-		vh = vlan_eth_hdr(skb);
-		old_tci = vh->h_vlan_TCI;
-
-		vh->h_vlan_TCI = tci;
-
-		if (get_ip_summed(skb) == OVS_CSUM_COMPLETE) {
-			__be16 diff[] = { ~old_tci, vh->h_vlan_TCI };
-
-			skb->csum = ~csum_partial((char *)diff, sizeof(diff),
-						~skb->csum);
-		}
-	} else {
-		int err;
-
-		/* Add vlan header */
+	skb = make_writable(skb, 0);
+	if (unlikely(!skb))
+		return NULL;
 
-		/* Set up checksumming pointers for checksum-deferred packets
-		 * on Xen.  Otherwise, dev_queue_xmit() will try to do this
-		 * when we send the packet out on the wire, and it will fail at
-		 * that point because skb_checksum_setup() will not look inside
-		 * an 802.1Q header. */
-		err = vswitch_skb_checksum_setup(skb);
-		if (unlikely(err)) {
-			kfree_skb(skb);
-			return ERR_PTR(err);
-		}
+	if (unlikely(skb->len < VLAN_ETH_HLEN))
+		return skb;
 
-		/* GSO is not implemented for packets with an 802.1Q header, so
-		 * we have to do segmentation before we add that header.
-		 *
-		 * GSO does work with hardware-accelerated VLAN tagging, but we
-		 * can't use hardware-accelerated VLAN tagging since it
-		 * requires the device to have a VLAN group configured (with
-		 * e.g. vconfig(8)) and we don't do that.
-		 *
-		 * Having to do this here may be a performance loss, since we
-		 * can't take advantage of TSO hardware support, although it
-		 * does not make a measurable network performance difference
-		 * for 1G Ethernet.  Fixing that would require patching the
-		 * kernel (either to add GSO support to the VLAN protocol or to
-		 * support hardware-accelerated VLAN tagging without VLAN
-		 * groups configured). */
-		if (skb_is_gso(skb)) {
-			const struct nlattr *actions_left;
-			int actions_len_left;
-			struct sk_buff *segs;
-
-			segs = skb_gso_segment(skb, 0);
-			kfree_skb(skb);
-			if (IS_ERR(segs))
-				return ERR_CAST(segs);
-
-			actions_len_left = actions_len;
-			actions_left = nla_next(a, &actions_len_left);
-
-			do {
-				struct sk_buff *nskb = segs->next;
-
-				segs->next = NULL;
-
-				/* GSO can change the checksum type so update.*/
-				compute_ip_summed(segs, true);
-
-				segs = __vlan_put_tag(segs, ntohs(tci));
-				err = -ENOMEM;
-				if (segs) {
-					err = do_execute_actions(
-						dp, segs, key, actions_left,
-						actions_len_left);
-				}
-
-				if (unlikely(err)) {
-					while ((segs = nskb)) {
-						nskb = segs->next;
-						segs->next = NULL;
-						kfree_skb(segs);
-					}
-					return ERR_PTR(err);
-				}
-
-				segs = nskb;
-			} while (segs->next);
-
-			skb = segs;
-			compute_ip_summed(skb, true);
-		}
+	vh = vlan_eth_hdr(skb);
 
-		/* The hardware-accelerated version of vlan_put_tag() works
-		 * only for a device that has a VLAN group configured (with
-		 * e.g. vconfig(8)), so call the software-only version
-		 * __vlan_put_tag() directly instead.
-		 */
-		skb = __vlan_put_tag(skb, ntohs(tci));
-		if (!skb)
-			return ERR_PTR(-ENOMEM);
+	old_tci = vh->h_vlan_TCI;
+	vh->h_vlan_TCI = tci;
 
-		/* GSO doesn't fix up the hardware computed checksum so this
-		 * will only be hit in the non-GSO case. */
-		if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
-			skb->csum = csum_add(skb->csum, csum_partial(skb->data
-					+ ETH_HLEN, VLAN_HLEN, 0));
+	if (get_ip_summed(skb) == OVS_CSUM_COMPLETE) {
+		__be16 diff[] = { ~old_tci, vh->h_vlan_TCI };
+		skb->csum = ~csum_partial((char *)diff, sizeof(diff), ~skb->csum);
 	}
 
 	return skb;
 }
 
-static struct sk_buff *strip_vlan(struct sk_buff *skb)
-{
-	skb = make_writable(skb, 0);
-	if (skb)
-		vlan_pull_tag(skb);
-	return skb;
-}
-
-static bool is_ip(struct sk_buff *skb, const struct sw_flow_key *key)
+static bool is_ip(struct sk_buff *skb)
 {
-	return (key->dl_type == htons(ETH_P_IP) &&
+	return (OVS_CB(skb)->flow->key.eth.type == htons(ETH_P_IP) &&
 		skb->transport_header > skb->network_header);
 }
 
-static __sum16 *get_l4_checksum(struct sk_buff *skb, const struct sw_flow_key *key)
+static __sum16 *get_l4_checksum(struct sk_buff *skb)
 {
+	u8 nw_proto = OVS_CB(skb)->flow->key.ip.proto;
 	int transport_len = skb->len - skb_transport_offset(skb);
-	if (key->nw_proto == IPPROTO_TCP) {
+	if (nw_proto == IPPROTO_TCP) {
 		if (likely(transport_len >= sizeof(struct tcphdr)))
 			return &tcp_hdr(skb)->check;
-	} else if (key->nw_proto == IPPROTO_UDP) {
+	} else if (nw_proto == IPPROTO_UDP) {
 		if (likely(transport_len >= sizeof(struct udphdr)))
 			return &udp_hdr(skb)->check;
 	}
 	return NULL;
 }
 
-static struct sk_buff *set_nw_addr(struct sk_buff *skb,
-				   const struct sw_flow_key *key,
-				   const struct nlattr *a)
+static struct sk_buff *set_nw_addr(struct sk_buff *skb, const struct nlattr *a)
 {
 	__be32 new_nwaddr = nla_get_be32(a);
 	struct iphdr *nh;
 	__sum16 *check;
 	__be32 *nwaddr;
 
-	if (unlikely(!is_ip(skb, key)))
+	if (unlikely(!is_ip(skb)))
 		return skb;
 
 	skb = make_writable(skb, 0);
@@ -245,21 +149,21 @@ static struct sk_buff *set_nw_addr(struct sk_buff *skb,
 	nh = ip_hdr(skb);
 	nwaddr = nla_type(a) == ODP_ACTION_ATTR_SET_NW_SRC ? &nh->saddr : &nh->daddr;
 
-	check = get_l4_checksum(skb, key);
+	check = get_l4_checksum(skb);
 	if (likely(check))
 		inet_proto_csum_replace4(check, skb, *nwaddr, new_nwaddr, 1);
 	csum_replace4(&nh->check, *nwaddr, new_nwaddr);
 
+	skb_clear_rxhash(skb);
+
 	*nwaddr = new_nwaddr;
 
 	return skb;
 }
 
-static struct sk_buff *set_nw_tos(struct sk_buff *skb,
-				  const struct sw_flow_key *key,
-				  u8 nw_tos)
+static struct sk_buff *set_nw_tos(struct sk_buff *skb, u8 nw_tos)
 {
-	if (unlikely(!is_ip(skb, key)))
+	if (unlikely(!is_ip(skb)))
 		return skb;
 
 	skb = make_writable(skb, 0);
@@ -278,15 +182,13 @@ static struct sk_buff *set_nw_tos(struct sk_buff *skb,
 	return skb;
 }
 
-static struct sk_buff *set_tp_port(struct sk_buff *skb,
-				   const struct sw_flow_key *key,
-				   const struct nlattr *a)
+static struct sk_buff *set_tp_port(struct sk_buff *skb, const struct nlattr *a)
 {
 	struct udphdr *th;
 	__sum16 *check;
 	__be16 *port;
 
-	if (unlikely(!is_ip(skb, key)))
+	if (unlikely(!is_ip(skb)))
 		return skb;
 
 	skb = make_writable(skb, 0);
@@ -294,7 +196,7 @@ static struct sk_buff *set_tp_port(struct sk_buff *skb,
 		return NULL;
 
 	/* Must follow make_writable() since that can move the skb data. */
-	check = get_l4_checksum(skb, key);
+	check = get_l4_checksum(skb);
 	if (unlikely(!check))
 		return skb;
 
@@ -309,6 +211,7 @@ static struct sk_buff *set_tp_port(struct sk_buff *skb,
 	port = nla_type(a) == ODP_ACTION_ATTR_SET_TP_SRC ? &th->source : &th->dest;
 	inet_proto_csum_replace2(check, skb, *port, nla_get_be16(a), 0);
 	*port = nla_get_be16(a);
+	skb_clear_rxhash(skb);
 
 	return skb;
 }
@@ -318,17 +221,16 @@ static struct sk_buff *set_tp_port(struct sk_buff *skb,
  *
  * @skb: skbuff containing an Ethernet packet, with network header pointing
  * just past the Ethernet and optional 802.1Q header.
- * @key: flow key extracted from @skb by flow_extract()
  *
  * Returns true if @skb is an invalid Ethernet+IPv4 ARP packet: one with screwy
  * or truncated header fields or one whose inner and outer Ethernet address
  * differ.
  */
-static bool is_spoofed_arp(struct sk_buff *skb, const struct sw_flow_key *key)
+static bool is_spoofed_arp(struct sk_buff *skb)
 {
 	struct arp_eth_header *arp;
 
-	if (key->dl_type != htons(ETH_P_ARP))
+	if (OVS_CB(skb)->flow->key.eth.type != htons(ETH_P_ARP))
 		return false;
 
 	if (skb_network_offset(skb) + sizeof(struct arp_eth_header) > skb->len)
@@ -360,8 +262,7 @@ error:
 	kfree_skb(skb);
 }
 
-static int output_control(struct datapath *dp, struct sk_buff *skb, u64 arg,
-			  const struct sw_flow_key *key)
+static int output_control(struct datapath *dp, struct sk_buff *skb, u64 arg)
 {
 	struct dp_upcall_info upcall;
 
@@ -370,7 +271,7 @@ static int output_control(struct datapath *dp, struct sk_buff *skb, u64 arg,
 		return -ENOMEM;
 
 	upcall.cmd = ODP_PACKET_CMD_ACTION;
-	upcall.key = key;
+	upcall.key = &OVS_CB(skb)->flow->key;
 	upcall.userdata = arg;
 	upcall.sample_pool = 0;
 	upcall.actions = NULL;
@@ -380,8 +281,7 @@ static int output_control(struct datapath *dp, struct sk_buff *skb, u64 arg,
 
 /* Execute a list of actions against 'skb'. */
 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
-			      const struct sw_flow_key *key,
-			      const struct nlattr *actions, u32 actions_len)
+			      struct sw_flow_actions *acts)
 {
 	/* Every output action needs a separate clone of 'skb', but the common
 	 * case is just a single output action, so that doing a clone and
@@ -392,7 +292,8 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 	const struct nlattr *a;
 	int rem, err;
 
-	for (a = actions, rem = actions_len; rem > 0; a = nla_next(a, &rem)) {
+	for (a = acts->actions, rem = acts->actions_len; rem > 0;
+	     a = nla_next(a, &rem)) {
 		if (prev_port != -1) {
 			do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
 			prev_port = -1;
@@ -404,7 +305,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 			break;
 
 		case ODP_ACTION_ATTR_CONTROLLER:
-			err = output_control(dp, skb, nla_get_u64(a), key);
+			err = output_control(dp, skb, nla_get_u64(a));
 			if (err) {
 				kfree_skb(skb);
 				return err;
@@ -416,9 +317,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 			break;
 
 		case ODP_ACTION_ATTR_SET_DL_TCI:
-			skb = modify_vlan_tci(dp, skb, key, a, rem);
-			if (IS_ERR(skb))
-				return PTR_ERR(skb);
+			skb = modify_vlan_tci(skb, nla_get_be16(a));
 			break;
 
 		case ODP_ACTION_ATTR_STRIP_VLAN:
@@ -441,16 +340,16 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 
 		case ODP_ACTION_ATTR_SET_NW_SRC:
 		case ODP_ACTION_ATTR_SET_NW_DST:
-			skb = set_nw_addr(skb, key, a);
+			skb = set_nw_addr(skb, a);
			break;
 
 		case ODP_ACTION_ATTR_SET_NW_TOS:
-			skb = set_nw_tos(skb, key, nla_get_u8(a));
+			skb = set_nw_tos(skb, nla_get_u8(a));
 			break;
 
 		case ODP_ACTION_ATTR_SET_TP_SRC:
 		case ODP_ACTION_ATTR_SET_TP_DST:
-			skb = set_tp_port(skb, key, a);
+			skb = set_tp_port(skb, a);
 			break;
 
 		case ODP_ACTION_ATTR_SET_PRIORITY:
@@ -462,7 +361,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 			break;
 
 		case ODP_ACTION_ATTR_DROP_SPOOFED_ARP:
-			if (unlikely(is_spoofed_arp(skb, key)))
+			if (unlikely(is_spoofed_arp(skb)))
 				goto exit;
 			break;
 		}
@@ -478,8 +377,7 @@ exit:
 }
 
 static void sflow_sample(struct datapath *dp, struct sk_buff *skb,
-			 const struct sw_flow_key *key,
-			 const struct nlattr *a, u32 actions_len)
+			 struct sw_flow_actions *acts)
 {
 	struct sk_buff *nskb;
 	struct vport *p = OVS_CB(skb)->vport;
@@ -497,23 +395,46 @@ static void sflow_sample(struct datapath *dp, struct sk_buff *skb,
 		return;
 
 	upcall.cmd = ODP_PACKET_CMD_SAMPLE;
-	upcall.key = key;
+	upcall.key = &OVS_CB(skb)->flow->key;
 	upcall.userdata = 0;
 	upcall.sample_pool = atomic_read(&p->sflow_pool);
-	upcall.actions = a;
-	upcall.actions_len = actions_len;
+	upcall.actions = acts->actions;
+	upcall.actions_len = acts->actions_len;
 
 	dp_upcall(dp, nskb, &upcall);
 }
 
 /* Execute a list of actions against 'skb'. */
-int execute_actions(struct datapath *dp, struct sk_buff *skb,
-		    const struct sw_flow_key *key,
-		    const struct nlattr *actions, u32 actions_len)
+int execute_actions(struct datapath *dp, struct sk_buff *skb)
 {
-	if (dp->sflow_probability)
-		sflow_sample(dp, skb, key, actions, actions_len);
+	struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
+	struct loop_counter *loop;
+	int error;
+
+	/* Check whether we've looped too much. */
+	loop = loop_get_counter();
+	if (unlikely(++loop->count > MAX_LOOPS))
+		loop->looping = true;
+	if (unlikely(loop->looping)) {
+		error = loop_suppress(dp, acts);
+		kfree_skb(skb);
+		goto out_loop;
+	}
+	/* Really execute actions. */
+	if (dp->sflow_probability)
+		sflow_sample(dp, skb, acts);
 	OVS_CB(skb)->tun_id = 0;
+	error = do_execute_actions(dp, skb, acts);
+
+	/* Check whether sub-actions looped too much. */
+	if (unlikely(loop->looping))
+		error = loop_suppress(dp, acts);
+
+out_loop:
+	/* Decrement loop counter. */
+	if (!--loop->count)
+		loop->looping = false;
+	loop_put_counter();
 
-	return do_execute_actions(dp, skb, key, actions, actions_len);
+	return error;
 }
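
The rewritten execute_actions() above wraps action execution in a loop counter (loop_get_counter()/loop_put_counter() from the loop_counter.h header this patch adds), so an action list that feeds a packet back into the datapath cannot recurse without bound. The following is a minimal, self-contained userspace sketch of that pattern, not the kernel code itself: the single global counter, the execute() helper and its depth argument are illustrative stand-ins, and returning -1 stands in for loop_suppress() logging and dropping the packet; the real datapath keeps one counter per CPU.

#include <stdbool.h>
#include <stdio.h>

#define MAX_LOOPS 5

struct loop_counter {
	int count;		/* current nesting depth */
	bool looping;		/* latched once the depth limit is hit */
};

static struct loop_counter counter;	/* per-CPU in the real datapath */

static int execute(int depth)
{
	int error = 0;

	/* Check whether we've looped too much. */
	if (++counter.count > MAX_LOOPS)
		counter.looping = true;
	if (counter.looping) {
		error = -1;	/* stand-in for loop_suppress() + kfree_skb() */
		goto out;
	}

	/* "Really execute actions": recurse to model an action list that
	 * re-enters the datapath, e.g. output to an internal device. */
	if (depth > 1)
		error = execute(depth - 1);

	/* Check whether sub-actions looped too much. */
	if (counter.looping)
		error = -1;

out:
	/* Decrement the counter; clear the latch at the outermost level. */
	if (!--counter.count)
		counter.looping = false;
	return error;
}

int main(void)
{
	printf("depth 3: %d\n", execute(3));	/* within the limit: 0 */
	printf("depth 9: %d\n", execute(9));	/* suppressed: -1 */
	return 0;
}

The design point mirrored here is that the looping flag stays latched until the outermost frame unwinds, so every level of a runaway chain sees the suppression rather than only the innermost one.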