X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=datapath%2Fflow.c;h=fdfe3e9467119bb7a48effe37dea6c7187dcbc95;hb=dabd3fe3f8c0161aab5dc8e2cefb6375e810a38b;hp=78dea3a66283de0df43f1f2c03b8ba7bb565d651;hpb=0fd0d0834f79d6cfe8a0eccc19732bae365aa575;p=openvswitch diff --git a/datapath/flow.c b/datapath/flow.c index 78dea3a6..fdfe3e94 100644 --- a/datapath/flow.c +++ b/datapath/flow.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2011 Nicira Networks. + * Copyright (c) 2007-2011 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include @@ -47,7 +46,6 @@ #include "vlan.h" static struct kmem_cache *flow_cache; -static unsigned int hash_seed __read_mostly; static int check_header(struct sk_buff *skb, int len) { @@ -186,8 +184,10 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb) { u8 tcp_flags = 0; - if (flow->key.eth.type == htons(ETH_P_IP) && - flow->key.ip.proto == IPPROTO_TCP) { + if ((flow->key.eth.type == htons(ETH_P_IP) || + flow->key.eth.type == htons(ETH_P_IPV6)) && + flow->key.ip.proto == IPPROTO_TCP && + likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) { u8 *tcp = (u8 *)tcp_hdr(skb); tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK; } @@ -205,10 +205,7 @@ struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions) int actions_len = nla_len(actions); struct sw_flow_actions *sfa; - /* At least DP_MAX_PORTS actions are required to be able to flood a - * packet to every port. Factor of 2 allows for setting VLAN tags, - * etc. */ - if (actions_len > 2 * DP_MAX_PORTS * nla_total_size(4)) + if (actions_len > MAX_ACTIONS_BUFSIZE) return ERR_PTR(-EINVAL); sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL); @@ -229,15 +226,14 @@ struct sw_flow *ovs_flow_alloc(void) return ERR_PTR(-ENOMEM); spin_lock_init(&flow->lock); - atomic_set(&flow->refcnt, 1); flow->sf_acts = NULL; - flow->dead = false; return flow; } static struct hlist_head *find_bucket(struct flow_table *table, u32 hash) { + hash = jhash_1word(hash, table->hash_seed); return flex_array_get(table->buckets, (hash & (table->n_buckets - 1))); } @@ -285,16 +281,13 @@ struct flow_table *ovs_flow_tbl_alloc(int new_size) } table->n_buckets = new_size; table->count = 0; + table->node_ver = 0; + table->keep_flows = false; + get_random_bytes(&table->hash_seed, sizeof(u32)); return table; } -static void flow_free(struct sw_flow *flow) -{ - flow->dead = true; - ovs_flow_put(flow); -} - void ovs_flow_tbl_destroy(struct flow_table *table) { int i; @@ -302,17 +295,22 @@ void ovs_flow_tbl_destroy(struct flow_table *table) if (!table) return; + if (table->keep_flows) + goto skip_flows; + for (i = 0; i < table->n_buckets; i++) { struct sw_flow *flow; struct hlist_head *head = flex_array_get(table->buckets, i); struct hlist_node *node, *n; + int ver = table->node_ver; - hlist_for_each_entry_safe(flow, node, n, head, hash_node) { - hlist_del_init_rcu(&flow->hash_node); - flow_free(flow); + hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) { + hlist_del_rcu(&flow->hash_node[ver]); + ovs_flow_free(flow); } } +skip_flows: free_buckets(table->buckets); kfree(table); } @@ -337,12 +335,14 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la struct sw_flow *flow; struct hlist_head *head; struct hlist_node *n; + int ver; int i; + ver = table->node_ver; while (*bucket < 
table->n_buckets) { i = 0; head = flex_array_get(table->buckets, *bucket); - hlist_for_each_entry_rcu(flow, n, head, hash_node) { + hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) { if (i < *last) { i++; continue; @@ -357,62 +357,81 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la return NULL; } -struct flow_table *ovs_flow_tbl_expand(struct flow_table *table) +static void __flow_tbl_insert(struct flow_table *table, struct sw_flow *flow) { - struct flow_table *new_table; - int n_buckets = table->n_buckets * 2; + struct hlist_head *head; + head = find_bucket(table, flow->hash); + hlist_add_head_rcu(&flow->hash_node[table->node_ver], head); + table->count++; +} + +static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new) +{ + int old_ver; int i; - new_table = ovs_flow_tbl_alloc(n_buckets); - if (!new_table) - return ERR_PTR(-ENOMEM); + old_ver = old->node_ver; + new->node_ver = !old_ver; - for (i = 0; i < table->n_buckets; i++) { + /* Insert in new table. */ + for (i = 0; i < old->n_buckets; i++) { struct sw_flow *flow; struct hlist_head *head; - struct hlist_node *n, *pos; + struct hlist_node *n; - head = flex_array_get(table->buckets, i); + head = flex_array_get(old->buckets, i); - hlist_for_each_entry_safe(flow, n, pos, head, hash_node) { - hlist_del_init_rcu(&flow->hash_node); - ovs_flow_tbl_insert(new_table, flow); - } + hlist_for_each_entry(flow, n, head, hash_node[old_ver]) + __flow_tbl_insert(new, flow); } - - return new_table; + old->keep_flows = true; } -/* RCU callback used by ovs_flow_deferred_free. */ -static void rcu_free_flow_callback(struct rcu_head *rcu) +static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets) { - struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu); + struct flow_table *new_table; + + new_table = ovs_flow_tbl_alloc(n_buckets); + if (!new_table) + return ERR_PTR(-ENOMEM); + + flow_table_copy_flows(table, new_table); - flow->dead = true; - ovs_flow_put(flow); + return new_table; } -/* Schedules 'flow' to be freed after the next RCU grace period. - * The caller must hold rcu_read_lock for this to be sensible. */ -void ovs_flow_deferred_free(struct sw_flow *flow) +struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table) { - call_rcu(&flow->rcu, rcu_free_flow_callback); + return __flow_tbl_rehash(table, table->n_buckets); } -void ovs_flow_hold(struct sw_flow *flow) +struct flow_table *ovs_flow_tbl_expand(struct flow_table *table) { - atomic_inc(&flow->refcnt); + return __flow_tbl_rehash(table, table->n_buckets * 2); } -void ovs_flow_put(struct sw_flow *flow) +void ovs_flow_free(struct sw_flow *flow) { if (unlikely(!flow)) return; - if (atomic_dec_and_test(&flow->refcnt)) { - kfree((struct sf_flow_acts __force *)flow->sf_acts); - kmem_cache_free(flow_cache, flow); - } + kfree((struct sf_flow_acts __force *)flow->sf_acts); + kmem_cache_free(flow_cache, flow); +} + +/* RCU callback used by ovs_flow_deferred_free. */ +static void rcu_free_flow_callback(struct rcu_head *rcu) +{ + struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu); + + ovs_flow_free(flow); +} + +/* Schedules 'flow' to be freed after the next RCU grace period. + * The caller must hold rcu_read_lock for this to be sensible. */ +void ovs_flow_deferred_free(struct sw_flow *flow) +{ + call_rcu(&flow->rcu, rcu_free_flow_callback); } /* RCU callback used by ovs_flow_deferred_free_acts. 
*/ @@ -602,7 +621,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, memset(key, 0, sizeof(*key)); key->phy.priority = skb->priority; - key->phy.tun_id = OVS_CB(skb)->tun_id; + if (OVS_CB(skb)->tun_key) + memcpy(&key->phy.tun.tun_key, OVS_CB(skb)->tun_key, sizeof(key->phy.tun.tun_key)); key->phy.in_port = in_port; skb_reset_mac_header(skb); @@ -689,7 +709,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, } } - } else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) { + } else if ((key->eth.type == htons(ETH_P_ARP) || + key->eth.type == htons(ETH_P_RARP)) && arphdr_ok(skb)) { struct arp_eth_header *arp; arp = (struct arp_eth_header *)skb_network_header(skb); @@ -702,15 +723,11 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key, /* We only match on the lower 8 bits of the opcode. */ if (ntohs(arp->ar_op) <= 0xff) key->ip.proto = ntohs(arp->ar_op); - - if (key->ip.proto == ARPOP_REQUEST - || key->ip.proto == ARPOP_REPLY) { - memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src)); - memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst)); - memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN); - memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN); - key_len = SW_FLOW_KEY_OFFSET(ipv4.arp); - } + memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src)); + memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst)); + memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN); + memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN); + key_len = SW_FLOW_KEY_OFFSET(ipv4.arp); } } else if (key->eth.type == htons(ETH_P_IPV6)) { int nh_len; /* IPv6 Header + Extensions */ @@ -759,9 +776,18 @@ out: return error; } -u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len) +static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, int key_len) { - return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), hash_seed); + return jhash2((u32 *)((u8 *)key + key_start), + DIV_ROUND_UP(key_len - key_start, sizeof(u32)), 0); +} + +static int flow_key_start(struct sw_flow_key *key) +{ + if (key->phy.tun.tun_key.ipv4_dst) + return 0; + else + return offsetof(struct sw_flow_key, phy.priority); } struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table, @@ -770,37 +796,38 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table, struct sw_flow *flow; struct hlist_node *n; struct hlist_head *head; + u8 *_key; + int key_start; u32 hash; - hash = ovs_flow_hash(key, key_len); + key_start = flow_key_start(key); + hash = ovs_flow_hash(key, key_start, key_len); + _key = (u8 *) key + key_start; head = find_bucket(table, hash); - hlist_for_each_entry_rcu(flow, n, head, hash_node) { + hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) { if (flow->hash == hash && - !memcmp(&flow->key, key, key_len)) { + !memcmp((u8 *)&flow->key + key_start, _key, key_len - key_start)) { return flow; } } return NULL; } -void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow) +void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, + struct sw_flow_key *key, int key_len) { - struct hlist_head *head; - - head = find_bucket(table, flow->hash); - hlist_add_head_rcu(&flow->hash_node, head); - table->count++; + flow->hash = ovs_flow_hash(key, flow_key_start(key), key_len); + memcpy(&flow->key, key, sizeof(flow->key)); + __flow_tbl_insert(table, flow); } void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) { - if 
(!hlist_unhashed(&flow->hash_node)) { - hlist_del_init_rcu(&flow->hash_node); - table->count--; - BUG_ON(table->count < 0); - } + hlist_del_rcu(&flow->hash_node[table->node_ver]); + table->count--; + BUG_ON(table->count < 0); } /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */ @@ -819,6 +846,7 @@ const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { [OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6), [OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp), [OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd), + [OVS_KEY_ATTR_IPV4_TUNNEL] = sizeof(struct ovs_key_ipv4_tunnel), /* Not upstream. */ [OVS_KEY_ATTR_TUN_ID] = sizeof(__be64), @@ -994,12 +1022,41 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, swkey->phy.in_port = in_port; attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT); } else { - swkey->phy.in_port = USHRT_MAX; + swkey->phy.in_port = DP_MAX_PORTS; } - if (attrs & (1ULL << OVS_KEY_ATTR_TUN_ID)) { - swkey->phy.tun_id = nla_get_be64(a[OVS_KEY_ATTR_TUN_ID]); + if (attrs & (1ULL << OVS_KEY_ATTR_TUN_ID) && + attrs & (1ULL << OVS_KEY_ATTR_IPV4_TUNNEL)) { + struct ovs_key_ipv4_tunnel *tun_key; + __be64 tun_id; + + tun_key = nla_data(a[OVS_KEY_ATTR_IPV4_TUNNEL]); + + if (!tun_key->ipv4_dst) + return -EINVAL; + if (!(tun_key->tun_flags & OVS_TNL_F_KEY)) + return -EINVAL; + + tun_id = nla_get_be64(a[OVS_KEY_ATTR_TUN_ID]); + if (tun_id != tun_key->tun_id) + return -EINVAL; + + memcpy(&swkey->phy.tun.tun_key, tun_key, + sizeof(swkey->phy.tun.tun_key)); + attrs &= ~(1ULL << OVS_KEY_ATTR_TUN_ID); + attrs &= ~(1ULL << OVS_KEY_ATTR_IPV4_TUNNEL); + } else if (attrs & (1ULL << OVS_KEY_ATTR_IPV4_TUNNEL)) { + struct ovs_key_ipv4_tunnel *tun_key; + tun_key = nla_data(a[OVS_KEY_ATTR_IPV4_TUNNEL]); + + if (!tun_key->ipv4_dst) + return -EINVAL; + + memcpy(&swkey->phy.tun.tun_key, tun_key, + sizeof(swkey->phy.tun.tun_key)); + + attrs &= ~(1ULL << OVS_KEY_ATTR_IPV4_TUNNEL); } /* Data attributes. */ @@ -1100,7 +1157,8 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, if (err) return err; } - } else if (swkey->eth.type == htons(ETH_P_ARP)) { + } else if (swkey->eth.type == htons(ETH_P_ARP) || + swkey->eth.type == htons(ETH_P_RARP)) { const struct ovs_key_arp *arp_key; if (!(attrs & (1 << OVS_KEY_ATTR_ARP))) @@ -1137,15 +1195,17 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, * get the metadata, that is, the parts of the flow key that cannot be * extracted from the packet itself. 
*/ -int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id, - const struct nlattr *attr) + +int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len, const struct nlattr *attr) { + struct ovs_key_ipv4_tunnel *tun_key = &flow->key.phy.tun.tun_key; const struct nlattr *nla; int rem; + __be64 tun_id = 0; - *in_port = USHRT_MAX; - *tun_id = 0; - *priority = 0; + flow->key.phy.in_port = DP_MAX_PORTS; + flow->key.phy.priority = 0; + memset(tun_key, 0, sizeof(flow->key.phy.tun.tun_key)); nla_for_each_nested(nla, attr, rem) { int type = nla_type(nla); @@ -1156,23 +1216,55 @@ int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port, __be64 *tun_id, switch (type) { case OVS_KEY_ATTR_PRIORITY: - *priority = nla_get_u32(nla); + flow->key.phy.priority = nla_get_u32(nla); break; case OVS_KEY_ATTR_TUN_ID: - *tun_id = nla_get_be64(nla); + tun_id = nla_get_be64(nla); + + if (tun_key->ipv4_dst) { + if (!(tun_key->tun_flags & OVS_TNL_F_KEY)) + return -EINVAL; + if (tun_key->tun_id != tun_id) + return -EINVAL; + break; + } + tun_key->tun_id = tun_id; + tun_key->tun_flags |= OVS_TNL_F_KEY; + + break; + + case OVS_KEY_ATTR_IPV4_TUNNEL: + if (tun_key->tun_flags & OVS_TNL_F_KEY) { + tun_id = tun_key->tun_id; + + memcpy(tun_key, nla_data(nla), sizeof(*tun_key)); + if (!(tun_key->tun_flags & OVS_TNL_F_KEY)) + return -EINVAL; + + if (tun_key->tun_id != tun_id) + return -EINVAL; + } else + memcpy(tun_key, nla_data(nla), sizeof(*tun_key)); + + if (!tun_key->ipv4_dst) + return -EINVAL; break; case OVS_KEY_ATTR_IN_PORT: if (nla_get_u32(nla) >= DP_MAX_PORTS) return -EINVAL; - *in_port = nla_get_u32(nla); + flow->key.phy.in_port = nla_get_u32(nla); break; } } } if (rem) return -EINVAL; + + flow->hash = ovs_flow_hash(&flow->key, + flow_key_start(&flow->key), key_len); + return 0; } @@ -1181,14 +1273,25 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) struct ovs_key_ethernet *eth_key; struct nlattr *nla, *encap; - if (swkey->phy.priority) - NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority); + if (swkey->phy.priority && + nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority)) + goto nla_put_failure; - if (swkey->phy.tun_id != cpu_to_be64(0)) - NLA_PUT_BE64(skb, OVS_KEY_ATTR_TUN_ID, swkey->phy.tun_id); + if (swkey->phy.tun.tun_key.ipv4_dst) { + struct ovs_key_ipv4_tunnel *tun_key; + nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4_TUNNEL, sizeof(*tun_key)); + if (!nla) + goto nla_put_failure; + tun_key = nla_data(nla); + memcpy(tun_key, &swkey->phy.tun.tun_key, sizeof(*tun_key)); + } + if ((swkey->phy.tun.tun_key.tun_flags & OVS_TNL_F_KEY) && + nla_put_be64(skb, OVS_KEY_ATTR_TUN_ID, swkey->phy.tun.tun_key.tun_id)) + goto nla_put_failure; - if (swkey->phy.in_port != USHRT_MAX) - NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port); + if (swkey->phy.in_port != DP_MAX_PORTS && + nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port)) + goto nla_put_failure; nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key)); if (!nla) @@ -1198,8 +1301,9 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN); if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) { - NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)); - NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci); + if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) || + nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci)) + goto nla_put_failure; encap = nla_nest_start(skb, 
OVS_KEY_ATTR_ENCAP); if (!swkey->eth.tci) goto unencap; @@ -1210,7 +1314,8 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) if (swkey->eth.type == htons(ETH_P_802_2)) goto unencap; - NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type); + if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type)) + goto nla_put_failure; if (swkey->eth.type == htons(ETH_P_IP)) { struct ovs_key_ipv4 *ipv4_key; @@ -1241,7 +1346,8 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb) ipv6_key->ipv6_tclass = swkey->ip.tos; ipv6_key->ipv6_hlimit = swkey->ip.ttl; ipv6_key->ipv6_frag = swkey->ip.frag; - } else if (swkey->eth.type == htons(ETH_P_ARP)) { + } else if (swkey->eth.type == htons(ETH_P_ARP) || + swkey->eth.type == htons(ETH_P_RARP)) { struct ovs_key_arp *arp_key; nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key)); @@ -1345,8 +1451,6 @@ int ovs_flow_init(void) if (flow_cache == NULL) return -ENOMEM; - get_random_bytes(&hash_seed, sizeof(hash_seed)); - return 0; }
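
The most interesting structural change above is the rehashing scheme. ovs_flow_tbl_expand() no longer unlinks each flow from the old table before inserting it into the new one; instead every sw_flow now carries two hash list nodes (hash_node[2]), and each flow_table has a node_ver selecting which node it links through. __flow_tbl_rehash() builds the new table through the other node version while RCU readers keep traversing the old one, and keep_flows makes ovs_flow_tbl_destroy() on the old table free only its buckets, not the flows. What follows is a minimal standalone sketch of that mechanism, assuming plain userspace C with singly linked lists standing in for the kernel's RCU hlists; all names here (struct flow, table_rehash(), and so on) are illustrative, not the OVS API:

/*
 * Sketch of the two-version bucket linkage introduced by this patch.
 * Each flow has two link slots, next[0] and next[1] (like hash_node[2]),
 * so it can be linked into the old and the new table at the same time
 * during a rehash.  Error handling is omitted for brevity.
 */
#include <stdio.h>
#include <stdlib.h>

struct flow {
    unsigned int hash;        /* precomputed flow hash */
    struct flow *next[2];     /* two link versions, like hash_node[2] */
};

struct table {
    struct flow **buckets;
    unsigned int n_buckets;   /* power of two */
    int node_ver;             /* which next[] slot this table uses */
    int keep_flows;           /* destroy buckets only, not the flows */
};

static struct table *table_alloc(unsigned int n_buckets)
{
    struct table *t = calloc(1, sizeof *t);
    t->buckets = calloc(n_buckets, sizeof *t->buckets);
    t->n_buckets = n_buckets;
    return t;
}

static void table_insert(struct table *t, struct flow *f)
{
    unsigned int b = f->hash & (t->n_buckets - 1);

    /* Head insertion through this table's link version only. */
    f->next[t->node_ver] = t->buckets[b];
    t->buckets[b] = f;
}

/* Rehash: link every flow into 'new' through the *other* node version.
 * The old table's links stay valid for concurrent readers; setting
 * keep_flows tells table_destroy() not to free the flows themselves. */
static struct table *table_rehash(struct table *old, unsigned int n_buckets)
{
    struct table *new = table_alloc(n_buckets);
    unsigned int i;

    new->node_ver = !old->node_ver;
    for (i = 0; i < old->n_buckets; i++)
        for (struct flow *f = old->buckets[i]; f; f = f->next[old->node_ver])
            table_insert(new, f);
    old->keep_flows = 1;
    return new;
}

static void table_destroy(struct table *t)
{
    if (!t->keep_flows) {
        for (unsigned int i = 0; i < t->n_buckets; i++) {
            struct flow *f = t->buckets[i];
            while (f) {
                struct flow *next = f->next[t->node_ver];
                free(f);
                f = next;
            }
        }
    }
    free(t->buckets);
    free(t);
}

int main(void)
{
    struct table *t = table_alloc(2);
    for (unsigned int i = 0; i < 8; i++) {
        struct flow *f = calloc(1, sizeof *f);
        f->hash = i * 2654435761u;  /* stand-in for the real jhash2() */
        table_insert(t, f);
    }

    struct table *bigger = table_rehash(t, 4);
    table_destroy(t);               /* keep_flows set: frees buckets only */
    table_destroy(bigger);          /* frees buckets and the flows */
    printf("rehash ok\n");
    return 0;
}

Note also why the global hash_seed could be removed: a flow's stored flow->hash is now computed with a constant seed (0) over the key, so it stays valid across table rehashes, and find_bucket() mixes in the per-table hash_seed with jhash_1word() at bucket-selection time instead. Hashing and comparison likewise start at flow_key_start(), which skips the tunnel-key prefix of struct sw_flow_key entirely for flows with no tunnel destination set.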