return NULL;
}
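+/* Link @flow into the bucket chosen by flow->hash; the caller sets the hash. */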
+static void __flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
+{
+ struct hlist_head *head;
+
+ head = find_bucket(table, flow->hash);
+ hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
+ table->count++;
+}
+
static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
{
int old_ver;
head = flex_array_get(old->buckets, i);
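+ /* Reuse the stored flow->hash; no rehash while copying buckets. */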
hlist_for_each_entry(flow, n, head, hash_node[old_ver])
- ovs_flow_tbl_insert(new, flow);
+ __flow_tbl_insert(new, flow);
}
old->keep_flows = true;
}
key->phy.priority = skb->priority;
if (OVS_CB(skb)->tun_key)
- memcpy(&key->tun.tun_key, OVS_CB(skb)->tun_key, sizeof(key->tun.tun_key));
+ memcpy(&key->phy.tun.tun_key, OVS_CB(skb)->tun_key, sizeof(key->phy.tun.tun_key));
key->phy.in_port = in_port;
skb_reset_mac_header(skb);
return error;
}
-u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len)
+static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, int key_len)
{
- return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), 0);
+ return jhash2((u32 *)((u8 *)key + key_start),
+ DIV_ROUND_UP(key_len - key_start, sizeof(u32)), 0);
+}
+
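+/* Hashing and comparison start at offset 0 when a tunnel key is present;
+ * otherwise the unused tun_key bytes are skipped and the key is treated
+ * as starting at phy.priority. */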
+static int flow_key_start(struct sw_flow_key *key)
+{
+ if (key->phy.tun.tun_key.ipv4_dst)
+ return 0;
+ else
+ return offsetof(struct sw_flow_key, phy.priority);
}
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
struct sw_flow *flow;
struct hlist_node *n;
struct hlist_head *head;
+ u8 *_key;
+ int key_start;
u32 hash;
- hash = ovs_flow_hash(key, key_len);
+ key_start = flow_key_start(key);
+ hash = ovs_flow_hash(key, key_start, key_len);
+ _key = (u8 *)key + key_start;
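+ /* Both the hash and the memcmp below cover only [key_start, key_len). */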
head = find_bucket(table, hash);
hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {
if (flow->hash == hash &&
- !memcmp(&flow->key, key, key_len)) {
+ !memcmp((u8 *)&flow->key + key_start, _key, key_len - key_start)) {
return flow;
}
}
return NULL;
}
-void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
+void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
+ struct sw_flow_key *key, int key_len)
{
- struct hlist_head *head;
-
- head = find_bucket(table, flow->hash);
- hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
- table->count++;
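+ /* Hash only the populated region of the key, then store the full key. */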
+ flow->hash = ovs_flow_hash(key, flow_key_start(key), key_len);
+ memcpy(&flow->key, key, sizeof(flow->key));
+ __flow_tbl_insert(table, flow);
}
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
if (tun_id != tun_key->tun_id)
return -EINVAL;
- memcpy(&swkey->tun.tun_key, tun_key, sizeof(swkey->tun.tun_key));
+ memcpy(&swkey->phy.tun.tun_key, tun_key, sizeof(swkey->phy.tun.tun_key));
attrs &= ~(1ULL << OVS_KEY_ATTR_TUN_ID);
attrs &= ~(1ULL << OVS_KEY_ATTR_IPV4_TUNNEL);
} else if (attrs & (1ULL << OVS_KEY_ATTR_TUN_ID)) {
- swkey->tun.tun_key.tun_id = nla_get_be64(a[OVS_KEY_ATTR_TUN_ID]);
- swkey->tun.tun_key.tun_flags |= OVS_FLOW_TNL_F_KEY;
+ swkey->phy.tun.tun_key.tun_id = nla_get_be64(a[OVS_KEY_ATTR_TUN_ID]);
+ swkey->phy.tun.tun_key.tun_flags |= OVS_FLOW_TNL_F_KEY;
attrs &= ~(1ULL << OVS_KEY_ATTR_TUN_ID);
} else if (attrs & (1ULL << OVS_KEY_ATTR_IPV4_TUNNEL)) {
if (!tun_key->ipv4_dst)
return -EINVAL;
- memcpy(&swkey->tun.tun_key, tun_key, sizeof(swkey->tun.tun_key));
+ memcpy(&swkey->phy.tun.tun_key, tun_key, sizeof(swkey->phy.tun.tun_key));
attrs &= ~(1ULL << OVS_KEY_ATTR_IPV4_TUNNEL);
}
* get the metadata, that is, the parts of the flow key that cannot be
* extracted from the packet itself.
*/
-int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
- struct ovs_key_ipv4_tunnel *tun_key,
- const struct nlattr *attr)
+int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len,
+ const struct nlattr *attr)
{
+ struct ovs_key_ipv4_tunnel *tun_key = &flow->key.phy.tun.tun_key;
const struct nlattr *nla;
int rem;
- __be64 tun_id;
+ __be64 tun_id = 0;
- *in_port = DP_MAX_PORTS;
- memset(tun_key, 0, sizeof(*tun_key));
- *priority = 0;
+ flow->key.phy.in_port = DP_MAX_PORTS;
+ flow->key.phy.priority = 0;
+ memset(tun_key, 0, sizeof(flow->key.phy.tun.tun_key));
nla_for_each_nested(nla, attr, rem) {
int type = nla_type(nla);
switch (type) {
case OVS_KEY_ATTR_PRIORITY:
- *priority = nla_get_u32(nla);
+ flow->key.phy.priority = nla_get_u32(nla);
break;
case OVS_KEY_ATTR_TUN_ID:
case OVS_KEY_ATTR_IN_PORT:
if (nla_get_u32(nla) >= DP_MAX_PORTS)
return -EINVAL;
- *in_port = nla_get_u32(nla);
+ flow->key.phy.in_port = nla_get_u32(nla);
break;
}
}
}
if (rem)
return -EINVAL;
+
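+ /* Metadata is filled in; hash the region that lookups will compare. */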
+ flow->hash = ovs_flow_hash(&flow->key,
+ flow_key_start(&flow->key), key_len);
+
return 0;
}
nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
goto nla_put_failure;
- if (swkey->tun.tun_key.ipv4_dst) {
+ if (swkey->phy.tun.tun_key.ipv4_dst) {
struct ovs_key_ipv4_tunnel *tun_key;
nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4_TUNNEL, sizeof(*tun_key));
if (!nla)
goto nla_put_failure;
tun_key = nla_data(nla);
- memcpy(tun_key, &swkey->tun.tun_key, sizeof(*tun_key));
+ memcpy(tun_key, &swkey->phy.tun.tun_key, sizeof(*tun_key));
}
- if ((swkey->tun.tun_key.tun_flags & OVS_FLOW_TNL_F_KEY) &&
- nla_put_be64(skb, OVS_KEY_ATTR_TUN_ID, swkey->tun.tun_key.tun_id))
+ if ((swkey->phy.tun.tun_key.tun_flags & OVS_FLOW_TNL_F_KEY) &&
+ nla_put_be64(skb, OVS_KEY_ATTR_TUN_ID, swkey->phy.tun.tun_key.tun_id))
goto nla_put_failure;
if (swkey->phy.in_port != DP_MAX_PORTS &&
struct sw_flow_key {
struct {
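+ /* Keep first: flow_key_start() skips these bytes when no tunnel key is set. */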
+ union {
+ struct ovs_key_ipv4_tunnel tun_key; /* Encapsulating tunnel key. */
+ } tun;
u32 priority; /* Packet QoS priority. */
u16 in_port; /* Input switch port (or DP_MAX_PORTS). */
} phy;
- struct {
- struct ovs_key_ipv4_tunnel tun_key; /* Encapsulating tunnel key. */
- } tun;
struct {
u8 src[ETH_ALEN]; /* Ethernet source address. */
u8 dst[ETH_ALEN]; /* Ethernet destination address. */
int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *);
int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
const struct nlattr *);
-int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
- struct ovs_key_ipv4_tunnel *tun_key,
- const struct nlattr *);
+int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len,
+ const struct nlattr *attr);
#define MAX_ACTIONS_BUFSIZE (16 * 1024)
#define TBL_MIN_BUCKETS 1024
struct flow_table *ovs_flow_tbl_alloc(int new_size);
struct flow_table *ovs_flow_tbl_expand(struct flow_table *table);
struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table);
-void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow);
+void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
+ struct sw_flow_key *key, int key_len);
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
-u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len);
struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *idx);
extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1];