Header caching previously required the ability to maintain the lifetime
of flows across RCU boundaries. However, now that header caching is
gone we can simplify the code and make it match the upstream version.
Signed-off-by: Jesse Gross <jesse@nicira.com>
Acked-by: Pravin B Shelar <pshelar@nicira.com>
err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
if (err)
err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
if (err)
err = ovs_flow_metadata_from_nlattrs(flow, key_len, a[OVS_PACKET_ATTR_KEY]);
if (err)
err = ovs_flow_metadata_from_nlattrs(flow, key_len, a[OVS_PACKET_ATTR_KEY]);
if (err)
err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0);
if (err)
err = validate_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0);
if (err)
acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
err = PTR_ERR(acts);
if (IS_ERR(acts))
acts = ovs_flow_actions_alloc(a[OVS_PACKET_ATTR_ACTIONS]);
err = PTR_ERR(acts);
if (IS_ERR(acts))
rcu_assign_pointer(flow->sf_acts, acts);
OVS_CB(packet)->flow = flow;
rcu_assign_pointer(flow->sf_acts, acts);
OVS_CB(packet)->flow = flow;
local_bh_enable();
rcu_read_unlock();
local_bh_enable();
rcu_read_unlock();
return err;
err_unlock:
rcu_read_unlock();
return err;
err_unlock:
rcu_read_unlock();
-err_flow_put:
- ovs_flow_put(flow);
+err_flow_free:
+ ovs_flow_free(flow);
err_kfree_skb:
kfree_skb(packet);
err:
err_kfree_skb:
kfree_skb(packet);
err:
return 0;
error_free_flow:
return 0;
error_free_flow:
return ERR_PTR(-ENOMEM);
spin_lock_init(&flow->lock);
return ERR_PTR(-ENOMEM);
spin_lock_init(&flow->lock);
- atomic_set(&flow->refcnt, 1);
-static void flow_free(struct sw_flow *flow)
-{
- flow->dead = true;
- ovs_flow_put(flow);
-}
-
void ovs_flow_tbl_destroy(struct flow_table *table)
{
int i;
void ovs_flow_tbl_destroy(struct flow_table *table)
{
int i;
hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
hlist_del_rcu(&flow->hash_node[ver]);
hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
hlist_del_rcu(&flow->hash_node[ver]);
return __flow_tbl_rehash(table, table->n_buckets * 2);
}
return __flow_tbl_rehash(table, table->n_buckets * 2);
}
+void ovs_flow_free(struct sw_flow *flow)
+{
+ if (unlikely(!flow))
+ return;
+
+ kfree((struct sf_flow_acts __force *)flow->sf_acts);
+ kmem_cache_free(flow_cache, flow);
+}
+
/* RCU callback used by ovs_flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
/* RCU callback used by ovs_flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
- flow->dead = true;
- ovs_flow_put(flow);
}
/* Schedules 'flow' to be freed after the next RCU grace period.
}
/* Schedules 'flow' to be freed after the next RCU grace period.
call_rcu(&flow->rcu, rcu_free_flow_callback);
}
call_rcu(&flow->rcu, rcu_free_flow_callback);
}
-void ovs_flow_hold(struct sw_flow *flow)
-{
- atomic_inc(&flow->refcnt);
-}
-
-void ovs_flow_put(struct sw_flow *flow)
-{
- if (unlikely(!flow))
- return;
-
- if (atomic_dec_and_test(&flow->refcnt)) {
- kfree((struct sf_flow_acts __force *)flow->sf_acts);
- kmem_cache_free(flow_cache, flow);
- }
-}
-
/* RCU callback used by ovs_flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
/* RCU callback used by ovs_flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
struct sw_flow_key key;
struct sw_flow_actions __rcu *sf_acts;
struct sw_flow_key key;
struct sw_flow_actions __rcu *sf_acts;
- atomic_t refcnt;
- bool dead;
-
spinlock_t lock; /* Lock for values below. */
unsigned long used; /* Last used time (in jiffies). */
u64 packet_count; /* Number of packets matched. */
spinlock_t lock; /* Lock for values below. */
unsigned long used; /* Last used time (in jiffies). */
u64 packet_count; /* Number of packets matched. */
struct sw_flow *ovs_flow_alloc(void);
void ovs_flow_deferred_free(struct sw_flow *);
struct sw_flow *ovs_flow_alloc(void);
void ovs_flow_deferred_free(struct sw_flow *);
+void ovs_flow_free(struct sw_flow *);
struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *);
void ovs_flow_deferred_free_acts(struct sw_flow_actions *);
struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *);
void ovs_flow_deferred_free_acts(struct sw_flow_actions *);
-void ovs_flow_hold(struct sw_flow *);
-void ovs_flow_put(struct sw_flow *);
-
int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *,
int *key_lenp);
void ovs_flow_used(struct sw_flow *, struct sk_buff *);
int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *,
int *key_lenp);
void ovs_flow_used(struct sw_flow *, struct sk_buff *);