return ERR_PTR(-ENOMEM);
spin_lock_init(&flow->lock);
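+ /* New flows start with a single reference, owned by the caller. */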
+ atomic_set(&flow->refcnt, 1);
+ flow->dead = false;
return flow;
}
-void flow_free(struct sw_flow *flow)
-{
- if (unlikely(!flow))
- return;
-
- kmem_cache_free(flow_cache, flow);
-}
-
-/* Frees the entire 'flow' (both base and actions) immediately. */
-static void flow_free_full(struct sw_flow *flow)
-{
- kfree(flow->sf_acts);
- flow_free(flow);
-}
-
void flow_free_tbl(struct tbl_node *node)
{
struct sw_flow *flow = flow_cast(node);
- flow_free_full(flow);
+
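+ /* Mark the flow dead before dropping the table's reference. */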
+ flow->dead = true;
+ flow_put(flow);
}
/* RCU callback used by flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
- flow_free_full(flow);
+
+ flow->dead = true;
+ flow_put(flow);
}
/* Schedules 'flow' to be freed after the next RCU grace period. */
void flow_deferred_free(struct sw_flow *flow)
{
call_rcu(&flow->rcu, rcu_free_flow_callback);
}
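+/* Acquires a reference to 'flow'. */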
+void flow_hold(struct sw_flow *flow)
+{
+ atomic_inc(&flow->refcnt);
+}
+
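+/* Releases a reference to 'flow'.  Frees the flow and its actions once
+ * the last reference is dropped.  A NULL 'flow' is a no-op. */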
+void flow_put(struct sw_flow *flow)
+{
+ if (unlikely(!flow))
+ return;
+
+ if (atomic_dec_and_test(&flow->refcnt)) {
+ kfree(flow->sf_acts);
+ kmem_cache_free(flow_cache, flow);
+ }
+}
+
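+/*
+ * Example (a sketch, not part of this change): a caller that finds a
+ * flow under rcu_read_lock() can take a reference to keep the flow
+ * valid after the read-side critical section ends.  'lookup_flow()'
+ * is a hypothetical helper used only to illustrate the pairing:
+ *
+ *	rcu_read_lock();
+ *	flow = lookup_flow(table, &key);
+ *	if (flow)
+ *		flow_hold(flow);
+ *	rcu_read_unlock();
+ *	...
+ *	flow_put(flow);
+ */
+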
/* RCU callback used by flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
struct sw_flow_actions *sf_acts = container_of(rcu, struct sw_flow_actions, rcu);

kfree(sf_acts);
}

struct odp_flow_key key;
struct sw_flow_actions *sf_acts;
+ atomic_t refcnt; /* Reference count; the flow is freed when it drops to zero. */
+ bool dead; /* Set when the flow is removed from its table. */
+
spinlock_t lock; /* Lock for values below. */
unsigned long used; /* Last used time (in jiffies). */
u64 packet_count; /* Number of packets matched. */
void flow_exit(void);
struct sw_flow *flow_alloc(void);
-void flow_free(struct sw_flow *flow);
void flow_deferred_free(struct sw_flow *);
void flow_free_tbl(struct tbl_node *);
struct sw_flow_actions *flow_actions_alloc(size_t n_actions);
void flow_deferred_free_acts(struct sw_flow_actions *);
+void flow_hold(struct sw_flow *);
+void flow_put(struct sw_flow *);
+
int flow_extract(struct sk_buff *, u16 in_port, struct odp_flow_key *);
void flow_used(struct sw_flow *, struct sk_buff *);