From fb8c93473efacd67a50117d0f2a3084f2d96ceca Mon Sep 17 00:00:00 2001
From: Jesse Gross
Date: Sun, 29 Aug 2010 09:49:51 -0700
Subject: [PATCH] datapath: Add ref counting for flows.

Currently flows are only used within the confines of one
rcu_read_lock()/rcu_read_unlock() session.  However, with the addition
of header caching we will need to hold references to flows for longer
periods of time.  This adds support for that by adding refcounts to
flows.  RCU is still used for normal packet handling to avoid a
performance impact from constantly updating the refcount.  However,
instead of directly freeing the flow after a grace period we simply
decrement the refcount.

Signed-off-by: Jesse Gross
Reviewed-by: Ben Pfaff
---
 datapath/datapath.c |  3 ++-
 datapath/flow.c     | 41 ++++++++++++++++++++++++-----------------
 datapath/flow.h     |  7 ++++++-
 3 files changed, 32 insertions(+), 19 deletions(-)

diff --git a/datapath/datapath.c b/datapath/datapath.c
index 1677927f..06e1006a 100644
--- a/datapath/datapath.c
+++ b/datapath/datapath.c
@@ -1109,7 +1109,8 @@ static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
 error_free_flow_acts:
         kfree(flow->sf_acts);
 error_free_flow:
-        flow_free(flow);
+        flow->sf_acts = NULL;
+        flow_put(flow);
 error:
         return error;
 }
diff --git a/datapath/flow.c b/datapath/flow.c
index 1f01166c..dfbf7693 100644
--- a/datapath/flow.c
+++ b/datapath/flow.c
@@ -132,36 +132,27 @@ struct sw_flow *flow_alloc(void)
                 return ERR_PTR(-ENOMEM);

         spin_lock_init(&flow->lock);
+        atomic_set(&flow->refcnt, 1);
+        flow->dead = false;

         return flow;
 }

-void flow_free(struct sw_flow *flow)
-{
-        if (unlikely(!flow))
-                return;
-
-        kmem_cache_free(flow_cache, flow);
-}
-
-/* Frees the entire 'flow' (both base and actions) immediately. */
-static void flow_free_full(struct sw_flow *flow)
-{
-        kfree(flow->sf_acts);
-        flow_free(flow);
-}
-
 void flow_free_tbl(struct tbl_node *node)
 {
         struct sw_flow *flow = flow_cast(node);
-        flow_free_full(flow);
+
+        flow->dead = true;
+        flow_put(flow);
 }

 /* RCU callback used by flow_deferred_free. */
 static void rcu_free_flow_callback(struct rcu_head *rcu)
 {
         struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
-        flow_free_full(flow);
+
+        flow->dead = true;
+        flow_put(flow);
 }

 /* Schedules 'flow' to be freed after the next RCU grace period.
@@ -171,6 +162,22 @@ void flow_deferred_free(struct sw_flow *flow)
         call_rcu(&flow->rcu, rcu_free_flow_callback);
 }

+void flow_hold(struct sw_flow *flow)
+{
+        atomic_inc(&flow->refcnt);
+}
+
+void flow_put(struct sw_flow *flow)
+{
+        if (unlikely(!flow))
+                return;
+
+        if (atomic_dec_and_test(&flow->refcnt)) {
+                kfree(flow->sf_acts);
+                kmem_cache_free(flow_cache, flow);
+        }
+}
+
 /* RCU callback used by flow_deferred_free_acts. */
 static void rcu_free_acts_callback(struct rcu_head *rcu)
 {
diff --git a/datapath/flow.h b/datapath/flow.h
index 484ca120..3f434677 100644
--- a/datapath/flow.h
+++ b/datapath/flow.h
@@ -36,6 +36,9 @@ struct sw_flow {
         struct odp_flow_key key;
         struct sw_flow_actions *sf_acts;

+        atomic_t refcnt;
+        bool dead;
+
         spinlock_t lock;        /* Lock for values below. */
         unsigned long used;     /* Last used time (in jiffies). */
         u64 packet_count;       /* Number of packets matched. */
@@ -62,13 +65,15 @@ int flow_init(void);
 void flow_exit(void);

 struct sw_flow *flow_alloc(void);
-void flow_free(struct sw_flow *flow);
 void flow_deferred_free(struct sw_flow *);
 void flow_free_tbl(struct tbl_node *);

 struct sw_flow_actions *flow_actions_alloc(size_t n_actions);
 void flow_deferred_free_acts(struct sw_flow_actions *);

+void flow_hold(struct sw_flow *);
+void flow_put(struct sw_flow *);
+
 int flow_extract(struct sk_buff *, u16 in_port, struct odp_flow_key *);
 void flow_used(struct sw_flow *, struct sk_buff *);

-- 
2.30.2
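The intended usage pattern behind this patch is: fast-path packet processing
keeps doing RCU-only lookups and never touches the refcount, while a
longer-lived consumer (such as the header caching code this change prepares
for) takes its own reference before leaving the RCU read-side critical
section and releases it later with flow_put().  The sketch below is not part
of the patch; get_flow_for_caching() and lookup_flow() are hypothetical
names, while flow_hold()/flow_put() and the RCU calls are the real APIs.

/*
 * Hypothetical consumer of the new refcount API (not part of this patch).
 * An RCU lookup only guarantees the flow stays valid until rcu_read_unlock(),
 * so a caller that wants to keep the flow afterwards pins it with
 * flow_hold() and drops it later with flow_put().
 */
static struct sw_flow *get_flow_for_caching(struct datapath *dp,
                                            const struct odp_flow_key *key)
{
        struct sw_flow *flow;

        rcu_read_lock();
        flow = lookup_flow(dp, key);    /* hypothetical RCU-protected lookup */
        if (flow)
                flow_hold(flow);        /* pin the flow beyond the RCU read side */
        rcu_read_unlock();

        return flow;                    /* caller must eventually call flow_put(flow) */
}

This split keeps the common per-packet path free of atomic operations; only
consumers that outlive the read-side critical section pay for the reference
count, which is the trade-off described in the commit message.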