Flow deletion is already fully serialized on dp_mutex, so the per-flow atomic 'deleted' flag and flow_del() are redundant: drop them and rely on dp_mutex for mutual exclusion among writers, with RCU deferring the actual free past any concurrent readers.
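
Purely for illustration, not part of the patch: a minimal sketch of how a deleter and a reader interact once flow_del() is gone, following the linear-table pattern changed below. example_delete(), example_reader() and example_lookup() are hypothetical names that exist only in this sketch.

#include <linux/list.h>
#include <linux/rcupdate.h>

struct sw_table;
/* Hypothetical lookup helper, not in the tree; stands in for the table's
 * own lookup function. */
struct sw_flow *example_lookup(struct sw_table *, const struct sw_flow_key *);

/* Writer: the caller already holds dp_mutex, so no flag or cmpxchg is needed
 * to decide which thread deletes; RCU only defers the free past readers. */
static void example_delete(struct sw_flow *flow)
{
	list_del_rcu(&flow->node);      /* unlink from the table's flow list */
	list_del_rcu(&flow->iter_node); /* unlink from the iteration list */
	flow_deferred_free(flow);       /* call_rcu()-based deferred free */
}

/* Reader: rcu_read_lock() keeps any flow it finds alive until
 * rcu_read_unlock(), even if a writer unlinks it in the meantime. */
static void example_reader(struct sw_table *table, const struct sw_flow_key *key)
{
	struct sw_flow *flow;

	rcu_read_lock();
	flow = example_lookup(table, key);
	if (flow)
		flow->packet_count++;   /* safe while the read lock is held */
	rcu_read_unlock();
}
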
memset(to->pad, '\0', sizeof(to->pad));
}
-/* Returns true if 'flow' can be deleted and set up for a deferred free, false
- * if deletion has already been scheduled (by another thread).
- *
- * Caller must hold rcu_read_lock. */
-int flow_del(struct sw_flow *flow)
-{
- return !atomic_cmpxchg(&flow->deleted, 0, 1);
-}
-EXPORT_SYMBOL(flow_del);
-
/* Allocates and returns a new flow with 'n_actions' actions, using allocation
 * flags 'flags'. Returns the new flow or a null pointer on failure. */
struct sw_flow *flow_alloc(int n_actions, gfp_t flags)
#define FLOW_H 1
#include <linux/kernel.h>
-#include <asm/atomic.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/types.h>
/* Locking:
*
* - Readers must take rcu_read_lock and hold it the entire time that the flow
- * must continue to exist. Readers need not take delete_lock. They *may*
- * examine 'deleted' *if* it is important not to read stale data.
+ * must continue to exist.
*
- * - Deleters must take rcu_read_lock and call flow_del to verify that another
- * thread has not already deleted the flow. If not, do a deferred free of
- * the flow with call_rcu, then rcu_assign_pointer or [h]list_del_rcu the
- * flow.
- *
- * - In-place update not yet contemplated.
+ * - Writers must hold dp_mutex.
*/
struct sw_flow {
struct sw_flow_key key;
uint64_t packet_count; /* Number of packets associated with this entry */
uint64_t byte_count; /* Number of bytes associated with this entry */
- atomic_t deleted; /* 0 if not deleted, 1 if deleted. */
struct rcu_head rcu;
};
void flow_free(struct sw_flow *);
void flow_deferred_free(struct sw_flow *);
void flow_extract(struct sk_buff *, uint16_t in_port, struct sw_flow_key *);
-int flow_del(struct sw_flow *);
void flow_extract_match(struct sw_flow_key* to, const struct ofp_match* from);
void flow_fill_match(struct ofp_match* to, const struct sw_flow_key* from);
flow->init_time = jiffies;
flow->byte_count = 0;
flow->packet_count = 0;
- atomic_set(&flow->deleted, 0);
spin_lock_init(&flow->lock);
memcpy(flow->actions, ofm->actions, n_acts * sizeof *flow->actions);
static int do_delete(struct sw_table *swt, struct sw_flow *flow)
{
- if (flow_del(flow)) {
- list_del_rcu(&flow->node);
- list_del_rcu(&flow->iter_node);
- table_dummy_flow_deferred_free(flow);
- return 1;
- }
- return 0;
+ list_del_rcu(&flow->node);
+ list_del_rcu(&flow->iter_node);
+ table_dummy_flow_deferred_free(flow);
+ return 1;
}
static int table_dummy_delete(struct sw_table *swt,
retval = 1;
} else {
struct sw_flow *old_flow = *bucket;
- if (!memcmp(&old_flow->key, &flow->key, sizeof flow->key)
- && flow_del(old_flow)) {
+ if (!memcmp(&old_flow->key, &flow->key, sizeof flow->key)) {
rcu_assign_pointer(*bucket, flow);
flow_deferred_free(old_flow);
retval = 1;
/* Caller must update n_flows. */
static int do_delete(struct sw_flow **bucket, struct sw_flow *flow)
{
- if (flow_del(flow)) {
- rcu_assign_pointer(*bucket, NULL);
- flow_deferred_free(flow);
- return 1;
- }
- return 0;
+ rcu_assign_pointer(*bucket, NULL);
+ flow_deferred_free(flow);
+ return 1;
}
/* Returns number of deleted flows. We can ignore the priority
list_for_each_entry (f, &tl->flows, node) {
if (f->priority == flow->priority
&& f->key.wildcards == flow->key.wildcards
- && flow_matches(&f->key, &flow->key)
- && flow_del(f)) {
+ && flow_matches(&f->key, &flow->key)) {
flow->serial = f->serial;
list_replace_rcu(&f->node, &flow->node);
list_replace_rcu(&f->iter_node, &flow->iter_node);
static int do_delete(struct sw_table *swt, struct sw_flow *flow)
{
- if (flow_del(flow)) {
- list_del_rcu(&flow->node);
- list_del_rcu(&flow->iter_node);
- flow_deferred_free(flow);
- return 1;
- }
- return 0;
+ list_del_rcu(&flow->node);
+ list_del_rcu(&flow->iter_node);
+ flow_deferred_free(flow);
+ return 1;
}
static int table_linear_delete(struct sw_table *swt,