/* Searches 'chain' for a flow matching 'key', which must not have any wildcard
* fields. Returns the flow if successful, otherwise a null pointer.
*
- * Caller must hold rcu_read_lock, and not release it until it is done with the
- * returned flow. */
+ * Caller must hold rcu_read_lock or dp_mutex. */
struct sw_flow *chain_lookup(struct sw_chain *chain,
const struct sw_flow_key *key)
{
* If successful, 'flow' becomes owned by the chain, otherwise it is retained
* by the caller.
*
- * Caller must hold rcu_read_lock. If insertion is successful, it must not
- * release rcu_read_lock until it is done with the inserted flow. */
+ * Caller must hold dp_mutex. */
int chain_insert(struct sw_chain *chain, struct sw_flow *flow)
{
int i;
+ might_sleep();
for (i = 0; i < chain->n_tables; i++) {
struct sw_table *t = chain->tables[i];
if (t->insert(t, flow))
* iterating through the entire contents of each table for keys that contain
* wildcards. Relatively cheap for fully specified keys.
*
- * The caller need not hold any locks. */
+ * Caller must hold dp_mutex. */
int chain_delete(struct sw_chain *chain, const struct sw_flow_key *key,
uint16_t priority, int strict)
{
int count = 0;
int i;
+ might_sleep();
for (i = 0; i < chain->n_tables; i++) {
struct sw_table *t = chain->tables[i];
- rcu_read_lock();
count += t->delete(t, key, priority, strict);
- rcu_read_unlock();
}
return count;
* Expensive as currently implemented, since it iterates through the entire
* contents of each table.
*
- * The caller need not hold any locks. */
+ * Caller must not hold dp_mutex, because individual tables take and release it
+ * as necessary. */
int chain_timeout(struct sw_chain *chain)
{
int count = 0;
int i;
+ might_sleep();
for (i = 0; i < chain->n_tables; i++) {
struct sw_table *t = chain->tables[i];
- rcu_read_lock();
count += t->timeout(chain->dp, t);
- rcu_read_unlock();
}
return count;
}
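
To make the revised contract concrete, here is a minimal sketch of a read-side lookup and a write-side insert under the rules above; the helper names and the negative-on-error convention assumed for chain_insert() are illustrative, not part of this patch.

/* Illustrative only: the read side needs just rcu_read_lock. */
static void lookup_example(struct sw_chain *chain,
                           const struct sw_flow_key *key)
{
        struct sw_flow *flow;

        rcu_read_lock();
        flow = chain_lookup(chain, key);
        if (flow) {
                /* Use 'flow' here: it cannot be freed before
                 * rcu_read_unlock(), because writers defer frees for an
                 * RCU grace period. */
        }
        rcu_read_unlock();
        /* 'flow' must not be touched past this point. */
}

/* Illustrative only: the write side holds dp_mutex and may sleep. */
static int insert_example(struct sw_chain *chain, struct sw_flow *flow)
{
        int error;

        mutex_lock(&dp_mutex);
        error = chain_insert(chain, flow);
        mutex_unlock(&dp_mutex);
        if (error < 0)
                flow_free(flow);        /* on failure the flow is still ours */
        return error;
}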
/* It's hard to imagine wanting more than one datapath, but... */
#define DP_MAX 32
-/* datapaths. Protected on the read side by rcu_read_lock, on the write side
- * by dp_mutex.
+/* Datapaths. Protected on the read side by rcu_read_lock, on the write side
+ * by dp_mutex. dp_mutex is almost completely redundant with the genl_mutex
+ * maintained by the Generic Netlink code, but the timeout path, which runs
+ * from a kernel thread rather than under Generic Netlink, needs mutual
+ * exclusion too.
*
* It is safe to access the datapath and net_bridge_port structures with just
- * the dp_mutex, but to access the chain you need to take the rcu_read_lock
- * also (because dp_mutex doesn't prevent flows from being destroyed).
+ * dp_mutex.
*/
static struct datapath *dps[DP_MAX];
-static DEFINE_MUTEX(dp_mutex);
+DEFINE_MUTEX(dp_mutex);
static int dp_maint_func(void *data);
static int send_port_status(struct net_bridge_port *p, uint8_t status);
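
The reason dp_mutex is not fully redundant with genl_mutex is the timeout path, which runs from the datapath maintenance thread, outside Generic Netlink. A rough, hypothetical sketch of that thread follows (the real dp_maint_func() is not part of this hunk; the kthread API and the sleep interval are assumptions). Per the chain_timeout() comment above, the loop must not hold dp_mutex itself.

static int dp_maint_example(void *data)
{
        struct datapath *dp = data;

        while (!kthread_should_stop()) {
                /* chain_timeout() must be called without dp_mutex held;
                 * the individual tables take and release it themselves. */
                chain_timeout(dp->chain);
                msleep_interruptible(1000);     /* interval is illustrative */
        }
        return 0;
}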
}
/* Creates a new datapath numbered 'dp_idx'. Returns 0 for success or a
- * negative error code.
- *
- * Not called with any locks. */
+ * negative error code. */
static int new_dp(int dp_idx)
{
struct datapath *dp;
if (!try_module_get(THIS_MODULE))
return -ENODEV;
- mutex_lock(&dp_mutex);
- dp = rcu_dereference(dps[dp_idx]);
- if (dp != NULL) {
+ /* Exit early if a datapath with that number already exists. */
+ if (dps[dp_idx]) {
err = -EEXIST;
goto err_unlock;
}
if (IS_ERR(dp->dp_task))
goto err_destroy_chain;
- rcu_assign_pointer(dps[dp_idx], dp);
- mutex_unlock(&dp_mutex);
+ dps[dp_idx] = dp;
return 0;
err_free_dp:
kfree(dp);
err_unlock:
- mutex_unlock(&dp_mutex);
module_put(THIS_MODULE);
return err;
}
-/* Find and return a free port number under 'dp'. Called under dp_mutex. */
+/* Find and return a free port number under 'dp'. */
static int find_portno(struct datapath *dp)
{
int i;
return p;
}
-/* Called with dp_mutex. */
int add_switch_port(struct datapath *dp, struct net_device *dev)
{
struct net_bridge_port *p;
return 0;
}
-/* Delete 'p' from switch.
- * Called with dp_mutex. */
+/* Delete 'p' from switch. */
static int del_switch_port(struct net_bridge_port *p)
{
/* First drop references to device. */
return 0;
}
-/* Called with dp_mutex. */
static void del_dp(struct datapath *dp)
{
struct net_bridge_port *p, *n;
}
#else
/* NB: This has only been tested on 2.4.35 */
-
-/* Called without any locks (?) */
static void dp_frame_hook(struct sk_buff *skb)
{
struct net_bridge_port *p = skb->dev->br_port;
if (!info->attrs[DP_GENL_A_DP_IDX])
return -EINVAL;
- mutex_lock(&dp_mutex);
- dp = dp_get(nla_get_u32((info->attrs[DP_GENL_A_DP_IDX])));
+ dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
if (!dp)
err = -ENOENT;
del_dp(dp);
err = 0;
}
- mutex_unlock(&dp_mutex);
return err;
}
return -EINVAL;
/* Get datapath. */
- mutex_lock(&dp_mutex);
dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
if (!dp) {
err = -ENOENT;
out_put:
dev_put(port);
out:
- mutex_unlock(&dp_mutex);
return err;
}
if (!info->attrs[DP_GENL_A_DP_IDX] || !va)
return -EINVAL;
- rcu_read_lock();
dp = dp_get(nla_get_u32(info->attrs[DP_GENL_A_DP_IDX]));
- if (!dp) {
- err = -ENOENT;
- goto out;
- }
+ if (!dp)
+ return -ENOENT;
- if (nla_len(va) < sizeof(struct ofp_header)) {
- err = -EINVAL;
- goto out;
- }
+ if (nla_len(va) < sizeof(struct ofp_header))
+ return -EINVAL;
oh = nla_data(va);
sender.xid = oh->xid;
sender.pid = info->snd_pid;
sender.seq = info->snd_seq;
- err = fwd_control_input(dp->chain, &sender, nla_data(va), nla_len(va));
-out:
- rcu_read_unlock();
+ mutex_lock(&dp_mutex);
+ err = fwd_control_input(dp->chain, &sender,
+ nla_data(va), nla_len(va));
+ mutex_unlock(&dp_mutex);
return err;
}
* struct genl_ops. This kluge supports earlier versions also. */
cb->done = dp_genl_openflow_done;
- rcu_read_lock();
if (!cb->args[0]) {
struct nlattr *attrs[DP_GENL_A_MAX + 1];
struct ofp_stats_request *rq;
if (err < 0)
return err;
- err = -EINVAL;
-
if (!attrs[DP_GENL_A_DP_IDX])
- goto out;
+ return -EINVAL;
- dp_idx = nla_get_u16(attrs[DP_GENL_A_DP_IDX]);
+ dp_idx = nla_get_u32(attrs[DP_GENL_A_DP_IDX]);
dp = dp_get(dp_idx);
- if (!dp) {
- err = -ENOENT;
- goto out;
- }
+ if (!dp)
+ return -ENOENT;
va = attrs[DP_GENL_A_OPENFLOW];
- len = nla_len(va);
- if (!va || len < sizeof *rq)
- goto out;
+ if (!va || nla_len(va) < sizeof *rq)
+ return -EINVAL;
+ len = nla_len(va);
rq = nla_data(va);
type = ntohs(rq->type);
|| ntohs(rq->header.length) != len
|| type >= ARRAY_SIZE(stats)
|| !stats[type].dump)
- goto out;
+ return -EINVAL;
s = &stats[type];
body_len = len - offsetof(struct ofp_stats_request, body);
if (body_len < s->min_body || body_len > s->max_body)
- goto out;
+ return -EINVAL;
cb->args[0] = 1;
cb->args[1] = dp_idx;
void *state;
err = s->init(dp, rq->body, body_len, &state);
if (err)
- goto out;
+ return err;
cb->args[4] = (long) state;
}
} else if (cb->args[0] == 1) {
s = &stats[cb->args[2]];
dp = dp_get(dp_idx);
- if (!dp) {
- err = -ENOENT;
- goto out;
- }
+ if (!dp)
+ return -ENOENT;
} else {
- err = 0;
- goto out;
+ return 0;
}
sender.xid = cb->args[3];
osr = put_openflow_headers(dp, skb, OFPT_STATS_REPLY, &sender,
&max_openflow_len);
- if (IS_ERR(osr)) {
- err = PTR_ERR(osr);
- goto out;
- }
+ if (IS_ERR(osr))
+ return PTR_ERR(osr);
osr->type = htons(s - stats);
osr->flags = 0;
resize_openflow_skb(skb, &osr->header, max_openflow_len);
err = skb->len;
}
-out:
- rcu_read_unlock();
return err;
}
#ifndef DATAPATH_H
#define DATAPATH_H 1
+#include <linux/mutex.h>
#include <linux/netlink.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
uint32_t seq; /* Netlink sequence ID of request. */
};
+extern struct mutex dp_mutex;
+
int dp_output_port(struct datapath *, struct sk_buff *, int out_port);
int dp_output_control(struct datapath *, struct sk_buff *, uint32_t,
size_t, int);
static int dp_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct dp_dev *dp_dev = dp_dev_priv(netdev);
- struct datapath *dp;
+ struct datapath *dp = dp_dev->dp;
+
+ dp_dev->stats.tx_packets++;
+ dp_dev->stats.tx_bytes += skb->len;
+
+ skb_reset_mac_header(skb);
rcu_read_lock();
- dp = dp_dev->dp;
- if (likely(dp != NULL)) {
- dp_dev->stats.tx_packets++;
- dp_dev->stats.tx_bytes += skb->len;
- skb_reset_mac_header(skb);
- fwd_port_input(dp->chain, skb, OFPP_LOCAL);
- } else {
- dp_dev->stats.tx_dropped++;
- kfree_skb(skb);
- }
+ fwd_port_input(dp->chain, skb, OFPP_LOCAL);
rcu_read_unlock();
+
return 0;
}
void dp_dev_destroy(struct datapath *dp)
{
- struct dp_dev *dp_dev = dp_dev_priv(dp->netdev);
- dp_dev->dp = NULL;
+ netif_tx_disable(dp->netdev);
synchronize_net();
unregister_netdev(dp->netdev);
free_netdev(dp->netdev);
memset(to->pad, '\0', sizeof(to->pad));
}
-/* Returns true if 'flow' can be deleted and set up for a deferred free, false
- * if deletion has already been scheduled (by another thread).
- *
- * Caller must hold rcu_read_lock. */
-int flow_del(struct sw_flow *flow)
-{
- return !atomic_cmpxchg(&flow->deleted, 0, 1);
-}
-EXPORT_SYMBOL(flow_del);
-
/* Allocates and returns a new flow with 'n_actions' actions, using allocation
* flags 'flags'. Returns the new flow or a null pointer on failure. */
struct sw_flow *flow_alloc(int n_actions, gfp_t flags)
#define FLOW_H 1
#include <linux/kernel.h>
-#include <asm/atomic.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/types.h>
/* Locking:
*
* - Readers must take rcu_read_lock and hold it the entire time that the flow
- * must continue to exist. Readers need not take delete_lock. They *may*
- * examine 'deleted' *if* it is important not to read stale data.
+ * must continue to exist.
*
- * - Deleters must take rcu_read_lock and call flow_del to verify that another
- * thread has not already deleted the flow. If not, do a deferred free of
- * the flow with call_rcu, then rcu_assign_pointer or [h]list_del_rcu the
- * flow.
- *
- * - In-place update not yet contemplated.
+ * - Writers must hold dp_mutex.
*/
struct sw_flow {
struct sw_flow_key key;
uint64_t packet_count; /* Number of packets associated with this entry */
uint64_t byte_count; /* Number of bytes associated with this entry */
- atomic_t deleted; /* 0 if not deleted, 1 if deleted. */
struct rcu_head rcu;
};
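
With writers serialized by dp_mutex, the old cmpxchg-based 'deleted' flag becomes unnecessary: only one writer can unlink a given flow, and RCU alone protects readers. A minimal sketch of the resulting writer-side removal, mirroring the tables' do_delete() functions:

/* Illustrative; caller holds dp_mutex. */
static void remove_example(struct sw_flow *flow)
{
        list_del_rcu(&flow->node);      /* readers may still hold a pointer */
        flow_deferred_free(flow);       /* free after an RCU grace period */
}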
void flow_free(struct sw_flow *);
void flow_deferred_free(struct sw_flow *);
void flow_extract(struct sk_buff *, uint16_t in_port, struct sw_flow_key *);
-int flow_del(struct sw_flow *);
void flow_extract_match(struct sw_flow_key* to, const struct ofp_match* from);
void flow_fill_match(struct ofp_match* to, const struct sw_flow_key* from);
flow->init_time = jiffies;
flow->byte_count = 0;
flow->packet_count = 0;
- atomic_set(&flow->deleted, 0);
spin_lock_init(&flow->lock);
memcpy(flow->actions, ofm->actions, n_acts * sizeof *flow->actions);
#define DUMMY_MAX_FLOW 8192
-/* xxx Explain need for this separate list because of RCU */
-static spinlock_t pending_free_lock;
-static struct list_head pending_free_list;
-
/* sw_flow private data for dummy table entries. */
struct sw_flow_dummy {
struct list_head node;
struct sw_table_dummy {
struct sw_table swt;
- spinlock_t lock;
unsigned int max_flows;
- atomic_t n_flows;
+ unsigned int n_flows;
struct list_head flows;
struct list_head iter_flows;
unsigned long int next_serial;
};
-static void table_dummy_sfw_destroy(struct sw_flow_dummy *sfw)
-{
- /* xxx Remove the entry from hardware. If you need to do any other
- * xxx clean-up associated with the entry, do it here.
- */
-
- kfree(sfw);
-}
-
-static void table_dummy_rcu_callback(struct rcu_head *rcu)
-{
- struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
-
- spin_lock(&pending_free_lock);
- if (flow->private) {
- struct sw_flow_dummy *sfw = flow->private;
- list_add(&sfw->node, &pending_free_list);
- flow->private = NULL;
- }
- spin_unlock(&pending_free_lock);
- flow_free(flow);
-}
-
-static void table_dummy_flow_deferred_free(struct sw_flow *flow)
-{
- call_rcu(&flow->rcu, table_dummy_rcu_callback);
-}
-
static struct sw_flow *table_dummy_lookup(struct sw_table *swt,
const struct sw_flow_key *key)
{
/* xxx Do whatever needs to be done to insert an entry in hardware.
* xxx If the entry can't be inserted, return 0. This stub code
* xxx doesn't do anything yet, so we're going to return 0...you
- * xxx shouldn't.
+ * xxx shouldn't (and you should update n_flows in struct
+ * xxx sw_table_dummy, too).
*/
kfree(flow->private);
return 0;
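
For reference, a hardware-backed version of this stub might look roughly like the sketch below. hw_flow_install() is a hypothetical hardware hook; the point is that n_flows is now a plain counter that is safe to update because the caller holds dp_mutex.

static int insert_hw_example(struct sw_table *swt, struct sw_flow *flow)
{
        struct sw_table_dummy *td = (struct sw_table_dummy *) swt;

        if (td->n_flows >= td->max_flows)
                return 0;                       /* full: caller keeps 'flow' */
        if (hw_flow_install(flow) != 0)         /* hypothetical hook */
                return 0;
        flow->serial = td->next_serial++;
        list_add_rcu(&flow->node, &td->flows);
        list_add_rcu(&flow->iter_node, &td->iter_flows);
        td->n_flows++;                          /* protected by dp_mutex */
        return 1;
}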
static int do_delete(struct sw_table *swt, struct sw_flow *flow)
{
- if (flow_del(flow)) {
- list_del_rcu(&flow->node);
- list_del_rcu(&flow->iter_node);
- table_dummy_flow_deferred_free(flow);
- return 1;
- }
- return 0;
+ /* xxx Remove the entry from hardware. If you need to do any other
+ * xxx clean-up associated with the entry, do it here.
+ */
+ list_del_rcu(&flow->node);
+ list_del_rcu(&flow->iter_node);
+ kfree(flow->private); /* only used under dp_mutex, safe to free now */
+ flow_deferred_free(flow); /* RCU readers may still hold 'flow' */
+ return 1;
}
static int table_dummy_delete(struct sw_table *swt,
struct sw_flow *flow;
unsigned int count = 0;
- list_for_each_entry_rcu (flow, &td->flows, node) {
+ list_for_each_entry (flow, &td->flows, node) {
if (flow_del_matches(&flow->key, key, strict)
&& (!strict || (flow->priority == priority)))
count += do_delete(swt, flow);
}
- if (count)
- atomic_sub(count, &td->n_flows);
+ td->n_flows -= count;
return count;
}
{
struct sw_table_dummy *td = (struct sw_table_dummy *) swt;
struct sw_flow *flow;
- struct sw_flow_dummy *sfw, *n;
int del_count = 0;
uint64_t packet_count = 0;
- int i = 0;
- list_for_each_entry_rcu (flow, &td->flows, node) {
+ mutex_lock(&dp_mutex);
+ list_for_each_entry (flow, &td->flows, node) {
/* xxx Retrieve the packet count associated with this entry
* xxx and store it in "packet_count".
*/
}
del_count += do_delete(swt, flow);
}
- if ((i % 50) == 0) {
- msleep_interruptible(1);
- }
- i++;
}
+ td->n_flows -= del_count;
+ mutex_unlock(&dp_mutex);
- /* Remove any entries queued for removal */
- spin_lock_bh(&pending_free_lock);
- list_for_each_entry_safe (sfw, n, &pending_free_list, node) {
- list_del(&sfw->node);
- table_dummy_sfw_destroy(sfw);
- }
- spin_unlock_bh(&pending_free_lock);
-
- if (del_count)
- atomic_sub(del_count, &td->n_flows);
return del_count;
}
unsigned long start;
start = ~position->private[0];
- list_for_each_entry_rcu (flow, &tl->iter_flows, iter_node) {
+ list_for_each_entry (flow, &tl->iter_flows, iter_node) {
if (flow->serial <= start && flow_matches(key, &flow->key)) {
int error = callback(flow, private);
if (error) {
{
struct sw_table_dummy *td = (struct sw_table_dummy *) swt;
stats->name = "dummy";
- stats->n_flows = atomic_read(&td->n_flows);
+ stats->n_flows = td->n_flows;
stats->max_flows = td->max_flows;
}
swt->stats = table_dummy_stats;
td->max_flows = DUMMY_MAX_FLOW;
- atomic_set(&td->n_flows, 0);
+ td->n_flows = 0;
INIT_LIST_HEAD(&td->flows);
INIT_LIST_HEAD(&td->iter_flows);
- spin_lock_init(&td->lock);
td->next_serial = 0;
- INIT_LIST_HEAD(&pending_free_list);
- spin_lock_init(&pending_free_lock);
-
return swt;
}
#define __LINUX_KERNEL_WRAPPER_H 1
#include_next <linux/kernel.h>
+#include <linux/config.h>
/**
* container_of - cast a member of a structure out to the containing structure
/* Force a compilation error if condition is true */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+/**
+ * might_sleep - annotation for functions that can sleep
+ *
+ * This macro will print a stack trace if it is executed in an atomic
+ * context (spinlock, irq-handler, ...).
+ *
+ * This is a useful debugging help to be able to catch problems early and not
+ * be bitten later when the calling function happens to sleep when it is not
+ * supposed to.
+ */
+#define might_resched() do { } while (0)
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ void __might_sleep(char *file, int line);
+# define might_sleep() \
+ do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
+#else
+# define might_sleep() do { might_resched(); } while (0)
+#endif
+
+#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
+
#endif
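
Usage note (illustrative): this backport keys the check off in_interrupt(), so the annotation fires when a might_sleep()-annotated function is reached from softirq or hardirq context, e.g. a timer handler.

/* Hypothetical example.  With CONFIG_DEBUG_SPINLOCK enabled, this prints
 * "BUG: sleeping function called from invalid context at <file>:<line>"
 * plus a stack trace, because timer handlers run with in_interrupt()
 * true. */
static void timer_example(unsigned long arg)
{
        might_sleep();
}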
* Distributed under the terms of the GNU GPL version 2.
*/
+#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <asm/hardirq.h>
int vprintk(const char *msg, ...)
{
}
EXPORT_SYMBOL(vprintk);
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+void __might_sleep(char *file, int line)
+{
+ static unsigned long prev_jiffy; /* ratelimiting */
+
+ if (in_interrupt() && !oops_in_progress) {
+ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
+ return;
+ prev_jiffy = jiffies;
+ printk(KERN_ERR "BUG: sleeping function called from invalid"
+ " context at %s:%d\n", file, line);
+ dump_stack();
+ }
+}
+EXPORT_SYMBOL(__might_sleep);
+#endif
struct sw_table_hash {
struct sw_table swt;
- spinlock_t lock;
struct crc32 crc32;
- atomic_t n_flows;
+ unsigned int n_flows;
unsigned int bucket_mask; /* Number of buckets minus 1. */
struct sw_flow **buckets;
};
{
struct sw_table_hash *th = (struct sw_table_hash *) swt;
struct sw_flow **bucket;
- unsigned long int flags;
int retval;
if (flow->key.wildcards != 0)
return 0;
- spin_lock_irqsave(&th->lock, flags);
bucket = find_bucket(swt, &flow->key);
if (*bucket == NULL) {
- atomic_inc(&th->n_flows);
+ th->n_flows++;
rcu_assign_pointer(*bucket, flow);
retval = 1;
} else {
struct sw_flow *old_flow = *bucket;
- if (!memcmp(&old_flow->key, &flow->key, sizeof flow->key)
- && flow_del(old_flow)) {
+ if (!memcmp(&old_flow->key, &flow->key, sizeof flow->key)) {
rcu_assign_pointer(*bucket, flow);
flow_deferred_free(old_flow);
retval = 1;
retval = 0;
}
}
- spin_unlock_irqrestore(&th->lock, flags);
return retval;
}
/* Caller must update n_flows. */
static int do_delete(struct sw_flow **bucket, struct sw_flow *flow)
{
- if (flow_del(flow)) {
- rcu_assign_pointer(*bucket, NULL);
- flow_deferred_free(flow);
- return 1;
- }
- return 0;
+ rcu_assign_pointer(*bucket, NULL);
+ flow_deferred_free(flow);
+ return 1;
}
/* Returns number of deleted flows. We can ignore the priority
count += do_delete(bucket, flow);
}
}
- if (count)
- atomic_sub(count, &th->n_flows);
+ th->n_flows -= count;
return count;
}
unsigned int i;
int count = 0;
+ mutex_lock(&dp_mutex);
for (i = 0; i <= th->bucket_mask; i++) {
struct sw_flow **bucket = &th->buckets[i];
struct sw_flow *flow = *bucket;
dp_send_flow_expired(dp, flow);
}
}
+ th->n_flows -= count;
+ mutex_unlock(&dp_mutex);
- if (count)
- atomic_sub(count, &th->n_flows);
return count;
}
{
struct sw_table_hash *th = (struct sw_table_hash *) swt;
stats->name = "hash";
- stats->n_flows = atomic_read(&th->n_flows);
+ stats->n_flows = th->n_flows;
stats->max_flows = th->bucket_mask + 1;
}
swt->iterate = table_hash_iterate;
swt->stats = table_hash_stats;
- spin_lock_init(&th->lock);
crc32_init(&th->crc32, polynomial);
- atomic_set(&th->n_flows, 0);
+ th->n_flows = 0;
return swt;
}
struct sw_table_linear {
struct sw_table swt;
- spinlock_t lock;
unsigned int max_flows;
- atomic_t n_flows;
+ unsigned int n_flows;
struct list_head flows;
struct list_head iter_flows;
unsigned long int next_serial;
static int table_linear_insert(struct sw_table *swt, struct sw_flow *flow)
{
struct sw_table_linear *tl = (struct sw_table_linear *) swt;
- unsigned long int flags;
struct sw_flow *f;
* always be placed behind those with equal priority. Just replace
* any flows that match exactly.
*/
- spin_lock_irqsave(&tl->lock, flags);
- list_for_each_entry_rcu (f, &tl->flows, node) {
+ list_for_each_entry (f, &tl->flows, node) {
if (f->priority == flow->priority
&& f->key.wildcards == flow->key.wildcards
- && flow_matches(&f->key, &flow->key)
- && flow_del(f)) {
+ && flow_matches(&f->key, &flow->key)) {
flow->serial = f->serial;
list_replace_rcu(&f->node, &flow->node);
list_replace_rcu(&f->iter_node, &flow->iter_node);
- spin_unlock_irqrestore(&tl->lock, flags);
flow_deferred_free(f);
return 1;
}
}
/* Make sure there's room in the table. */
- if (atomic_read(&tl->n_flows) >= tl->max_flows) {
- spin_unlock_irqrestore(&tl->lock, flags);
+ if (tl->n_flows >= tl->max_flows)
return 0;
- }
- atomic_inc(&tl->n_flows);
+ tl->n_flows++;
/* Insert the entry immediately in front of where we're pointing. */
flow->serial = tl->next_serial++;
list_add_tail_rcu(&flow->node, &f->node);
list_add_rcu(&flow->iter_node, &tl->iter_flows);
- spin_unlock_irqrestore(&tl->lock, flags);
return 1;
}
static int do_delete(struct sw_table *swt, struct sw_flow *flow)
{
- if (flow_del(flow)) {
- list_del_rcu(&flow->node);
- list_del_rcu(&flow->iter_node);
- flow_deferred_free(flow);
- return 1;
- }
- return 0;
+ list_del_rcu(&flow->node);
+ list_del_rcu(&flow->iter_node);
+ flow_deferred_free(flow);
+ return 1;
}
static int table_linear_delete(struct sw_table *swt,
struct sw_flow *flow;
unsigned int count = 0;
- list_for_each_entry_rcu (flow, &tl->flows, node) {
+ list_for_each_entry (flow, &tl->flows, node) {
if (flow_del_matches(&flow->key, key, strict)
&& (!strict || (flow->priority == priority)))
count += do_delete(swt, flow);
}
- if (count)
- atomic_sub(count, &tl->n_flows);
+ tl->n_flows -= count;
return count;
}
struct sw_flow *flow;
int count = 0;
- list_for_each_entry_rcu (flow, &tl->flows, node) {
+ mutex_lock(&dp_mutex);
+ list_for_each_entry (flow, &tl->flows, node) {
if (flow_timeout(flow)) {
count += do_delete(swt, flow);
if (dp->flags & OFPC_SEND_FLOW_EXP)
dp_send_flow_expired(dp, flow);
}
}
- if (count)
- atomic_sub(count, &tl->n_flows);
+ tl->n_flows -= count;
+ mutex_unlock(&dp_mutex);
return count;
}
unsigned long start;
start = position->private[0];
- list_for_each_entry_rcu (flow, &tl->iter_flows, iter_node) {
+ list_for_each_entry (flow, &tl->iter_flows, iter_node) {
if (flow->serial >= start && flow_matches(key, &flow->key)) {
int error = callback(flow, private);
if (error) {
{
struct sw_table_linear *tl = (struct sw_table_linear *) swt;
stats->name = "linear";
- stats->n_flows = atomic_read(&tl->n_flows);
+ stats->n_flows = tl->n_flows;
stats->max_flows = tl->max_flows;
}
swt->stats = table_linear_stats;
tl->max_flows = max_flows;
- atomic_set(&tl->n_flows, 0);
+ tl->n_flows = 0;
INIT_LIST_HEAD(&tl->flows);
INIT_LIST_HEAD(&tl->iter_flows);
- spin_lock_init(&tl->lock);
tl->next_serial = 0;
return swt;