struct sw_table_dummy {
struct sw_table swt;
- spinlock_t lock;
unsigned int max_flows;
atomic_t n_flows;
struct list_head flows;

struct sw_flow *flow;
unsigned int count = 0;
- list_for_each_entry_rcu (flow, &td->flows, node) {
+ list_for_each_entry (flow, &td->flows, node) {
if (flow_del_matches(&flow->key, key, strict)
&& (!strict || (flow->priority == priority)))
count += do_delete(swt, flow);
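
The `_rcu` walks above lose their suffix because this patch moves all table modification under `dp_mutex` (visible in the timeout hunks below), so the write side no longer needs RCU-safe traversal. Note that `do_delete()` can still unlink with `list_del_rcu()`, which leaves the victim's forward pointer intact, so even the plain walk can step past a node it just removed. A minimal sketch of that writer-side pattern, using the defensive `list_for_each_entry_safe()` variant; `table_mutex` and the `struct flow` layout are illustrative stand-ins:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct flow {
        struct list_head node;
        struct rcu_head rcu;
        bool dead;
};

static DEFINE_MUTEX(table_mutex);       /* stands in for dp_mutex */
static LIST_HEAD(flows);

static unsigned int delete_dead_flows(void)
{
        struct flow *f, *next;
        unsigned int count = 0;

        mutex_lock(&table_mutex);       /* all writers serialize here */
        list_for_each_entry_safe (f, next, &flows, node) {
                if (f->dead) {
                        /* Unlink; readers already inside the list can
                         * still walk through the node safely. */
                        list_del_rcu(&f->node);
                        /* Reclaim only after a grace period, once no
                         * reader can hold a reference any more. */
                        kfree_rcu(f, rcu);
                        count++;
                }
        }
        mutex_unlock(&table_mutex);
        return count;
}
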
int i = 0;
mutex_lock(&dp_mutex);
- list_for_each_entry_rcu (flow, &td->flows, node) {
+ list_for_each_entry (flow, &td->flows, node) {
/* xxx Retrieve the packet count associated with this entry
* xxx and store it in "packet_count".
*/

unsigned long start;
start = ~position->private[0];
- list_for_each_entry_rcu (flow, &tl->iter_flows, iter_node) {
+ list_for_each_entry (flow, &tl->iter_flows, iter_node) {
if (flow->serial <= start && flow_matches(key, &flow->key)) {
int error = callback(flow, private);
if (error) {

atomic_set(&td->n_flows, 0);
INIT_LIST_HEAD(&td->flows);
INIT_LIST_HEAD(&td->iter_flows);
- spin_lock_init(&td->lock);
td->next_serial = 0;
INIT_LIST_HEAD(&pending_free_list);
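
The iterate functions (above, and the linear table's twin below) resume a partially completed dump through the serial number stamped on each flow: the caller's position cookie records how far the previous pass got, so no lock has to be held across calls. The dummy variant stores the complement (`~position->private[0]`, compared with `<=`), apparently so that a zero-initialized cookie matches every serial. A hypothetical sketch of the idiom, with the error-path bookkeeping (elided above) reconstructed; all names here are illustrative:

#include <linux/list.h>

struct position {
        unsigned long private[4];       /* opaque resume cookie */
};

struct flow {
        struct list_head iter_node;
        unsigned long serial;           /* from a monotonic per-table counter */
};

static int iterate_flows(struct list_head *iter_flows, struct position *pos,
                         int (*callback)(struct flow *, void *), void *aux)
{
        struct flow *flow;
        unsigned long start = pos->private[0];

        list_for_each_entry (flow, iter_flows, iter_node) {
                if (flow->serial >= start) {
                        int error = callback(flow, aux);
                        if (error) {
                                /* Record where we stopped so the next
                                 * call retries this flow and goes on. */
                                pos->private[0] = flow->serial;
                                return error;
                        }
                }
        }
        return 0;
}
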
struct sw_table_hash {
struct sw_table swt;
- spinlock_t lock;
struct crc32 crc32;
atomic_t n_flows;
unsigned int bucket_mask; /* Number of buckets minus 1. */

{
struct sw_table_hash *th = (struct sw_table_hash *) swt;
struct sw_flow **bucket;
- unsigned long int flags;
int retval;
if (flow->key.wildcards != 0)
return 0;
- spin_lock_irqsave(&th->lock, flags);
bucket = find_bucket(swt, &flow->key);
if (*bucket == NULL) {
atomic_inc(&th->n_flows);
retval = 0;
}
}
- spin_unlock_irqrestore(&th->lock, flags);
return retval;
}
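
`table_hash_insert()` drops its IRQ-safe spinlock rather than replacing it with anything local: every caller now enters through `dp_mutex`, so mutual exclusion among writers is established one level up, while lookups stay lockless by reading bucket pointers under RCU. A sketch of that division of labor, assuming bucket pointers are published with `rcu_assign_pointer()`; the mutex and helper names are stand-ins, and the occupied-bucket replacement path is left out:

#include <linux/mutex.h>
#include <linux/rcupdate.h>

#define N_BUCKETS 1024          /* must be a power of two */

struct flow {
        unsigned int hash;
        /* ... key, actions, stats ... */
};

static struct flow __rcu *buckets[N_BUCKETS];
static DEFINE_MUTEX(dp_mutex_sketch);   /* stands in for dp_mutex */

/* Caller holds dp_mutex_sketch, as the datapath holds dp_mutex. */
static int hash_insert(struct flow *flow)
{
        struct flow __rcu **bucket = &buckets[flow->hash & (N_BUCKETS - 1)];

        if (rcu_dereference_protected(*bucket,
                                      lockdep_is_held(&dp_mutex_sketch)))
                return 0;       /* occupied; replacement path elided */

        /* rcu_assign_pointer() orders the flow's initialization before
         * the pointer becomes visible to lockless readers. */
        rcu_assign_pointer(*bucket, flow);
        return 1;
}
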
swt->iterate = table_hash_iterate;
swt->stats = table_hash_stats;
- spin_lock_init(&th->lock);
crc32_init(&th->crc32, polynomial);
atomic_set(&th->n_flows, 0);

struct sw_table_linear {
struct sw_table swt;
- spinlock_t lock;
unsigned int max_flows;
atomic_t n_flows;
struct list_head flows;

static int table_linear_insert(struct sw_table *swt, struct sw_flow *flow)
{
struct sw_table_linear *tl = (struct sw_table_linear *) swt;
- unsigned long int flags;
struct sw_flow *f;
* always be placed behind those with equal priority. Just replace
* any flows that match exactly.
*/
- spin_lock_irqsave(&tl->lock, flags);
- list_for_each_entry_rcu (f, &tl->flows, node) {
+ list_for_each_entry (f, &tl->flows, node) {
if (f->priority == flow->priority
&& f->key.wildcards == flow->key.wildcards
&& flow_matches(&f->key, &flow->key)) {
flow->serial = f->serial;
list_replace_rcu(&f->node, &flow->node);
list_replace_rcu(&f->iter_node, &flow->iter_node);
- spin_unlock_irqrestore(&tl->lock, flags);
flow_deferred_free(f);
return 1;
}
/* Make sure there's room in the table. */
if (atomic_read(&tl->n_flows) >= tl->max_flows) {
- spin_unlock_irqrestore(&tl->lock, flags);
return 0;
}
atomic_inc(&tl->n_flows);
flow->serial = tl->next_serial++;
list_add_tail_rcu(&flow->node, &f->node);
list_add_rcu(&flow->iter_node, &tl->iter_flows);
- spin_unlock_irqrestore(&tl->lock, flags);
return 1;
}
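
Note what stays RCU here even though the walk itself no longer is: `list_replace_rcu()` and `list_add_tail_rcu()` are publication primitives for the benefit of lockless readers (a reader sees either the old flow or its replacement, never a half-linked list), not for the writer. The tail insert also leans on a `list_for_each_entry()` subtlety: when the loop finishes without breaking, `f` aliases the list head, so adding "before `f`" appends at the tail. A sketch with illustrative names; the priority comparison that breaks the walk early is not visible in this excerpt and is reconstructed here:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct flow {
        struct list_head node;
        unsigned int priority;
};

/* Caller holds the writer mutex; readers walk under rcu_read_lock(). */
static void insert_by_priority(struct list_head *flows, struct flow *flow)
{
        struct flow *f;

        list_for_each_entry (f, flows, node)
                if (f->priority < flow->priority)
                        break;

        /* If the loop broke, f is the first lower-priority flow; if it
         * ran off the end, &f->node is the list head itself.  Either
         * way, inserting before f places the new flow behind every
         * flow of equal priority, as the comment above requires. */
        list_add_tail_rcu(&flow->node, &f->node);
}
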
struct sw_flow *flow;
unsigned int count = 0;
- list_for_each_entry_rcu (flow, &tl->flows, node) {
+ list_for_each_entry (flow, &tl->flows, node) {
if (flow_del_matches(&flow->key, key, strict)
&& (!strict || (flow->priority == priority)))
count += do_delete(swt, flow);

int count = 0;
mutex_lock(&dp_mutex);
- list_for_each_entry_rcu (flow, &tl->flows, node) {
+ list_for_each_entry (flow, &tl->flows, node) {
if (flow_timeout(flow)) {
count += do_delete(swt, flow);
if (dp->flags & OFPC_SEND_FLOW_EXP)

unsigned long start;
start = position->private[0];
- list_for_each_entry_rcu (flow, &tl->iter_flows, iter_node) {
+ list_for_each_entry (flow, &tl->iter_flows, iter_node) {
if (flow->serial >= start && flow_matches(key, &flow->key)) {
int error = callback(flow, private);
if (error) {

atomic_set(&tl->n_flows, 0);
INIT_LIST_HEAD(&tl->flows);
INIT_LIST_HEAD(&tl->iter_flows);
- spin_lock_init(&tl->lock);
tl->next_serial = 0;
return swt;
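
For completeness, the read side this patch leaves untouched: lookups can still traverse the flow lists with no lock at all, because writers publish with the `_rcu` list operations and `flow_deferred_free()` (seen in the insert hunk) defers reclamation past a grace period, presumably via `call_rcu()`. A hedged sketch of such a reader, with illustrative types, plus a usage example showing where the RCU read-side critical section belongs:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct flow_key {
        unsigned int id;
};

struct flow {
        struct list_head node;
        struct flow_key key;
};

/* Returns a flow the caller may touch only while it still holds
 * rcu_read_lock(); the grace period keeps the memory alive at least
 * that long even if a writer unlinks the flow concurrently. */
static struct flow *lookup_flow(struct list_head *flows,
                                const struct flow_key *key)
{
        struct flow *f;

        list_for_each_entry_rcu (f, flows, node)
                if (f->key.id == key->id)
                        return f;
        return NULL;
}

static bool flow_exists(struct list_head *flows, const struct flow_key *key)
{
        bool found;

        rcu_read_lock();
        found = lookup_flow(flows, key) != NULL;
        rcu_read_unlock();
        return found;
}
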