When deleting flows, insist on a matching priority only when strict matching is requested.
[openvswitch] / datapath / table-linear.c
1 /*
2  * Distributed under the terms of the GNU GPL version 2.
3  * Copyright (c) 2007, 2008 The Board of Trustees of The Leland 
4  * Stanford Junior University
5  */
6
7 #include "table.h"
8 #include "flow.h"
9 #include "datapath.h"
10
11 #include <linux/rcupdate.h>
12 #include <linux/slab.h>
13 #include <linux/list.h>
14
/* A "linear" flow table: all flows kept on a single list in decreasing
 * priority order and searched sequentially.  Readers traverse under RCU;
 * writers serialize on 'lock'. */
struct sw_table_linear {
        struct sw_table swt;            /* Base table; must be the first
                                         * member (code casts sw_table * to
                                         * sw_table_linear *). */

        spinlock_t lock;                /* Serializes table modifications. */
        unsigned int max_flows;         /* Capacity limit for the table. */
        atomic_t n_flows;               /* Current number of flows. */
        struct list_head flows;         /* sw_flow 'node' links, kept in
                                         * decreasing priority order. */
        struct list_head iter_flows;    /* sw_flow 'iter_node' links, used
                                         * by table_linear_iterate(). */
        unsigned long int next_serial;  /* NOTE(review): initialized to 0 in
                                         * table_linear_create() but not
                                         * otherwise used in this chunk. */
};
25
26 static struct sw_flow *table_linear_lookup(struct sw_table *swt,
27                                          const struct sw_flow_key *key)
28 {
29         struct sw_table_linear *tl = (struct sw_table_linear *) swt;
30         struct sw_flow *flow;
31         list_for_each_entry_rcu (flow, &tl->flows, node) {
32                 if (flow_matches(&flow->key, key))
33                         return flow;
34         }
35         return NULL;
36 }
37
/* Inserts 'flow' into 'swt', keeping the list sorted by decreasing
 * priority.  A flow with identical priority, wildcards, and key replaces
 * the existing entry in place.  Returns 1 on success, 0 if the table is
 * full.  Writers are serialized by tl->lock; readers may be traversing
 * the lists concurrently under RCU. */
static int table_linear_insert(struct sw_table *swt, struct sw_flow *flow)
{
        struct sw_table_linear *tl = (struct sw_table_linear *) swt;
        unsigned long int flags;
        struct sw_flow *f;


        /* Loop through the existing list of entries.  New entries will
         * always be placed behind those with equal priority.  Just replace 
         * any flows that match exactly.
         */
        spin_lock_irqsave(&tl->lock, flags);
        list_for_each_entry_rcu (f, &tl->flows, node) {
                if (f->priority == flow->priority
                                && f->key.wildcards == flow->key.wildcards
                                && flow_matches(&f->key, &flow->key)
                                && flow_del(f)) {
                        /* Exact duplicate: take over its serial number and
                         * splice the new flow into both lists in its place,
                         * so concurrent readers see either the old or the
                         * new flow, never a gap.  n_flows is unchanged. */
                        flow->serial = f->serial;
                        list_replace_rcu(&f->node, &flow->node);
                        list_replace_rcu(&f->iter_node, &flow->iter_node);
                        spin_unlock_irqrestore(&tl->lock, flags);
                        /* Readers may still hold a reference to 'f'; defer
                         * the free until after an RCU grace period. */
                        flow_deferred_free(f);
                        return 1;
                }

                /* List is sorted by decreasing priority, so the first entry
                 * with lower priority marks the insertion point. */
                if (f->priority < flow->priority)
                        break;
        }

        /* Make sure there's room in the table. */
        if (atomic_read(&tl->n_flows) >= tl->max_flows) {
                spin_unlock_irqrestore(&tl->lock, flags);
                return 0;
        }
        atomic_inc(&tl->n_flows);

        /* Insert the entry immediately in front of where we're pointing. */
        list_add_tail_rcu(&flow->node, &f->node);
        list_add_rcu(&flow->iter_node, &tl->iter_flows);
        spin_unlock_irqrestore(&tl->lock, flags);
        return 1;
}
80
81 static int do_delete(struct sw_table *swt, struct sw_flow *flow) 
82 {
83         if (flow_del(flow)) {
84                 list_del_rcu(&flow->node);
85                 list_del_rcu(&flow->iter_node);
86                 flow_deferred_free(flow);
87                 return 1;
88         }
89         return 0;
90 }
91
/* Deletes from 'swt' every flow that matches 'key' according to
 * flow_del_matches().  When 'strict' is nonzero, a flow is deleted only if
 * its priority also equals 'priority'; otherwise priority is ignored.
 * Returns the number of flows deleted. */
static int table_linear_delete(struct sw_table *swt,
                                const struct sw_flow_key *key, uint16_t priority, int strict)
{
        struct sw_table_linear *tl = (struct sw_table_linear *) swt;
        struct sw_flow *flow;
        unsigned int count = 0;

        /* Deleting the current entry mid-traversal is safe here because
         * list_del_rcu() leaves the removed node's forward pointer intact,
         * so the iteration can continue from it. */
        list_for_each_entry_rcu (flow, &tl->flows, node) {
                if (flow_del_matches(&flow->key, key, strict)
                                && (!strict || (flow->priority == priority)))
                        count += do_delete(swt, flow);
        }
        /* Flow count is updated once at the end rather than per deletion. */
        if (count)
                atomic_sub(count, &tl->n_flows);
        return count;
}
108
109 static int table_linear_timeout(struct datapath *dp, struct sw_table *swt)
110 {
111         struct sw_table_linear *tl = (struct sw_table_linear *) swt;
112         struct sw_flow *flow;
113         int count = 0;
114
115         list_for_each_entry_rcu (flow, &tl->flows, node) {
116                 if (flow_timeout(flow)) {
117                         count += do_delete(swt, flow);
118                         if (dp->flags & OFPC_SEND_FLOW_EXP)
119                                 dp_send_flow_expired(dp, flow);
120                 }
121         }
122         if (count)
123                 atomic_sub(count, &tl->n_flows);
124         return count;
125 }
126
127 static void table_linear_destroy(struct sw_table *swt)
128 {
129         struct sw_table_linear *tl = (struct sw_table_linear *) swt;
130
131         while (!list_empty(&tl->flows)) {
132                 struct sw_flow *flow = list_entry(tl->flows.next,
133                                                   struct sw_flow, node);
134                 list_del(&flow->node);
135                 flow_free(flow);
136         }
137         kfree(tl);
138 }
139
/* Invokes 'callback' with 'private' for each flow in 'swt' that matches
 * 'key', resuming from the point recorded in 'position'.  If the callback
 * returns nonzero, iteration stops, the position is saved, and that value
 * is returned; otherwise returns 0 after visiting all remaining flows. */
static int table_linear_iterate(struct sw_table *swt,
                                const struct sw_flow_key *key,
                                struct sw_table_position *position,
                                int (*callback)(struct sw_flow *, void *),
                                void *private)
{
        struct sw_table_linear *tl = (struct sw_table_linear *) swt;
        struct sw_flow *flow;
        unsigned long start;

        /* position->private[0] stores the bitwise complement of the last
         * visited flow's serial, so an all-zeros initial position yields
         * start == ULONG_MAX and every flow passes the serial filter on the
         * first call.  On resumed calls, flows with serial > start have
         * presumably been reported already and are skipped — assumes serial
         * numbers on iter_flows relate to insertion order; confirm against
         * how serials are assigned elsewhere. */
        start = ~position->private[0];
        list_for_each_entry_rcu (flow, &tl->iter_flows, iter_node) {
                if (flow->serial <= start && flow_matches(key, &flow->key)) {
                        int error = callback(flow, private);
                        if (error) {
                                /* Record where to resume next call. */
                                position->private[0] = ~flow->serial;
                                return error;
                        }
                }
        }
        return 0;
}
162
163 static void table_linear_stats(struct sw_table *swt,
164                                 struct sw_table_stats *stats)
165 {
166         struct sw_table_linear *tl = (struct sw_table_linear *) swt;
167         stats->name = "linear";
168         stats->n_flows = atomic_read(&tl->n_flows);
169         stats->max_flows = tl->max_flows;
170 }
171
172
173 struct sw_table *table_linear_create(unsigned int max_flows)
174 {
175         struct sw_table_linear *tl;
176         struct sw_table *swt;
177
178         tl = kzalloc(sizeof *tl, GFP_KERNEL);
179         if (tl == NULL)
180                 return NULL;
181
182         swt = &tl->swt;
183         swt->lookup = table_linear_lookup;
184         swt->insert = table_linear_insert;
185         swt->delete = table_linear_delete;
186         swt->timeout = table_linear_timeout;
187         swt->destroy = table_linear_destroy;
188         swt->iterate = table_linear_iterate;
189         swt->stats = table_linear_stats;
190
191         tl->max_flows = max_flows;
192         atomic_set(&tl->n_flows, 0);
193         INIT_LIST_HEAD(&tl->flows);
194         INIT_LIST_HEAD(&tl->iter_flows);
195         spin_lock_init(&tl->lock);
196         tl->next_serial = 0;
197
198         return swt;
199 }