/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
 * Stanford Junior University
 */

/* Project headers assumed from the identifiers used below (sw_table,
 * sw_flow, datapath, crc32_*); exact header names may differ. */
#include "table.h"
#include "flow.h"
#include "datapath.h"
#include "crc32.h"

#include <linux/slab.h>
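
/* A flow table that handles only flows matching exactly on Ethernet source
 * address, with all other fields wildcarded.  Flows are hashed by CRC-32 of
 * the flow key into a power-of-2 array of RCU-protected bucket chains. */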
struct sw_table_mac {
        struct sw_table swt;            /* Base table; must be first member. */
        spinlock_t lock;                /* Serializes writers. */
        struct crc32 crc32;
        atomic_t n_flows;
        unsigned int max_flows;
        unsigned int bucket_mask;       /* Number of buckets minus 1. */
        struct hlist_head *buckets;
};

static struct hlist_head *find_bucket(struct sw_table *swt,
                                      const struct sw_flow_key *key)
{
        struct sw_table_mac *tm = (struct sw_table_mac *) swt;
        unsigned int crc = crc32_calculate(&tm->crc32, key, sizeof *key);
        return &tm->buckets[crc & tm->bucket_mask];
}

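/* Returns the flow whose Ethernet source address matches 'key', or NULL.
 * Traverses the bucket chain with RCU primitives, so callers are expected
 * to hold rcu_read_lock(). */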
static struct sw_flow *table_mac_lookup(struct sw_table *swt,
                                        const struct sw_flow_key *key)
{
        struct hlist_head *bucket = find_bucket(swt, key);
        struct hlist_node *pos;
        struct sw_flow *flow;

        hlist_for_each_entry_rcu (flow, pos, bucket, u.hnode)
                if (!memcmp(key->dl_src, flow->key.dl_src, 6))
                        return flow;
        return NULL;
}

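/* Inserts 'flow' into the table, replacing any existing flow with the same
 * Ethernet source address.  Returns 1 on success, 0 if the flow does not
 * fit this table's wildcard pattern or the table is full. */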
static int table_mac_insert(struct sw_table *swt, struct sw_flow *flow)
{
        struct sw_table_mac *tm = (struct sw_table_mac *) swt;
        struct hlist_head *bucket;
        struct hlist_node *pos;
        struct sw_flow *f;
        unsigned long int flags;

        /* MAC table only handles flows that match on Ethernet
           source address and wildcard everything else. */
        if (likely(flow->key.wildcards != (OFPFW_ALL & ~OFPFW_DL_SRC)))
                return 0;
        bucket = find_bucket(swt, &flow->key);

        spin_lock_irqsave(&tm->lock, flags);
        hlist_for_each_entry_rcu (f, pos, bucket, u.hnode) {
                if (!memcmp(f->key.dl_src, flow->key.dl_src, 6)) {
                        /* Duplicate source MAC: replace the old flow. */
                        hlist_replace_rcu(&f->u.hnode, &flow->u.hnode);
                        spin_unlock_irqrestore(&tm->lock, flags);
                        flow_deferred_free(f);
                        return 1;
                }
        }

        /* Table overflow? */
        if (atomic_read(&tm->n_flows) >= tm->max_flows) {
                spin_unlock_irqrestore(&tm->lock, flags);
                return 0;
        }
        atomic_inc(&tm->n_flows);

        hlist_add_head_rcu(&flow->u.hnode, bucket);
        spin_unlock_irqrestore(&tm->lock, flags);
        return 1;
}

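/* Unlinks 'flow' from its bucket and schedules it for freeing after an RCU
 * grace period.  Always returns 1 so callers can accumulate a count. */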
static int do_delete(struct sw_table *swt, struct sw_flow *flow)
{
        hlist_del_rcu(&flow->u.hnode);
        flow_deferred_free(flow);
        return 1;
}

/* Returns number of deleted flows. */
static int table_mac_delete(struct sw_table *swt,
                            const struct sw_flow_key *key, int strict)
{
        struct sw_table_mac *tm = (struct sw_table_mac *) swt;

        if (key->wildcards == (OFPFW_ALL & ~OFPFW_DL_SRC)) {
                /* Fully specified source MAC: at most one match. */
                struct sw_flow *flow = table_mac_lookup(swt, key);
                if (flow && do_delete(swt, flow)) {
                        atomic_dec(&tm->n_flows);
                        return 1;
                }
                return 0;
        } else {
                unsigned int i;
                int count = 0;
                for (i = 0; i <= tm->bucket_mask; i++) {
                        struct hlist_head *bucket = &tm->buckets[i];
                        struct hlist_node *pos;
                        struct sw_flow *flow;
                        hlist_for_each_entry_rcu (flow, pos, bucket, u.hnode)
                                if (flow_del_matches(&flow->key, key, strict))
                                        count += do_delete(swt, flow);
                }
                if (count)
                        atomic_sub(count, &tm->n_flows);
                return count;
        }
}

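/* Expires every timed-out flow, notifying the controller when it has asked
 * for flow expiration messages.  Returns the number of flows deleted. */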
static int table_mac_timeout(struct datapath *dp, struct sw_table *swt)
{
        struct sw_table_mac *tm = (struct sw_table_mac *) swt;
        unsigned int i;
        int count = 0;

        for (i = 0; i <= tm->bucket_mask; i++) {
                struct hlist_head *bucket = &tm->buckets[i];
                struct hlist_node *pos;
                struct sw_flow *flow;
                hlist_for_each_entry_rcu (flow, pos, bucket, u.hnode) {
                        if (flow_timeout(flow)) {
                                count += do_delete(swt, flow);
                                if (dp->hello_flags & OFP_CHELLO_SEND_FLOW_EXP)
                                        dp_send_flow_expired(dp, flow);
                        }
                }
        }
        if (count)
                atomic_sub(count, &tm->n_flows);
        return count;
}

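/* Frees the table and any remaining flows.  Assumes no RCU readers still
 * hold references, so entries are unlinked and freed immediately rather
 * than deferred. */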
static void table_mac_destroy(struct sw_table *swt)
{
        struct sw_table_mac *tm = (struct sw_table_mac *) swt;
        unsigned int i;
        for (i = 0; i <= tm->bucket_mask; i++) {
                struct hlist_head *hlist = &tm->buckets[i];
                while (!hlist_empty(hlist)) {
                        struct sw_flow *flow = hlist_entry(hlist->first,
                                                struct sw_flow, u.hnode);
                        hlist_del(&flow->u.hnode);
                        flow_free(flow);
                }
        }
        kfree(tm->buckets);
        kfree(tm);
}

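/* Iterator state: the table being walked and the index of the bucket that
 * contains the current flow. */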
struct swt_iterator_mac {
        struct sw_table_mac *tm;
        unsigned int bucket_i;
};

static struct sw_flow *next_head_flow(struct swt_iterator_mac *im)
{
        /* Starting from the current bucket, return the head flow of the
         * first non-empty bucket, or NULL if none remain. */
        for (; im->bucket_i <= im->tm->bucket_mask; im->bucket_i++) {
                struct hlist_node *first = im->tm->buckets[im->bucket_i].first;
                if (first != NULL)
                        return hlist_entry(first, struct sw_flow, u.hnode);
        }
        return NULL;
}

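/* Initializes 'swt_iter' to the first flow in the table (NULL if the table
 * is empty).  Returns 0 if iterator state cannot be allocated, 1 otherwise. */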
static int table_mac_iterator(struct sw_table *swt,
                              struct swt_iterator *swt_iter)
{
        struct swt_iterator_mac *im;

        swt_iter->private = im = kmalloc(sizeof *im, GFP_KERNEL);
        if (im == NULL)
                return 0;
        im->tm = (struct sw_table_mac *) swt;

        if (atomic_read(&im->tm->n_flows) == 0)
                swt_iter->flow = NULL;
        else {
                im->bucket_i = 0;
                swt_iter->flow = next_head_flow(im);
        }
        return 1;
}

static void table_mac_next(struct swt_iterator *swt_iter)
{
        struct swt_iterator_mac *im;
        struct hlist_node *next;

        if (swt_iter->flow == NULL)
                return;
        im = (struct swt_iterator_mac *) swt_iter->private;

        next = swt_iter->flow->u.hnode.next;
        if (next != NULL)
                swt_iter->flow = hlist_entry(next, struct sw_flow, u.hnode);
        else {
                /* Bucket exhausted: move to the next non-empty bucket. */
                im->bucket_i++;
                swt_iter->flow = next_head_flow(im);
        }
}

static void table_mac_iterator_destroy(struct swt_iterator *swt_iter)
{
        kfree(swt_iter->private);
}

static void table_mac_stats(struct sw_table *swt, struct sw_table_stats *stats)
{
        struct sw_table_mac *tm = (struct sw_table_mac *) swt;
        stats->name = "mac";
        stats->n_flows = atomic_read(&tm->n_flows);
        stats->max_flows = tm->max_flows;
}

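/* Creates and returns a MAC table with 'n_buckets' hash buckets (which must
 * be a power of 2) holding at most 'max_flows' flows, or NULL if memory
 * cannot be allocated. */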
struct sw_table *table_mac_create(unsigned int n_buckets,
                                  unsigned int max_flows)
{
        struct sw_table_mac *tm;
        struct sw_table *swt;

        tm = kzalloc(sizeof *tm, GFP_KERNEL);
        if (tm == NULL)
                return NULL;

        /* The bucket mask below requires a power-of-2 bucket count. */
        BUG_ON(n_buckets & (n_buckets - 1));

        tm->buckets = kzalloc(n_buckets * sizeof *tm->buckets, GFP_KERNEL);
        if (tm->buckets == NULL) {
                printk(KERN_ERR "failed to allocate %u buckets\n", n_buckets);
                kfree(tm);
                return NULL;
        }
        tm->bucket_mask = n_buckets - 1;

        swt = &tm->swt;
        swt->lookup = table_mac_lookup;
        swt->insert = table_mac_insert;
        swt->delete = table_mac_delete;
        swt->timeout = table_mac_timeout;
        swt->destroy = table_mac_destroy;
        swt->stats = table_mac_stats;

        swt->iterator = table_mac_iterator;
        swt->iterator_next = table_mac_next;
        swt->iterator_destroy = table_mac_iterator_destroy;

        crc32_init(&tm->crc32, 0x04C11DB7); /* Ethernet CRC polynomial. */
        atomic_set(&tm->n_flows, 0);
        tm->max_flows = max_flows;
        spin_lock_init(&tm->lock);

        return swt;
}