2 * Distributed under the terms of the GNU GPL version 2.
3 * Copyright (c) 2007 The Board of Trustees of The Leland Stanford Junior University
12 #include <linux/slab.h>
13 #include <linux/vmalloc.h>
15 #include <linux/highmem.h>
16 #include <asm/pgtable.h>
18 static void *kmem_alloc(size_t);
19 static void *kmem_zalloc(size_t);
20 static void kmem_free(void *, size_t);
/* Exact-match flow table implemented as an open hash with one flow per
 * bucket (no chaining).  NOTE(review): this struct is shown elided; the
 * spinlock, crc32 state and atomic flow counter referenced elsewhere in
 * this file are declared in lines not visible here. */
22 struct sw_table_hash {
27 unsigned int bucket_mask; /* Number of buckets minus 1. */
28 struct sw_flow **buckets;
/* Maps *key to its bucket slot: CRC32 of the whole key, reduced with a
 * mask — valid because the bucket count is a power of two (enforced by
 * BUG_ON in table_hash_create). */
31 static struct sw_flow **find_bucket(struct sw_table *swt,
32 const struct sw_flow_key *key)
34 struct sw_table_hash *th = (struct sw_table_hash *) swt;
35 unsigned int crc = crc32_calculate(&th->crc32, key, sizeof *key);
36 return &th->buckets[crc & th->bucket_mask];
/* Exact-match lookup.  A bucket holds at most one flow, so a single
 * probe suffices; the memcmp rejects CRC collisions.  Returns the flow
 * or NULL if the bucket is empty or holds a different key. */
39 static struct sw_flow *table_hash_lookup(struct sw_table *swt,
40 const struct sw_flow_key *key)
42 struct sw_flow *flow = *find_bucket(swt, key);
43 return flow && !memcmp(&flow->key, key, sizeof *key) ? flow : NULL;
/* Inserts an exact-match flow.  Wildcarded flows are rejected up front
 * (this table can only hash fully-specified keys).  Under the table
 * spinlock (IRQ-saving variant, so this is callable from contexts with
 * interrupts disabled):
 *   - empty bucket: publish the flow with rcu_assign_pointer and bump
 *     the flow count;
 *   - occupied bucket with an identical key: replace the old flow and
 *     free it via the RCU-deferred path so concurrent lockless readers
 *     stay safe.
 * NOTE(review): the failure paths (collision with a different key, and
 * the return values) are in lines elided from this view. */
46 static int table_hash_insert(struct sw_table *swt, struct sw_flow *flow)
48 struct sw_table_hash *th = (struct sw_table_hash *) swt;
49 struct sw_flow **bucket;
50 unsigned long int flags;
53 if (flow->key.wildcards != 0)
56 spin_lock_irqsave(&th->lock, flags);
57 bucket = find_bucket(swt, &flow->key);
58 if (*bucket == NULL) {
59 atomic_inc(&th->n_flows);
60 rcu_assign_pointer(*bucket, flow);
63 struct sw_flow *old_flow = *bucket;
64 if (!memcmp(&old_flow->key, &flow->key, sizeof flow->key)
65 && flow_del(old_flow)) {
66 rcu_assign_pointer(*bucket, flow);
67 flow_deferred_free(old_flow);
73 spin_unlock_irqrestore(&th->lock, flags);
77 /* Caller must update n_flows. */
/* Empties the bucket (RCU-published NULL so lockless readers see a
 * consistent pointer) and schedules the flow for deferred freeing.
 * NOTE(review): the returned count and any flow_del() call are in
 * elided lines — confirm against the full source. */
78 static int do_delete(struct sw_flow **bucket, struct sw_flow *flow)
81 rcu_assign_pointer(*bucket, NULL);
82 flow_deferred_free(flow);
88 /* Returns number of deleted flows. */
/* Fast path: an exact (wildcard-free) key hashes to exactly one bucket,
 * so only that bucket is probed.  Otherwise every bucket is scanned and
 * flow_del_matches() decides per flow, honoring `strict`.  The flow
 * counter is adjusted once at the end with the accumulated count. */
89 static int table_hash_delete(struct sw_table *swt,
90 const struct sw_flow_key *key, int strict)
92 struct sw_table_hash *th = (struct sw_table_hash *) swt;
93 unsigned int count = 0;
95 if (key->wildcards == 0) {
96 struct sw_flow **bucket = find_bucket(swt, key);
97 struct sw_flow *flow = *bucket;
98 if (flow && !memcmp(&flow->key, key, sizeof *key))
99 count = do_delete(bucket, flow);
103 for (i = 0; i <= th->bucket_mask; i++) {
104 struct sw_flow **bucket = &th->buckets[i];
105 struct sw_flow *flow = *bucket;
106 if (flow && flow_del_matches(&flow->key, key, strict))
107 count += do_delete(bucket, flow);
111 atomic_sub(count, &th->n_flows);
/* Expires timed-out flows: scans every bucket, deletes flows for which
 * flow_timeout() reports expiry, and — if the datapath negotiated
 * OFP_CHELLO_SEND_FLOW_EXP — notifies the controller.  Note the expiry
 * message is sent *after* do_delete(); the flow is still valid there
 * only because do_delete() uses deferred (RCU) freeing — presumably
 * intentional, but worth confirming against the RCU grace-period rules. */
115 static int table_hash_timeout(struct datapath *dp, struct sw_table *swt)
117 struct sw_table_hash *th = (struct sw_table_hash *) swt;
121 for (i = 0; i <= th->bucket_mask; i++) {
122 struct sw_flow **bucket = &th->buckets[i];
123 struct sw_flow *flow = *bucket;
124 if (flow && flow_timeout(flow)) {
125 count += do_delete(bucket, flow);
126 if (dp->hello_flags & OFP_CHELLO_SEND_FLOW_EXP)
127 dp_send_flow_expired(dp, flow);
132 atomic_sub(count, &th->n_flows);
/* Tears the table down: frees every resident flow, then the bucket
 * array (sized bucket_mask+1 pointers, matching the kmem_zalloc in
 * table_hash_create).  NOTE(review): the free of the sw_table_hash
 * struct itself is in elided lines. */
136 static void table_hash_destroy(struct sw_table *swt)
138 struct sw_table_hash *th = (struct sw_table_hash *) swt;
140 for (i = 0; i <= th->bucket_mask; i++)
142 flow_free(th->buckets[i]);
143 kmem_free(th->buckets, (th->bucket_mask + 1) * sizeof *th->buckets);
/* Iterator state for a single hash table: the table being walked and
 * the index of the bucket to examine next. */
147 struct swt_iterator_hash {
148 struct sw_table_hash *th;
149 unsigned int bucket_i;
/* Advances bucket_i to the next non-empty bucket and returns its flow.
 * NOTE(review): the return statements (flow found / end-of-table NULL)
 * are in elided lines — behavior inferred from the callers, confirm. */
152 static struct sw_flow *next_flow(struct swt_iterator_hash *ih)
154 for (;ih->bucket_i <= ih->th->bucket_mask; ih->bucket_i++) {
155 struct sw_flow *f = ih->th->buckets[ih->bucket_i];
/* Starts an iteration: allocates the private cursor, binds it to the
 * table, and positions swt_iter->flow on the first flow (NULL for an
 * empty table).  NOTE(review): the kmalloc-failure return and the
 * bucket_i initialization are in elided lines. */
163 static int table_hash_iterator(struct sw_table *swt,
164 struct swt_iterator *swt_iter)
166 struct swt_iterator_hash *ih;
168 swt_iter->private = ih = kmalloc(sizeof *ih, GFP_KERNEL);
173 ih->th = (struct sw_table_hash *) swt;
176 swt_iter->flow = next_flow(ih);
/* Steps the iterator to the next flow; a no-op once the iteration has
 * already reached the end (flow == NULL). */
181 static void table_hash_next(struct swt_iterator *swt_iter)
183 struct swt_iterator_hash *ih;
185 if (swt_iter->flow == NULL)
188 ih = (struct swt_iterator_hash *) swt_iter->private;
191 swt_iter->flow = next_flow(ih);
/* Releases the iterator's private cursor allocated in
 * table_hash_iterator().  kfree(NULL) is a safe no-op. */
194 static void table_hash_iterator_destroy(struct swt_iterator *swt_iter)
196 kfree(swt_iter->private);
/* Reports table statistics.  Capacity is bucket_mask + 1 because this
 * table stores at most one flow per bucket. */
199 static void table_hash_stats(struct sw_table *swt,
200 struct sw_table_stats *stats)
202 struct sw_table_hash *th = (struct sw_table_hash *) swt;
203 stats->name = "hash";
204 stats->n_flows = atomic_read(&th->n_flows);
205 stats->max_flows = th->bucket_mask + 1;
/* Builds a hash table with n_buckets buckets (must be a power of two —
 * the BUG_ON enforces it, since find_bucket() reduces the hash with a
 * mask) using the given CRC32 polynomial.  The bucket array is zeroed
 * so all buckets start empty.  Wires up the sw_table vtable and
 * initializes the lock, CRC state, and flow counter.
 * NOTE(review): the error-path frees and the final return are in
 * elided lines. */
208 struct sw_table *table_hash_create(unsigned int polynomial,
209 unsigned int n_buckets)
211 struct sw_table_hash *th;
212 struct sw_table *swt;
214 th = kmalloc(sizeof *th, GFP_KERNEL);
218 BUG_ON(n_buckets & (n_buckets - 1));
219 th->buckets = kmem_zalloc(n_buckets * sizeof *th->buckets);
220 if (th->buckets == NULL) {
221 printk("failed to allocate %u buckets\n", n_buckets);
225 th->bucket_mask = n_buckets - 1;
228 swt->lookup = table_hash_lookup;
229 swt->insert = table_hash_insert;
230 swt->delete = table_hash_delete;
231 swt->timeout = table_hash_timeout;
232 swt->destroy = table_hash_destroy;
233 swt->iterator = table_hash_iterator;
234 swt->iterator_next = table_hash_next;
235 swt->iterator_destroy = table_hash_iterator_destroy;
236 swt->stats = table_hash_stats;
238 spin_lock_init(&th->lock);
239 crc32_init(&th->crc32, polynomial);
240 atomic_set(&th->n_flows, 0);
245 /* Double-hashing table. */
/* Two independent hash subtables (different polynomials/sizes); each
 * operation below probes or fans out to both.  This reduces collision
 * losses versus a single table. */
247 struct sw_table_hash2 {
249 struct sw_table *subtable[2];
/* Probes subtable 0 then subtable 1; returns the first exact match.
 * NOTE(review): the "return flow" / final "return NULL" lines are
 * elided from this view. */
252 static struct sw_flow *table_hash2_lookup(struct sw_table *swt,
253 const struct sw_flow_key *key)
255 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
258 for (i = 0; i < 2; i++) {
259 struct sw_flow *flow = *find_bucket(t2->subtable[i], key);
260 if (flow && !memcmp(&flow->key, key, sizeof *key))
/* Tries subtable 0 first; only if that insert fails (bucket conflict)
 * does the flow fall through to subtable 1. */
266 static int table_hash2_insert(struct sw_table *swt, struct sw_flow *flow)
268 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
270 if (table_hash_insert(t2->subtable[0], flow))
272 return table_hash_insert(t2->subtable[1], flow);
/* Deletes from both subtables; the result is the total number of flows
 * removed (table_hash_delete returns a count). */
275 static int table_hash2_delete(struct sw_table *swt,
276 const struct sw_flow_key *key, int strict)
278 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
279 return (table_hash_delete(t2->subtable[0], key, strict)
280 + table_hash_delete(t2->subtable[1], key, strict));
/* Runs expiry on both subtables and returns the combined count of
 * expired flows. */
283 static int table_hash2_timeout(struct datapath *dp, struct sw_table *swt)
285 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
286 return (table_hash_timeout(dp, t2->subtable[0])
287 + table_hash_timeout(dp, t2->subtable[1]));
/* Destroys both subtables.  NOTE(review): the free of the
 * sw_table_hash2 wrapper itself is in elided lines — verify there is
 * no leak in the full source. */
290 static void table_hash2_destroy(struct sw_table *swt)
292 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
293 table_hash_destroy(t2->subtable[0]);
294 table_hash_destroy(t2->subtable[1]);
/* Iterator over a double-hash table: wraps a single-table iterator
 * (ih) plus the owning table.  NOTE(review): a `table_i` member
 * (which subtable is being walked — see table_hash2_next) is used
 * elsewhere but its declaration is elided from this view. */
298 struct swt_iterator_hash2 {
299 struct sw_table_hash2 *th2;
300 struct swt_iterator ih;
/* Starts iteration over both subtables.  Opens subtable 0's iterator;
 * if subtable 0 turns out to be empty its iterator is destroyed and
 * subtable 1's is opened instead.  NOTE(review): failure returns,
 * table_i bookkeeping, and cleanup on the second open failing are in
 * elided lines. */
304 static int table_hash2_iterator(struct sw_table *swt,
305 struct swt_iterator *swt_iter)
307 struct swt_iterator_hash2 *ih2;
309 swt_iter->private = ih2 = kmalloc(sizeof *ih2, GFP_KERNEL);
313 ih2->th2 = (struct sw_table_hash2 *) swt;
314 if (!table_hash_iterator(ih2->th2->subtable[0], &ih2->ih)) {
319 if (ih2->ih.flow != NULL) {
320 swt_iter->flow = ih2->ih.flow;
323 table_hash_iterator_destroy(&ih2->ih);
325 if (!table_hash_iterator(ih2->th2->subtable[1], &ih2->ih)) {
329 swt_iter->flow = ih2->ih.flow;
/* Advances the two-table iteration: steps the inner iterator, and when
 * subtable 0 (table_i == 0) is exhausted, tears down its iterator and
 * opens subtable 1's, surfacing its first flow.  Already-finished
 * iterations (flow == NULL) are a no-op.  On failure to open the
 * second iterator, ih.private is nulled so the destroy path below can
 * tell nothing is open. */
335 static void table_hash2_next(struct swt_iterator *swt_iter)
337 struct swt_iterator_hash2 *ih2;
339 if (swt_iter->flow == NULL)
342 ih2 = (struct swt_iterator_hash2 *) swt_iter->private;
343 table_hash_next(&ih2->ih);
345 if (ih2->ih.flow != NULL) {
346 swt_iter->flow = ih2->ih.flow;
348 if (ih2->table_i == 0) {
349 table_hash_iterator_destroy(&ih2->ih);
351 if (!table_hash_iterator(ih2->th2->subtable[1], &ih2->ih)) {
352 ih2->ih.private = NULL;
353 swt_iter->flow = NULL;
355 swt_iter->flow = ih2->ih.flow;
358 swt_iter->flow = NULL;
/* Destroys the inner single-table iterator if one is still open
 * (ih.private is NULLed by table_hash2_next when the second open
 * fails).  NOTE(review): the kfree of ih2 itself is in elided lines. */
363 static void table_hash2_iterator_destroy(struct swt_iterator *swt_iter)
365 struct swt_iterator_hash2 *ih2;
367 ih2 = (struct swt_iterator_hash2 *) swt_iter->private;
368 if (ih2->ih.private != NULL)
369 table_hash_iterator_destroy(&ih2->ih);
/* Aggregates statistics from both subtables: flow count and capacity
 * are simple sums; the reported name distinguishes the double table. */
373 static void table_hash2_stats(struct sw_table *swt,
374 struct sw_table_stats *stats)
376 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
377 struct sw_table_stats substats[2];
380 for (i = 0; i < 2; i++)
381 table_hash_stats(t2->subtable[i], &substats[i]);
382 stats->name = "hash2";
383 stats->n_flows = substats[0].n_flows + substats[1].n_flows;
384 stats->max_flows = substats[0].max_flows + substats[1].max_flows;
/* Builds a double-hash table from two (polynomial, bucket-count)
 * pairs, one per subtable.  Uses goto-style cleanup: if subtable 1
 * fails to build, subtable 0 is destroyed (label at line 421).
 * NOTE(review): the kmalloc-failure branch, the labels, and the final
 * return are in elided lines. */
387 struct sw_table *table_hash2_create(unsigned int poly0, unsigned int buckets0,
388 unsigned int poly1, unsigned int buckets1)
391 struct sw_table_hash2 *t2;
392 struct sw_table *swt;
394 t2 = kmalloc(sizeof *t2, GFP_KERNEL);
398 t2->subtable[0] = table_hash_create(poly0, buckets0);
399 if (t2->subtable[0] == NULL)
402 t2->subtable[1] = table_hash_create(poly1, buckets1);
403 if (t2->subtable[1] == NULL)
404 goto out_free_subtable0;
407 swt->lookup = table_hash2_lookup;
408 swt->insert = table_hash2_insert;
409 swt->delete = table_hash2_delete;
410 swt->timeout = table_hash2_timeout;
411 swt->destroy = table_hash2_destroy;
412 swt->stats = table_hash2_stats;
414 swt->iterator = table_hash2_iterator;
415 swt->iterator_next = table_hash2_next;
416 swt->iterator_destroy = table_hash2_iterator_destroy;
421 table_hash_destroy(t2->subtable[0]);
427 /* From fs/xfs/linux-2.4/kmem.c. */
/* Allocator that tolerates sizes beyond kmalloc's limit: tries kmalloc
 * first; the printk logs when the (elided) vmalloc fallback was used —
 * the caller must free with kmem_free(), which picks the matching
 * release routine by address range.  NOTE(review): the vmalloc call
 * and the KMALLOC_MAX_SIZE early-out body are in elided lines. */
430 kmem_alloc(size_t size)
434 #ifdef KMALLOC_MAX_SIZE
435 if (size > KMALLOC_MAX_SIZE)
438 ptr = kmalloc(size, GFP_KERNEL);
442 printk("openflow: used vmalloc for %lu bytes\n",
443 (unsigned long)size);
/* Zeroing wrapper around kmem_alloc().  NOTE(review): the NULL check
 * that should guard the memset is in an elided line (452) — confirm it
 * exists in the full source before assuming the memset is safe. */
449 kmem_zalloc(size_t size)
451 void *ptr = kmem_alloc(size);
453 memset(ptr, 0, size);
458 kmem_free(void *ptr, size_t size)
460 if (((unsigned long)ptr < VMALLOC_START) ||
461 ((unsigned long)ptr >= VMALLOC_END)) {