/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008 The Board of Trustees of The Leland
 * Stanford Junior University
 */
12 #include <linux/slab.h>
13 #include <linux/vmalloc.h>
15 #include <linux/highmem.h>
16 #include <asm/pgtable.h>
18 static void *kmem_alloc(size_t);
19 static void *kmem_zalloc(size_t);
20 static void kmem_free(void *, size_t);
22 struct sw_table_hash {
26 unsigned int bucket_mask; /* Number of buckets minus 1. */
27 struct sw_flow **buckets;
30 static struct sw_flow **find_bucket(struct sw_table *swt,
31 const struct sw_flow_key *key)
33 struct sw_table_hash *th = (struct sw_table_hash *) swt;
34 unsigned int crc = crc32_calculate(&th->crc32, key,
35 offsetof(struct sw_flow_key, wildcards));
36 return &th->buckets[crc & th->bucket_mask];
39 static struct sw_flow *table_hash_lookup(struct sw_table *swt,
40 const struct sw_flow_key *key)
42 struct sw_flow *flow = *find_bucket(swt, key);
43 return flow && flow_keys_equal(&flow->key, key) ? flow : NULL;
46 static int table_hash_insert(struct sw_table *swt, struct sw_flow *flow)
48 struct sw_table_hash *th = (struct sw_table_hash *) swt;
49 struct sw_flow **bucket;
52 if (flow->key.wildcards != 0)
55 bucket = find_bucket(swt, &flow->key);
56 if (*bucket == NULL) {
58 rcu_assign_pointer(*bucket, flow);
61 struct sw_flow *old_flow = *bucket;
62 if (flow_keys_equal(&old_flow->key, &flow->key)) {
63 rcu_assign_pointer(*bucket, flow);
64 flow_deferred_free(old_flow);
73 /* Caller must update n_flows. */
74 static int do_delete(struct sw_flow **bucket, struct sw_flow *flow)
76 rcu_assign_pointer(*bucket, NULL);
77 flow_deferred_free(flow);
81 /* Returns number of deleted flows. We can ignore the priority
82 * argument, since all exact-match entries are the same (highest)
84 static int table_hash_delete(struct sw_table *swt,
85 const struct sw_flow_key *key,
86 uint16_t priority, int strict)
88 struct sw_table_hash *th = (struct sw_table_hash *) swt;
89 unsigned int count = 0;
91 if (key->wildcards == 0) {
92 struct sw_flow **bucket = find_bucket(swt, key);
93 struct sw_flow *flow = *bucket;
94 if (flow && flow_keys_equal(&flow->key, key))
95 count = do_delete(bucket, flow);
99 for (i = 0; i <= th->bucket_mask; i++) {
100 struct sw_flow **bucket = &th->buckets[i];
101 struct sw_flow *flow = *bucket;
102 if (flow && flow_del_matches(&flow->key, key, strict))
103 count += do_delete(bucket, flow);
106 th->n_flows -= count;
110 static int table_hash_timeout(struct datapath *dp, struct sw_table *swt)
112 struct sw_table_hash *th = (struct sw_table_hash *) swt;
116 mutex_lock(&dp_mutex);
117 for (i = 0; i <= th->bucket_mask; i++) {
118 struct sw_flow **bucket = &th->buckets[i];
119 struct sw_flow *flow = *bucket;
121 int reason = flow_timeout(flow);
123 count += do_delete(bucket, flow);
124 dp_send_flow_expired(dp, flow, reason);
128 th->n_flows -= count;
129 mutex_unlock(&dp_mutex);
134 static void table_hash_destroy(struct sw_table *swt)
136 struct sw_table_hash *th = (struct sw_table_hash *) swt;
138 for (i = 0; i <= th->bucket_mask; i++)
140 flow_free(th->buckets[i]);
141 kmem_free(th->buckets, (th->bucket_mask + 1) * sizeof *th->buckets);
145 static int table_hash_iterate(struct sw_table *swt,
146 const struct sw_flow_key *key,
147 struct sw_table_position *position,
148 int (*callback)(struct sw_flow *, void *private),
151 struct sw_table_hash *th = (struct sw_table_hash *) swt;
153 if (position->private[0] > th->bucket_mask)
156 if (key->wildcards == 0) {
157 struct sw_flow *flow;
160 flow = table_hash_lookup(swt, key);
164 error = callback(flow, private);
166 position->private[0] = -1;
171 for (i = position->private[0]; i <= th->bucket_mask; i++) {
172 struct sw_flow *flow = th->buckets[i];
173 if (flow && flow_matches_1wild(&flow->key, key)) {
174 int error = callback(flow, private);
176 position->private[0] = i;
184 static void table_hash_stats(struct sw_table *swt,
185 struct sw_table_stats *stats)
187 struct sw_table_hash *th = (struct sw_table_hash *) swt;
188 stats->name = "hash";
189 stats->n_flows = th->n_flows;
190 stats->max_flows = th->bucket_mask + 1;
191 stats->n_matched = swt->n_matched;
194 struct sw_table *table_hash_create(unsigned int polynomial,
195 unsigned int n_buckets)
197 struct sw_table_hash *th;
198 struct sw_table *swt;
200 th = kzalloc(sizeof *th, GFP_KERNEL);
204 BUG_ON(n_buckets & (n_buckets - 1));
205 th->buckets = kmem_zalloc(n_buckets * sizeof *th->buckets);
206 if (th->buckets == NULL) {
207 printk("failed to allocate %u buckets\n", n_buckets);
211 th->bucket_mask = n_buckets - 1;
214 swt->lookup = table_hash_lookup;
215 swt->insert = table_hash_insert;
216 swt->delete = table_hash_delete;
217 swt->timeout = table_hash_timeout;
218 swt->destroy = table_hash_destroy;
219 swt->iterate = table_hash_iterate;
220 swt->stats = table_hash_stats;
222 crc32_init(&th->crc32, polynomial);
228 /* Double-hashing table. */
230 struct sw_table_hash2 {
232 struct sw_table *subtable[2];
235 static struct sw_flow *table_hash2_lookup(struct sw_table *swt,
236 const struct sw_flow_key *key)
238 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
241 for (i = 0; i < 2; i++) {
242 struct sw_flow *flow = *find_bucket(t2->subtable[i], key);
243 if (flow && flow_keys_equal(&flow->key, key))
249 static int table_hash2_insert(struct sw_table *swt, struct sw_flow *flow)
251 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
253 if (table_hash_insert(t2->subtable[0], flow))
255 return table_hash_insert(t2->subtable[1], flow);
258 static int table_hash2_delete(struct sw_table *swt,
259 const struct sw_flow_key *key,
260 uint16_t priority, int strict)
262 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
263 return (table_hash_delete(t2->subtable[0], key, priority, strict)
264 + table_hash_delete(t2->subtable[1], key, priority, strict));
267 static int table_hash2_timeout(struct datapath *dp, struct sw_table *swt)
269 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
270 return (table_hash_timeout(dp, t2->subtable[0])
271 + table_hash_timeout(dp, t2->subtable[1]));
274 static void table_hash2_destroy(struct sw_table *swt)
276 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
277 table_hash_destroy(t2->subtable[0]);
278 table_hash_destroy(t2->subtable[1]);
282 static int table_hash2_iterate(struct sw_table *swt,
283 const struct sw_flow_key *key,
284 struct sw_table_position *position,
285 int (*callback)(struct sw_flow *, void *),
288 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
291 for (i = position->private[1]; i < 2; i++) {
292 int error = table_hash_iterate(t2->subtable[i], key, position,
297 position->private[0] = 0;
298 position->private[1]++;
303 static void table_hash2_stats(struct sw_table *swt,
304 struct sw_table_stats *stats)
306 struct sw_table_hash2 *t2 = (struct sw_table_hash2 *) swt;
307 struct sw_table_stats substats[2];
310 for (i = 0; i < 2; i++)
311 table_hash_stats(t2->subtable[i], &substats[i]);
312 stats->name = "hash2";
313 stats->n_flows = substats[0].n_flows + substats[1].n_flows;
314 stats->max_flows = substats[0].max_flows + substats[1].max_flows;
315 stats->n_matched = swt->n_matched;
318 struct sw_table *table_hash2_create(unsigned int poly0, unsigned int buckets0,
319 unsigned int poly1, unsigned int buckets1)
322 struct sw_table_hash2 *t2;
323 struct sw_table *swt;
325 t2 = kzalloc(sizeof *t2, GFP_KERNEL);
329 t2->subtable[0] = table_hash_create(poly0, buckets0);
330 if (t2->subtable[0] == NULL)
333 t2->subtable[1] = table_hash_create(poly1, buckets1);
334 if (t2->subtable[1] == NULL)
335 goto out_free_subtable0;
338 swt->lookup = table_hash2_lookup;
339 swt->insert = table_hash2_insert;
340 swt->delete = table_hash2_delete;
341 swt->timeout = table_hash2_timeout;
342 swt->destroy = table_hash2_destroy;
343 swt->iterate = table_hash2_iterate;
344 swt->stats = table_hash2_stats;
349 table_hash_destroy(t2->subtable[0]);
355 /* From fs/xfs/linux-2.4/kmem.c. */
358 kmem_alloc(size_t size)
362 #ifdef KMALLOC_MAX_SIZE
363 if (size > KMALLOC_MAX_SIZE)
366 ptr = kmalloc(size, GFP_KERNEL);
370 printk("openflow: used vmalloc for %lu bytes\n",
371 (unsigned long)size);
/* Allocates and zeroes SIZE bytes via kmem_alloc().  Returns NULL on
 * failure.  (The visible original called memset() on the unchecked result,
 * which would dereference NULL on allocation failure — guard restored.) */
static void *
kmem_zalloc(size_t size)
{
	void *ptr = kmem_alloc(size);
	if (ptr)
		memset(ptr, 0, size);
	return ptr;
}
386 kmem_free(void *ptr, size_t size)
388 if (((unsigned long)ptr < VMALLOC_START) ||
389 ((unsigned long)ptr >= VMALLOC_END)) {