X-Git-Url: https://pintos-os.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=datapath%2Ftable.c;h=725845de568ebb5c629f1785aac793fed8dd3a18;hb=6c222e55fa4222c6724094e1e7a0a69addf6b030;hp=280630854a6435d830e132fd53d019aa5ad2252d;hpb=e1040c772fbebc3abe9e7222f9c540112c4dca81;p=openvswitch

diff --git a/datapath/table.c b/datapath/table.c
index 28063085..725845de 100644
--- a/datapath/table.c
+++ b/datapath/table.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2010 Nicira Networks.
+ * Copyright (c) 2009, 2010, 2011 Nicira Networks.
  * Distributed under the terms of the GNU GPL version 2.
  *
  * Significant portions of this file may be copied from parts of the Linux
@@ -10,6 +10,7 @@
 #include "datapath.h"
 #include "table.h"
 
+#include
 #include
 #include
 #include
@@ -30,6 +31,17 @@ struct tbl_bucket {
 	struct tbl_node *objs[];
 };
 
+static struct tbl_bucket *get_bucket(struct tbl_bucket __rcu *bucket)
+{
+	return rcu_dereference_check(bucket, rcu_read_lock_held() ||
+				     lockdep_genl_is_held());
+}
+
+static struct tbl_bucket *get_bucket_protected(struct tbl_bucket __rcu *bucket)
+{
+	return rcu_dereference_protected(bucket, lockdep_genl_is_held());
+}
+
 static inline int bucket_size(int n_objs)
 {
 	return sizeof(struct tbl_bucket) + sizeof(struct tbl_node *) * n_objs;
@@ -40,17 +52,18 @@ static struct tbl_bucket *bucket_alloc(int n_objs)
 	return kmalloc(bucket_size(n_objs), GFP_KERNEL);
 }
 
-static void free_buckets(struct tbl_bucket ***l1, unsigned int n_buckets,
+static void free_buckets(struct tbl_bucket __rcu ***l1,
+			 unsigned int n_buckets,
 			 void (*free_obj)(struct tbl_node *))
 {
 	unsigned int i;
 
-	for (i = 0; i < n_buckets >> TBL_L1_BITS; i++) {
-		struct tbl_bucket **l2 = l1[i];
+	for (i = 0; i < n_buckets >> TBL_L1_SHIFT; i++) {
+		struct tbl_bucket __rcu **l2 = l1[i];
 		unsigned int j;
 
-		for (j = 0; j < TBL_L1_SIZE; j++) {
-			struct tbl_bucket *bucket = l2[j];
+		for (j = 0; j < TBL_L2_SIZE; j++) {
+			struct tbl_bucket *bucket = (struct tbl_bucket __force *)l2[j];
 			if (!bucket)
 				continue;
 
@@ -66,19 +79,19 @@ static void free_buckets(struct tbl_bucket ***l1, unsigned int n_buckets,
 	kfree(l1);
 }
 
-static struct tbl_bucket ***alloc_buckets(unsigned int n_buckets)
+static struct tbl_bucket __rcu ***alloc_buckets(unsigned int n_buckets)
 {
-	struct tbl_bucket ***l1;
+	struct tbl_bucket __rcu ***l1;
 	unsigned int i;
 
-	l1 = kmalloc((n_buckets >> TBL_L1_BITS) * sizeof(struct tbl_bucket **),
+	l1 = kmalloc((n_buckets >> TBL_L1_SHIFT) * sizeof(struct tbl_bucket **),
 		     GFP_KERNEL);
 	if (!l1)
 		return NULL;
-	for (i = 0; i < n_buckets >> TBL_L1_BITS; i++) {
-		l1[i] = (struct tbl_bucket **)get_zeroed_page(GFP_KERNEL);
+	for (i = 0; i < n_buckets >> TBL_L1_SHIFT; i++) {
+		l1[i] = (struct tbl_bucket __rcu **)get_zeroed_page(GFP_KERNEL);
 		if (!l1[i]) {
-			free_buckets(l1, i << TBL_L1_BITS, NULL);
+			free_buckets(l1, i << TBL_L1_SHIFT, NULL);
 			return NULL;
 		}
 	}
@@ -90,17 +103,14 @@ static struct tbl_bucket ***alloc_buckets(unsigned int n_buckets)
  * @n_buckets: number of buckets in the new table
  *
  * Creates and returns a new hash table, or %NULL if memory cannot be
- * allocated. @n_buckets must be a power of 2 in the range %TBL_L1_SIZE to
+ * allocated. @n_buckets must be a power of 2 in the range %TBL_MIN_BUCKETS to
  * %TBL_MAX_BUCKETS.
  */
 struct tbl *tbl_create(unsigned int n_buckets)
 {
 	struct tbl *table;
 
-	if (!n_buckets)
-		n_buckets = TBL_L1_SIZE;
-
-	table = kzalloc(sizeof *table, GFP_KERNEL);
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
 	if (!table)
 		goto err;
 
@@ -168,14 +178,14 @@ static struct tbl_bucket __rcu **find_bucket(struct tbl *table, u32 hash)
 	return &table->buckets[l1][l2];
 }
 
-static int search_bucket(const struct tbl_bucket *bucket, void *target, u32 hash,
-			 int (*cmp)(const struct tbl_node *, void *))
+static int search_bucket(const struct tbl_bucket *bucket, void *target, int len, u32 hash,
+			 int (*cmp)(const struct tbl_node *, void *, int len))
 {
 	int i;
 
 	for (i = 0; i < bucket->n_objs; i++) {
 		struct tbl_node *obj = bucket->objs[i];
-		if (obj->hash == hash && likely(cmp(obj, target)))
+		if (obj->hash == hash && likely(cmp(obj, target, len)))
 			return i;
 	}
 
@@ -187,6 +197,8 @@ static int search_bucket(const struct tbl_bucket *bucket, void *target, u32 hash
  * @table: hash table to search
  * @target: identifier for the object that is being searched for, will be
  *	provided as an argument to @cmp when making comparisions
+ * @len: length of @target in bytes, will be provided as an argument to @cmp
+ *	when making comparisons
  * @hash: hash of @target
  * @cmp: comparision function to match objects with the given hash, returns
  *	nonzero if the objects match, zero otherwise
@@ -194,17 +206,17 @@ static int search_bucket(const struct tbl_bucket *bucket, void *target, u32 hash
  * Searches @table for an object identified by @target. Returns the tbl_node
  * contained in the object if successful, otherwise %NULL.
 */
-struct tbl_node *tbl_lookup(struct tbl *table, void *target, u32 hash,
-			    int (*cmp)(const struct tbl_node *, void *))
+struct tbl_node *tbl_lookup(struct tbl *table, void *target, int len, u32 hash,
+			    int (*cmp)(const struct tbl_node *, void *, int))
 {
 	struct tbl_bucket __rcu **bucketp = find_bucket(table, hash);
-	struct tbl_bucket *bucket = rcu_dereference(*bucketp);
+	struct tbl_bucket *bucket = get_bucket(*bucketp);
 	int index;
 
 	if (!bucket)
 		return NULL;
 
-	index = search_bucket(bucket, target, hash, cmp);
+	index = search_bucket(bucket, target, len, hash, cmp);
 	if (index < 0)
 		return NULL;
 
@@ -228,16 +240,23 @@ struct tbl_node *tbl_lookup(struct tbl *table, void *target, u32 hash,
 int tbl_foreach(struct tbl *table,
 		int (*callback)(struct tbl_node *, void *aux), void *aux)
 {
-	unsigned int i, j, k;
-	for (i = 0; i < table->n_buckets >> TBL_L1_BITS; i++) {
-		struct tbl_bucket __rcu **l2 = table->buckets[i];
-		for (j = 0; j < TBL_L1_SIZE; j++) {
-			struct tbl_bucket *bucket = rcu_dereference(l2[j]);
+	unsigned int n_l1 = table->n_buckets >> TBL_L1_SHIFT;
+	unsigned int l1_idx;
+
+	for (l1_idx = 0; l1_idx < n_l1; l1_idx++) {
+		struct tbl_bucket __rcu **l2 = table->buckets[l1_idx];
+		unsigned int l2_idx;
+
+		for (l2_idx = 0; l2_idx < TBL_L2_SIZE; l2_idx++) {
+			struct tbl_bucket *bucket;
+			unsigned int i;
+
+			bucket = get_bucket(l2[l2_idx]);
 			if (!bucket)
 				continue;
 
-			for (k = 0; k < bucket->n_objs; k++) {
-				int error = (*callback)(bucket->objs[k], aux);
+			for (i = 0; i < bucket->n_objs; i++) {
+				int error = (*callback)(bucket->objs[i], aux);
 				if (error)
 					return error;
 			}
@@ -246,6 +265,53 @@ int tbl_foreach(struct tbl *table,
 	return 0;
 }
 
+/**
+ * tbl_next - find next node in hash table
+ * @table: table to iterate
+ * @bucketp: On entry, hash value of bucket to start from. On exit, updated
+ *	to bucket to start from on next call.
+ * @objp: On entry, index to start from within first bucket. On exit, updated
+ *	to index to start from on next call.
+ *
+ * Returns the next node in @table in hash order, or %NULL when no nodes remain
+ * in the hash table.
+ *
+ * On entry, uses the values that @bucketp and @objp reference to determine
+ * where to begin iteration. Use 0 for both values to begin a new iteration.
+ * On exit, stores the values to pass on the next iteration into @bucketp and
+ * @objp's referents.
+ */
+struct tbl_node *tbl_next(struct tbl *table, u32 *bucketp, u32 *objp)
+{
+	unsigned int n_l1 = table->n_buckets >> TBL_L1_SHIFT;
+	u32 s_l1_idx = *bucketp >> TBL_L1_SHIFT;
+	u32 s_l2_idx = *bucketp & (TBL_L2_SIZE - 1);
+	u32 s_obj = *objp;
+	unsigned int l1_idx;
+
+	for (l1_idx = s_l1_idx; l1_idx < n_l1; l1_idx++) {
+		struct tbl_bucket __rcu **l2 = table->buckets[l1_idx];
+		unsigned int l2_idx;
+
+		for (l2_idx = s_l2_idx; l2_idx < TBL_L2_SIZE; l2_idx++) {
+			struct tbl_bucket *bucket;
+
+			bucket = get_bucket_protected(l2[l2_idx]);
+			if (bucket && s_obj < bucket->n_objs) {
+				*bucketp = (l1_idx << TBL_L1_SHIFT) + l2_idx;
+				*objp = s_obj + 1;
+				return bucket->objs[s_obj];
+			}
+
+			s_obj = 0;
+		}
+		s_l2_idx = 0;
+	}
+	*bucketp = 0;
+	*objp = 0;
+	return NULL;
+}
+
 static int insert_table_flow(struct tbl_node *node, void *new_table_)
 {
 	struct tbl *new_table = new_table_;
@@ -273,7 +339,7 @@ struct tbl *tbl_expand(struct tbl *table)
 	}
 
 	err = -ENOMEM;
-	new_table = tbl_create(n_buckets);
+	new_table = tbl_create(TBL_MIN_BUCKETS);
 	if (!new_table)
 		goto error;
 
@@ -319,7 +385,7 @@ static void free_bucket_rcu(struct rcu_head *rcu)
 int tbl_insert(struct tbl *table, struct tbl_node *target, u32 hash)
 {
 	struct tbl_bucket __rcu **oldp = find_bucket(table, hash);
-	struct tbl_bucket *old = rcu_dereference(*oldp);
+	struct tbl_bucket *old = get_bucket_protected(*oldp);
 	unsigned int n = old ? old->n_objs : 0;
 	struct tbl_bucket *new = bucket_alloc(n + 1);
 
@@ -357,7 +423,7 @@ int tbl_insert(struct tbl *table, struct tbl_node *target, u32 hash)
 int tbl_remove(struct tbl *table, struct tbl_node *target)
 {
 	struct tbl_bucket __rcu **oldp = find_bucket(table, target->hash);
-	struct tbl_bucket *old = rcu_dereference(*oldp);
+	struct tbl_bucket *old = get_bucket_protected(*oldp);
 	unsigned int n = old->n_objs;
 	struct tbl_bucket *new;
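
The new tbl_next() added by this patch gives callers a resumable cursor over the two-level bucket array: iteration starts with both cursor values at zero, and each call returns one node while updating *bucketp and *objp so a later call can pick up where the previous one stopped, with the returned bucket value packing the L1 and L2 indices. The standalone C sketch below is not Open vSwitch code; the demo_* names, the sizes, the static test table, and the integer objects are all hypothetical, and RCU protection is omitted. It only mirrors the (bucketp, objp) contract described in the kernel-doc comment above.

/*
 * Standalone sketch of the resumable (bucket, obj) cursor used by tbl_next().
 * NOT Open vSwitch code: demo_* names, sizes, and the static test table are
 * hypothetical, and RCU is omitted. Only the cursor contract is reproduced.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_L1_SHIFT 2			/* hypothetical: 4 L2 slots per L1 entry */
#define DEMO_L2_SIZE  (1u << DEMO_L1_SHIFT)

struct demo_bucket {
	unsigned int n_objs;
	int objs[4];
};

/* One L1 entry worth of buckets: some slots empty (NULL), some populated. */
static struct demo_bucket b1 = { 2, { 10, 11 } };
static struct demo_bucket b3 = { 1, { 30 } };
static struct demo_bucket *l2_page[DEMO_L2_SIZE] = { NULL, &b1, NULL, &b3 };
static struct demo_bucket **l1_table[] = { l2_page };
static unsigned int n_buckets = DEMO_L2_SIZE;

/* Same shape as tbl_next(): return the next object, or -1 when done. */
static int demo_next(uint32_t *bucketp, uint32_t *objp)
{
	unsigned int n_l1 = n_buckets >> DEMO_L1_SHIFT;
	uint32_t s_l2 = *bucketp & (DEMO_L2_SIZE - 1);
	uint32_t s_obj = *objp;
	unsigned int l1, l2;

	for (l1 = *bucketp >> DEMO_L1_SHIFT; l1 < n_l1; l1++) {
		for (l2 = s_l2; l2 < DEMO_L2_SIZE; l2++) {
			struct demo_bucket *b = l1_table[l1][l2];

			if (b && s_obj < b->n_objs) {
				/* Cursor out: bucket number packs (l1, l2); the
				 * object index points just past the returned one. */
				*bucketp = (l1 << DEMO_L1_SHIFT) + l2;
				*objp = s_obj + 1;
				return b->objs[s_obj];
			}
			s_obj = 0;	/* restart object index in the next bucket */
		}
		s_l2 = 0;		/* restart L2 scan in the next L1 entry */
	}
	*bucketp = 0;
	*objp = 0;
	return -1;
}

int main(void)
{
	uint32_t bucket = 0, obj = 0;	/* 0/0 begins a fresh iteration */
	int v;

	while ((v = demo_next(&bucket, &obj)) != -1)
		printf("obj %d (resume at bucket=%u obj=%u)\n",
		       v, (unsigned)bucket, (unsigned)obj);
	return 0;
}

Compiling and running the sketch prints the three objects in bucket order and, on each line, the cursor values a caller would stash to resume the walk later, which is the same pattern the datapath uses to dump flows in batches.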