/*
 * Copyright (c) 2008, 2009, 2010 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "hmap.h"
#include <assert.h>
#include <stdint.h>
#include "coverage.h"
#include "util.h"

/* Initializes 'hmap' as an empty hash table. */
void
hmap_init(struct hmap *hmap)
{
    hmap->buckets = &hmap->one;
    hmap->one = NULL;
    hmap->mask = 0;
    hmap->n = 0;
}

/* Frees memory reserved by 'hmap'.  It is the client's responsibility to free
 * the nodes themselves, if necessary. */
void
hmap_destroy(struct hmap *hmap)
{
    if (hmap && hmap->buckets != &hmap->one) {
        free(hmap->buckets);
    }
}

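/* Usage sketch (illustrative only): a minimal lifecycle, assuming the
 * hmap_insert() inline helper declared in hmap.h and hash_int() from hash.h;
 * 'struct foo' is a hypothetical element type that embeds its hash node:
 *
 *     struct foo {
 *         struct hmap_node node;
 *         int value;
 *     };
 *
 *     struct hmap map;
 *     struct foo *foo = xmalloc(sizeof *foo);
 *
 *     hmap_init(&map);
 *     foo->value = 42;
 *     hmap_insert(&map, &foo->node, hash_int(foo->value, 0));
 *     ...
 *     free(foo);              // the client frees the nodes...
 *     hmap_destroy(&map);     // ...and hmap frees its bucket array.
 */
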
/* Exchanges hash maps 'a' and 'b'. */
void
hmap_swap(struct hmap *a, struct hmap *b)
{
    struct hmap tmp = *a;
    *a = *b;
    *b = tmp;
    hmap_moved(a);
    hmap_moved(b);
}

/* Adjusts 'hmap' to compensate for having moved position in memory (e.g. due
 * to realloc()). */
void
hmap_moved(struct hmap *hmap)
{
    if (!hmap->mask) {
        hmap->buckets = &hmap->one;
    }
}

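/* Rehashes every node of 'hmap' into a freshly built array of 'new_mask' + 1
 * buckets, then frees the old array.  'new_mask' must be a power of 2 minus 1;
 * a mask of 0 selects the single inline bucket, so no allocation happens. */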
static void
resize(struct hmap *hmap, size_t new_mask)
{
    struct hmap tmp;
    size_t i;

    assert(!(new_mask & (new_mask + 1)));
    assert(new_mask != SIZE_MAX);

    hmap_init(&tmp);
    if (new_mask) {
        tmp.buckets = xmalloc(sizeof *tmp.buckets * (new_mask + 1));
        tmp.mask = new_mask;
        for (i = 0; i <= tmp.mask; i++) {
            tmp.buckets[i] = NULL;
        }
    }
    for (i = 0; i <= hmap->mask; i++) {
        struct hmap_node *node, *next;
        int count = 0;
        for (node = hmap->buckets[i]; node; node = next) {
            next = node->next;
            hmap_insert_fast(&tmp, node, node->hash);
            count++;
        }
        if (count > 5) {
            COVERAGE_INC(hmap_pathological);
        }
    }
    hmap_swap(hmap, &tmp);
    hmap_destroy(&tmp);
}

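/* Returns the bucket mask appropriate for a map holding 'capacity' nodes: the
 * target is roughly two nodes per bucket, rounded to a power of 2 minus 1 by
 * smearing the high bit of capacity / 2 rightward.  For example, calc_mask(20)
 * starts from 20 / 2 = 10 (binary 1010) and smears to 15 (binary 1111), giving
 * 16 buckets; capacities of 0 and 1 yield mask 0, which selects the single
 * inline bucket. */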
static size_t
calc_mask(size_t capacity)
{
    size_t mask = capacity / 2;
    mask |= mask >> 1;
    mask |= mask >> 2;
    mask |= mask >> 4;
    mask |= mask >> 8;
    mask |= mask >> 16;
#if SIZE_MAX > UINT32_MAX
    mask |= mask >> 32;
#endif

    /* If we need to dynamically allocate buckets we might as well allocate at
     * least 4 of them. */
    mask |= (mask & 1) << 1;

    return mask;
}

/* Expands 'hmap', if necessary, to optimize the performance of searches. */
void
hmap_expand(struct hmap *hmap)
{
    size_t new_mask = calc_mask(hmap->n);
    if (new_mask > hmap->mask) {
        COVERAGE_INC(hmap_expand);
        resize(hmap, new_mask);
    }
}

/* Shrinks 'hmap', if necessary, to optimize the performance of iteration. */
void
hmap_shrink(struct hmap *hmap)
{
    size_t new_mask = calc_mask(hmap->n);
    if (new_mask < hmap->mask) {
        COVERAGE_INC(hmap_shrink);
        resize(hmap, new_mask);
    }
}

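/* Usage sketch (illustrative only): after a caller has dropped many nodes with
 * hmap_remove() from hmap.h, the bucket array stays at its old size; calling
 * hmap_shrink() compacts it so that iteration no longer walks mostly-empty
 * buckets:
 *
 *     drop_stale_elements(&map);     // hypothetical helper that calls
 *                                    // hmap_remove() on unwanted nodes
 *     hmap_shrink(&map);
 */
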
/* Expands 'hmap', if necessary, to optimize the performance of searches when
 * it has up to 'n' elements.  (But iteration will be slow in a hash map whose
 * allocated capacity is much higher than its current number of nodes.) */
void
hmap_reserve(struct hmap *hmap, size_t n)
{
    size_t new_mask = calc_mask(n);
    if (new_mask > hmap->mask) {
        COVERAGE_INC(hmap_reserve);
        resize(hmap, new_mask);
    }
}

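/* Usage sketch (illustrative only): when the final element count is known up
 * front, reserving once avoids the repeated rehashing that incremental
 * expansion would otherwise perform during a bulk load.  'n_items', 'items',
 * and item_hash() are hypothetical names for this example; hmap_insert() is
 * declared in hmap.h:
 *
 *     hmap_reserve(&map, n_items);
 *     for (i = 0; i < n_items; i++) {
 *         hmap_insert(&map, &items[i].node, item_hash(&items[i]));
 *     }
 */
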
/* Adjusts 'hmap' to compensate for 'old_node' having moved position in memory
 * to 'node' (e.g. due to realloc()). */
void
hmap_node_moved(struct hmap *hmap,
                struct hmap_node *old_node, struct hmap_node *node)
{
    struct hmap_node **bucket = &hmap->buckets[node->hash & hmap->mask];
    while (*bucket != old_node) {
        bucket = &(*bucket)->next;
    }
    *bucket = node;
}

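/* Usage sketch (illustrative only): the typical caller keeps its elements in a
 * dynamically grown array and re-links each embedded node after the array is
 * reallocated.  'struct foo', 'foos', and 'n_foos' are hypothetical names for
 * this example; xrealloc() comes from util.h:
 *
 *     struct foo *old = foos;
 *     foos = xrealloc(foos, new_allocated * sizeof *foos);
 *     if (foos != old) {
 *         for (i = 0; i < n_foos; i++) {
 *             hmap_node_moved(&map, &old[i].node, &foos[i].node);
 *         }
 *     }
 *
 * Only the old nodes' addresses are used for comparison; their contents are
 * never read after the realloc(). */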