1 /* hash - hashing table processing.
2 Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
3 Written by Jim Meyering, 1992.
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2, or (at your option)
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software Foundation,
17 Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
19 /* A generic hash table package. */
21 /* Define USE_OBSTACK to 1 if you want the allocator to use obstacks instead
22 of malloc. If you change USE_OBSTACK, you have to recompile! */
33 typedef enum {false = 0, true = 1} bool;
38 #ifndef HAVE_DECL_FREE
39 "this configure-time declaration test was not run"
45 #ifndef HAVE_DECL_MALLOC
46 "this configure-time declaration test was not run"
54 # ifndef obstack_chunk_alloc
55 # define obstack_chunk_alloc malloc
57 # ifndef obstack_chunk_free
58 # define obstack_chunk_free free
64 /* A hash table contains many internal entries, each holding a pointer to
65 some user provided data (also called a user entry). An entry indistinctly
66 refers to both the internal entry and its associated user entry. A user
67 entry's contents may be hashed by a randomization function (the hashing
68 function, or just `hasher' for short) into a number (or `slot') between 0
69 and the current table size. At each slot position in the hash table,
70 starts a linked chain of entries for which the user data all hash to this
71 slot. A bucket is the collection of all entries hashing to the same slot.
73 A good `hasher' function will distribute entries rather evenly in buckets.
74 In the ideal case, the length of each bucket is roughly the number of
75 entries divided by the table size. Finding the slot for a data is usually
76 done in constant time by the `hasher', and the later finding of a precise
77 entry is linear in time with the size of the bucket. Consequently, a
78 larger hash table size (that is, a larger number of buckets) is prone to
79 yielding shorter chains, *given* the `hasher' function behaves properly.
81 Long buckets slow down the lookup algorithm. One might use big hash table
82 sizes in hope to reduce the average length of buckets, but this might
83 become inordinate, as unused slots in the hash table take some space. The
84 best bet is to make sure you are using a good `hasher' function (beware
85 that those are not that easy to write! :-), and to use a table size
86 larger than the actual number of entries. */
88 /* If an insertion makes the ratio of nonempty buckets to table size larger
89 than the growth threshold (a number between 0.0 and 1.0), then increase
90 the table size by multiplying by the growth factor (a number greater than
91 1.0). The growth threshold defaults to 0.8, and the growth factor
92 defaults to 1.414, meaning that the table will have doubled its size
93 every second time 80% of the buckets get used. */
94 #define DEFAULT_GROWTH_THRESHOLD 0.8
95 #define DEFAULT_GROWTH_FACTOR 1.414
97 /* If a deletion empties a bucket and causes the ratio of used buckets to
98 table size to become smaller than the shrink threshold (a number between
99 0.0 and 1.0), then shrink the table by multiplying by the shrink factor (a
100 number greater than the shrink threshold but smaller than 1.0). The shrink
101 threshold and factor default to 0.0 and 1.0, meaning that the table never
shrinks.  */
103 #define DEFAULT_SHRINK_THRESHOLD 0.0
104 #define DEFAULT_SHRINK_FACTOR 1.0
106 /* Use this to initialize or reset a TUNING structure to
107 some sensible values. */
/* Initializer order follows the Hash_tuning field order used throughout
   this file: shrink threshold, shrink factor, growth threshold, growth
   factor.  Any trailing fields (e.g. is_n_buckets) presumably default to
   zero — the Hash_tuning declaration is not visible here; TODO confirm.  */
108 static const Hash_tuning default_tuning =
110 DEFAULT_SHRINK_THRESHOLD,
111 DEFAULT_SHRINK_FACTOR,
112 DEFAULT_GROWTH_THRESHOLD,
113 DEFAULT_GROWTH_FACTOR,
117 /* Information and lookup. */
119 /* The following few functions provide information about the overall hash
120 table organization: the number of entries, number of buckets and maximum
121 length of buckets. */
123 /* Return the number of buckets in the hash table. The table size, the total
124 number of buckets (used plus unused), or the maximum number of slots, are
125 the same quantity. */
/* O(1) accessor: reads the counter cached in the table structure.  */
128 hash_get_n_buckets (const Hash_table *table)
130 return table->n_buckets;
133 /* Return the number of slots in use (non-empty buckets). */
/* O(1): this counter is maintained by the insertion/deletion routines
   (see hash_insert and hash_delete below).  */
136 hash_get_n_buckets_used (const Hash_table *table)
138 return table->n_buckets_used;
141 /* Return the number of active entries. */
/* O(1) accessor over the cached entry count.  */
144 hash_get_n_entries (const Hash_table *table)
146 return table->n_entries;
149 /* Return the length of the longest chain (bucket). */
/* O(n) over all entries: walks every bucket's overflow chain.
   NOTE(review): some lines of this function are not visible in this
   chunk (presumably the empty-bucket test and the bucket_length
   increment); the comments below describe only what is shown.  */
152 hash_get_max_bucket_length (const Hash_table *table)
154 struct hash_entry *bucket;
155 unsigned max_bucket_length = 0;
/* Scan the whole bucket array [table->bucket, table->bucket_limit).  */
157 for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
161 struct hash_entry *cursor = bucket;
/* The bucket head itself counts as the first entry of the chain.  */
162 unsigned bucket_length = 1;
/* Comma expression: advance the cursor, then test it for NULL.  */
164 while (cursor = cursor->next, cursor)
167 if (bucket_length > max_bucket_length)
168 max_bucket_length = bucket_length;
172 return max_bucket_length;
175 /* Do a mild validation of a hash table, by traversing it and checking two
   statistics for consistency: the recomputed number of used buckets and
   number of entries must match the counters cached in the table.  */
179 hash_table_ok (const Hash_table *table)
181 struct hash_entry *bucket;
182 unsigned n_buckets_used = 0;
183 unsigned n_entries = 0;
/* Recount by full traversal, then compare against the cached values.  */
185 for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
189 struct hash_entry *cursor = bucket;
191 /* Count bucket head. */
195 /* Count bucket overflow. */
196 while (cursor = cursor->next, cursor)
/* Consistent iff both recomputed figures agree with the table's own.  */
201 if (n_buckets_used == table->n_buckets_used && n_entries == table->n_entries)
/* Write a human-readable summary of TABLE's occupancy statistics
   (entry count, bucket counts, load ratio, longest chain) to STREAM.  */
208 hash_print_statistics (const Hash_table *table, FILE *stream)
210 unsigned n_entries = hash_get_n_entries (table);
211 unsigned n_buckets = hash_get_n_buckets (table);
212 unsigned n_buckets_used = hash_get_n_buckets_used (table);
213 unsigned max_bucket_length = hash_get_max_bucket_length (table);
215 fprintf (stream, "# entries: %u\n", n_entries);
216 fprintf (stream, "# buckets: %u\n", n_buckets);
/* Percentage of buckets holding at least one entry.  */
217 fprintf (stream, "# buckets used: %u (%.2f%%)\n", n_buckets_used,
218 (100.0 * n_buckets_used) / n_buckets);
219 fprintf (stream, "max bucket length: %u\n", max_bucket_length);
222 /* If ENTRY matches an entry already in the hash table, return the
223 entry from the table. Otherwise, return NULL. */
/* Average O(1): hash ENTRY to its bucket, then linear-scan that bucket's
   chain using the user-supplied comparator.  */
226 hash_lookup (const Hash_table *table, const void *entry)
228 struct hash_entry *bucket
229 = table->bucket + table->hasher (entry, table->n_buckets)
;
230 struct hash_entry *cursor;
/* The hasher must return a slot in 0..n_buckets-1; enforce that here.  */
232 assert (bucket < table->bucket_limit);
/* An empty bucket is marked by a NULL data pointer in its head.  */
234 if (bucket->data == NULL)
237 for (cursor = bucket; cursor; cursor = cursor->next)
238 if (table->comparator (entry, cursor->data))
246 /* The functions in this page traverse the hash table and process the
247 contained entries. For the traversal to work properly, the hash table
248 should not be resized nor modified while any particular entry is being
249 processed. In particular, entries should not be added or removed. */
251 /* Return the first data in the table, or NULL if the table is empty. */
/* Starting point for traversal with hash_get_next; scans forward from the
   first bucket for a non-empty head.  */
254 hash_get_first (const Hash_table *table)
256 struct hash_entry *bucket;
/* Fast path: nothing to return from an empty table.  */
258 if (table->n_entries == 0)
261 for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
269 /* Return the user data for the entry following ENTRY, where ENTRY has been
270 returned by a previous call to either `hash_get_first' or `hash_get_next'.
271 Return NULL if there are no more entries. */
/* ENTRY must still be present and the table must not have been modified
   since it was returned (see the traversal caveat above): the entry is
   relocated by re-hashing it, then compared by pointer identity.  */
274 hash_get_next (const Hash_table *table, const void *entry)
276 struct hash_entry *bucket
277 = table->bucket + table->hasher (entry, table->n_buckets);
278 struct hash_entry *cursor;
280 assert (bucket < table->bucket_limit);
282 /* Find next entry in the same bucket. */
283 for (cursor = bucket; cursor; cursor = cursor->next)
284 if (cursor->data == entry && cursor->next)
285 return cursor->next->data;
287 /* Find first entry in any subsequent bucket. */
288 while (++bucket < table->bucket_limit)
296 /* Fill BUFFER with pointers to active user entries in the hash table, then
297 return the number of pointers copied. Do not copy more than BUFFER_SIZE
   pointers.  */
301 hash_get_entries (const Hash_table *table, void **buffer,
302 unsigned buffer_size)
304 unsigned counter = 0;
305 struct hash_entry *bucket;
306 struct hash_entry *cursor;
/* Walk every bucket and its overflow chain in table order.  */
308 for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
312 for (cursor = bucket; cursor; cursor = cursor->next)
/* Stop silently once the caller-supplied buffer is full.  */
314 if (counter >= buffer_size)
316 buffer[counter++] = cursor->data;
324 /* Call a PROCESSOR function for each entry of a hash table, and return the
325 number of entries for which the processor function returned success. A
326 pointer to some PROCESSOR_DATA is made available to each call to
327 the processor function. The PROCESSOR accepts two arguments: the first is
328 the user entry being walked into, the second is the value of PROCESSOR_DATA
329 as received. The walking continues for as long as the PROCESSOR function
330 returns nonzero. When it returns zero, the walking is interrupted. */
333 hash_do_for_each (const Hash_table *table, Hash_processor processor,
334 void *processor_data)
336 unsigned counter = 0;
337 struct hash_entry *bucket;
338 struct hash_entry *cursor;
/* Same traversal order as hash_get_entries: bucket array, then chains.  */
340 for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
344 for (cursor = bucket; cursor; cursor = cursor->next)
/* A zero return from PROCESSOR aborts the walk early.  */
346 if (!(*processor) (cursor->data, processor_data))
356 /* Allocation and clean-up. */
358 /* Return a hash index for a NUL-terminated STRING between 0 and N_BUCKETS-1.
359 This is a convenience routine for constructing other hashing functions. */
363 /* About hashings, Paul Eggert writes to me (FP), on 1994-01-01: "Please see
364 B. J. McKenzie, R. Harries & T. Bell, Selecting a hashing algorithm,
365 Software--practice & experience 20, 2 (Feb 1990), 209-224. Good hash
366 algorithms tend to be domain-specific, so what's good for [diffutils'] io.c
367 may not be good for your application." */
/* USE_DIFF_HASH variant: rotate-and-add over each byte, reduced modulo
   N_BUCKETS only once at the end.  */
370 hash_string (const char *string, unsigned n_buckets)
/* Rotate Value left by Shift bits within an `unsigned'.  Note a Shift of
   0 or of the full word width would be undefined; only 7 is used here.  */
375 # define ROTATE_LEFT(Value, Shift) \
376 ((Value) << (Shift) | (Value) >> ((sizeof (unsigned) * CHAR_BIT) - (Shift)))
377 # define HASH_ONE_CHAR(Value, Byte) \
378 ((Byte) + ROTATE_LEFT (Value, 7))
/* Cast through unsigned char so bytes above 127 hash consistently even
   where plain char is signed.  */
382 for (; *string; string++)
383 value = HASH_ONE_CHAR (value, *(const unsigned char *) string);
384 return value % n_buckets;
387 # undef HASH_ONE_CHAR
390 #else /* not USE_DIFF_HASH */
392 /* This one comes from `recode', and performs a bit better than the above as
393 per a few experiments. It is inspired from a hashing routine found in the
394 very old Cyber `snoop', itself written in typical Greg Mansfield style.
395 (By the way, what happened to this excellent man? Is he still alive?) */
/* Multiplicative hash: value = value * 31 + byte, presumably reduced
   modulo N_BUCKETS at each step (the continuation line with the `%' is
   not visible in this chunk — NOTE(review): confirm against full source),
   which keeps the running value bounded and yields a slot directly.  */
398 hash_string (const char *string, unsigned n_buckets)
403 value = ((value * 31 + (int) *(const unsigned char *) string++)
408 #endif /* not USE_DIFF_HASH */
410 /* Return true if CANDIDATE is a prime number. CANDIDATE should be an odd
411 number at least equal to 11. */
/* Trial division by successive odd divisors, tracking divisor^2
   incrementally so no multiplication is needed inside the loop.
   NOTE(review): the divisor-increment statements of the loop body are not
   visible in this chunk; the increment of SQUARE shown below only makes
   sense together with them — confirm against the full source.  */
414 is_prime (unsigned long candidate)
416 unsigned long divisor = 3;
417 unsigned long square = divisor * divisor;
/* Stop when divisor^2 reaches CANDIDATE or a divisor is found.  */
419 while (square < candidate && (candidate % divisor))
422 square += 4 * divisor;
/* Prime iff no divisor up to sqrt(CANDIDATE) divides it evenly.  */
426 return (candidate % divisor ? true : false);
429 /* Round a given CANDIDATE number up to the nearest prime, and return that
430 prime. Primes lower than 10 are merely skipped. */
/* Used to pick bucket counts: prime table sizes help spread hash values.  */
433 next_prime (unsigned long candidate)
435 /* Skip small primes. */
439 /* Make it definitely odd. */
/* Step by 2 through odd numbers until is_prime accepts one.  */
442 while (!is_prime (candidate))
/* Reset TUNING to the packaged defaults (see default_tuning above).  */
449 hash_reset_tuning (Hash_tuning *tuning)
451 *tuning = default_tuning;
454 /* For the given hash TABLE, check the user supplied tuning structure for
455 reasonable values, and return true if there is no gross error with it.
456 Otherwise, definitively reset the TUNING field to some acceptable default
457 in the hash table (that is, the user loses the right of further modifying
458 tuning arguments), and return false. */
461 check_tuning (Hash_table *table)
463 const Hash_tuning *tuning = table->tuning;
/* All constraints must hold simultaneously: growth threshold strictly
   inside (0,1), growth factor above 1, shrink threshold in [0,1) and
   strictly below both the shrink factor and the growth threshold, and
   shrink factor at most 1.  These match the invariants assumed by
   hash_insert and hash_delete when they compute resize candidates.  */
465 if (tuning->growth_threshold > 0.0
466 && tuning->growth_threshold < 1.0
467 && tuning->growth_factor > 1.0
468 && tuning->shrink_threshold >= 0.0
469 && tuning->shrink_threshold < 1.0
470 && tuning->shrink_factor > tuning->shrink_threshold
471 && tuning->shrink_factor <= 1.0
472 && tuning->shrink_threshold < tuning->growth_threshold)
/* Invalid tuning: permanently fall back to the packaged defaults.  */
475 table->tuning = &default_tuning;
479 /* Allocate and return a new hash table, or NULL upon failure. The initial
480 number of buckets is automatically selected so as to _guarantee_ that you
481 may insert at least CANDIDATE different user entries before any growth of
482 the hash table size occurs. So, if you have a reasonably tight a-priori upper
483 bound on the number of entries you intend to insert in the hash table, you
484 may save some table memory and insertion time, by specifying it here. If
485 the IS_N_BUCKETS field of the TUNING structure is true, the CANDIDATE
486 argument has its meaning changed to the wanted number of buckets.
488 TUNING points to a structure of user-supplied values, in case some fine
489 tuning is wanted over the default behavior of the hasher. If TUNING is
490 NULL, the default tuning parameters are used instead.
492 The user-supplied HASHER function should be provided. It accepts two
493 arguments ENTRY and TABLE_SIZE. It computes, by hashing ENTRY contents, a
494 slot number for that entry which should be in the range 0..TABLE_SIZE-1.
495 This slot number is then returned.
497 The user-supplied COMPARATOR function should be provided. It accepts two
498 arguments pointing to user data, it then returns true for a pair of entries
499 that compare equal, or false otherwise. This function is internally called
500 on entries which are already known to hash to the same bucket index.
502 The user-supplied DATA_FREER function, when not NULL, may be later called
503 with the user data as an argument, just before the entry containing the
504 data gets freed. This happens from within `hash_free' or `hash_clear'.
505 You should specify this function only if you want these functions to free
506 all of your `data' data. This is typically the case when your data is
507 simply an auxiliary struct that you have malloc'd to aggregate several
   values.  */
511 hash_initialize (unsigned candidate, const Hash_tuning *tuning,
512 Hash_hasher hasher, Hash_comparator comparator,
513 Hash_data_freer data_freer)
516 struct hash_entry *bucket;
/* Both callbacks are mandatory; refuse to build a table without them.  */
518 if (hasher == NULL || comparator == NULL)
521 table = (Hash_table *) malloc (sizeof (Hash_table));
/* A NULL TUNING selects the packaged defaults.  */
526 tuning = &default_tuning;
527 table->tuning = tuning;
528 if (!check_tuning (table))
530 /* Fail if the tuning options are invalid. This is the only occasion
531 when the user gets some feedback about it. Once the table is created,
532 if the user provides invalid tuning options, we silently revert to
533 using the defaults, and ignore further request to change the tuning
   parameters.  */
/* Size the bucket array: either CANDIDATE buckets directly, or enough
   buckets that CANDIDATE entries fit below the growth threshold; round
   up to a prime in both cases.  */
540 = next_prime (tuning->is_n_buckets ? candidate
541 : (unsigned) (candidate / tuning->growth_threshold));
543 table->bucket = (struct hash_entry *)
544 malloc (table->n_buckets * sizeof (struct hash_entry));
545 if (table->bucket == NULL)
550 table->bucket_limit = table->bucket + table->n_buckets;
/* Mark every bucket empty (the loop body clearing data/next is not
   visible in this chunk).  */
552 for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
557 table->n_buckets_used = 0;
558 table->n_entries = 0;
560 table->hasher = hasher;
561 table->comparator = comparator;
562 table->data_freer = data_freer;
/* No recycled overflow entries yet.  */
564 table->free_entry_list = NULL;
/* Only under USE_OBSTACK: overflow entries are obstack-allocated.  */
566 obstack_init (&table->entry_stack);
571 /* Make all buckets empty, placing any chained entries on the free list.
572 Apply the user-specified function data_freer (if any) to the data of any
   affected entries.  The table itself stays allocated and usable.  */
576 hash_clear (Hash_table *table)
578 struct hash_entry *bucket;
580 for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
584 struct hash_entry *cursor;
585 struct hash_entry *next;
587 /* Free the bucket overflow. */
/* NEXT is captured before relinking, since relinking clobbers ->next.  */
588 for (cursor = bucket->next; cursor; cursor = next)
590 if (table->data_freer)
591 (*table->data_freer) (cursor->data);
595 /* Relinking is done one entry at a time, as it is to be expected
596 that overflows are either rare or short. */
597 cursor->next = table->free_entry_list;
598 table->free_entry_list = cursor;
601 /* Free the bucket head. */
602 if (table->data_freer)
603 (*table->data_freer) (bucket->data);
/* All counters go back to the freshly-initialized state.  */
609 table->n_buckets_used = 0;
610 table->n_entries = 0;
613 /* Reclaim all storage associated with a hash table. If a data_freer
614 function has been supplied by the user when the hash table was created,
615 this function applies it to the data of each entry before freeing that
   entry.  After this call, TABLE must not be used again.  */
619 hash_free (Hash_table *table)
621 struct hash_entry *bucket;
622 struct hash_entry *cursor;
623 struct hash_entry *next;
625 /* Call the user data_freer function. */
/* Skipped entirely for empty tables or when no freer was supplied.  */
626 if (table->data_freer && table->n_entries)
628 for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
632 for (cursor = bucket; cursor; cursor = cursor->next)
634 (*table->data_freer) (cursor->data);
/* Under USE_OBSTACK, one call releases every overflow entry at once.  */
642 obstack_free (&table->entry_stack, NULL);
646 /* Free all bucket overflowed entries. */
647 for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
/* NEXT must be saved before freeing CURSOR (the capture line is not
   visible in this chunk).  */
649 for (cursor = bucket->next; cursor; cursor = next)
656 /* Also reclaim the internal list of previously freed entries. */
657 for (cursor = table->free_entry_list; cursor; cursor = next)
665 /* Free the remainder of the hash table structure. */
666 free (table->bucket);
670 /* Insertion and deletion. */
672 /* Get a new hash entry for a bucket overflow, possibly by recycling a
673 previously freed one. If this is not possible, allocate a new one. */
675 static struct hash_entry *
676 allocate_entry (Hash_table *table)
678 struct hash_entry *new;
/* Prefer popping from the free list built up by free_entry.  */
680 if (table->free_entry_list)
682 new = table->free_entry_list;
683 table->free_entry_list = new->next;
/* Otherwise allocate fresh: obstack under USE_OBSTACK, malloc elsewise
   (the two branches below belong to that #if/#else, whose directive
   lines are not visible in this chunk).  */
688 new = (struct hash_entry *)
689 obstack_alloc (&table->entry_stack, sizeof (struct hash_entry));
691 new = (struct hash_entry *) malloc (sizeof (struct hash_entry));
698 /* Free a hash entry which was part of some bucket overflow,
699 saving it for later recycling. */
/* Entries are never returned to malloc/obstack here; they are pushed on
   the table's free list so allocate_entry can reuse them.  */
702 free_entry (Hash_table *table, struct hash_entry *entry)
705 entry->next = table->free_entry_list;
706 table->free_entry_list = entry;
709 /* This private function is used to help with insertion and deletion. When
710 ENTRY matches an entry in the table, return a pointer to the corresponding
711 user data and set *BUCKET_HEAD to the head of the selected bucket.
712 Otherwise, return NULL. When DELETE is true and ENTRY matches an entry in
713 the table, unlink the matching entry. */
716 hash_find_entry (Hash_table *table, const void *entry,
717 struct hash_entry **bucket_head, bool delete)
719 struct hash_entry *bucket
720 = table->bucket + table->hasher (entry, table->n_buckets);
721 struct hash_entry *cursor;
723 assert (bucket < table->bucket_limit);
/* Report the bucket to the caller even when no match is found; insertion
   relies on this.  */
724 *bucket_head = bucket;
726 /* Test for empty bucket. */
727 if (bucket->data == NULL)
730 /* See if the entry is the first in the bucket. */
731 if ((*table->comparator) (entry, bucket->data))
733 void *data = bucket->data;
/* Deleting the head: promote the first overflow entry (if any) into
   the head slot rather than leaving a hole.  */
739 struct hash_entry *next = bucket->next;
741 /* Bump the first overflow entry into the bucket head, then save
742 the previous first overflow entry for later recycling. */
744 free_entry (table, next);
755 /* Scan the bucket overflow. */
/* CURSOR stays one link behind the candidate so unlinking is O(1).  */
756 for (cursor = bucket; cursor->next; cursor = cursor->next)
758 if ((*table->comparator) (entry, cursor->next->data))
760 void *data = cursor->next->data;
764 struct hash_entry *next = cursor->next;
766 /* Unlink the entry to delete, then save the freed entry for later
   recycling. */
768 cursor->next = next->next;
769 free_entry (table, next);
776 /* No entry found. */
780 /* For an already existing hash table, change the number of buckets through
781 specifying CANDIDATE. The contents of the hash table are preserved. The
782 new number of buckets is automatically selected so as to _guarantee_ that
783 the table may receive at least CANDIDATE different user entries, including
784 those already in the table, before any other growth of the hash table size
785 occurs. If TUNING->IS_N_BUCKETS is true, then CANDIDATE specifies the
786 exact number of buckets desired. */
789 hash_rehash (Hash_table *table, unsigned candidate)
791 Hash_table *new_table;
792 struct hash_entry *bucket;
793 struct hash_entry *cursor;
794 struct hash_entry *next;
/* Build a fresh table with the same callbacks and tuning, then migrate
   every entry into it and finally steal its bucket array back into
   TABLE, so TABLE's identity (the pointer users hold) is preserved.  */
796 new_table = hash_initialize (candidate, table->tuning, table->hasher,
797 table->comparator, table->data_freer);
798 if (new_table == NULL)
801 /* Merely reuse the extra old space into the new table. */
/* Under USE_OBSTACK: discard the new table's empty obstack and adopt
   the old one, so existing overflow entries stay valid.  */
803 obstack_free (&new_table->entry_stack, NULL);
804 new_table->entry_stack = table->entry_stack;
806 new_table->free_entry_list = table->free_entry_list;
808 for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
/* NEXT is captured before CURSOR is relinked into the new table.  */
810 for (cursor = bucket; cursor; cursor = next)
812 void *data = cursor->data;
813 struct hash_entry *new_bucket
815 + new_table->hasher (data, new_table->n_buckets));
817 assert (new_bucket < new_table->bucket_limit);
/* Destination bucket already occupied: chain into its overflow.  */
820 if (new_bucket->data)
822 if (cursor == bucket)
824 /* Allocate or recycle an entry, when moving from a bucket
825 header into a bucket overflow. */
826 struct hash_entry *new_entry = allocate_entry (new_table);
828 if (new_entry == NULL)
831 new_entry->data = data;
832 new_entry->next = new_bucket->next;
833 new_bucket->next = new_entry;
837 /* Merely relink an existing entry, when moving from a
838 bucket overflow into a bucket overflow. */
839 cursor->next = new_bucket->next;
840 new_bucket->next = cursor;
845 /* Free an existing entry, when moving from a bucket
846 overflow into a bucket header. Also take care of the
847 simple case of moving from a bucket header into a bucket
   header. */
849 new_bucket->data = data;
850 new_table->n_buckets_used++;
851 if (cursor != bucket)
852 free_entry (new_table, cursor);
/* Migration done: transplant the new table's innards into TABLE.  */
856 free (table->bucket);
857 table->bucket = new_table->bucket;
858 table->bucket_limit = new_table->bucket_limit;
859 table->n_buckets = new_table->n_buckets;
860 table->n_buckets_used = new_table->n_buckets_used;
861 table->free_entry_list = new_table->free_entry_list;
862 /* table->n_entries already holds its value. */
864 table->entry_stack = new_table->entry_stack;
871 /* If ENTRY matches an entry already in the hash table, return the pointer
872 to the entry from the table. Otherwise, insert ENTRY and return ENTRY.
873 Return NULL if the storage required for insertion cannot be allocated. */
876 hash_insert (Hash_table *table, const void *entry)
879 struct hash_entry *bucket;
/* NULL marks an empty bucket head, so NULL data can never be stored.  */
881 assert (entry); /* cannot insert a NULL entry */
883 /* If there's a matching entry already in the table, return that. */
884 if ((data = hash_find_entry (table, entry, &bucket, false)) != NULL)
887 /* ENTRY is not matched, it should be inserted. */
/* Bucket head already taken (hash_find_entry set BUCKET): chain ENTRY
   into the bucket's overflow list.  */
891 struct hash_entry *new_entry = allocate_entry (table);
893 if (new_entry == NULL)
896 /* Add ENTRY in the overflow of the bucket. */
898 new_entry->data = (void *) entry;
899 new_entry->next = bucket->next;
900 bucket->next = new_entry;
902 return (void *) entry;
905 /* Add ENTRY right in the bucket head. */
907 bucket->data = (void *) entry;
/* A fresh head means one more non-empty bucket.  */
909 table->n_buckets_used++;
911 /* If the growth threshold of the buckets in use has been reached, increase
912 the table size and rehash. There's no point in checking the number of
913 entries: if the hashing function is ill-conditioned, rehashing is not
914 likely to improve it. */
916 if (table->n_buckets_used
917 > table->tuning->growth_threshold * table->n_buckets)
919 /* Check more fully, before starting real work. If tuning arguments
920 became invalid, the second check will rely on proper defaults. */
921 check_tuning (table);
922 if (table->n_buckets_used
923 > table->tuning->growth_threshold * table->n_buckets)
925 const Hash_tuning *tuning = table->tuning;
/* New size: scale the bucket count by the growth factor; when
   CANDIDATE expresses entries rather than buckets, also scale by the
   growth threshold so hash_rehash's own division restores it.  */
927 = (unsigned) (tuning->is_n_buckets
928 ? (table->n_buckets * tuning->growth_factor)
929 : (table->n_buckets * tuning->growth_factor
930 * tuning->growth_threshold))
;
932 /* If the rehash fails, arrange to return NULL. */
933 if (!hash_rehash (table, candidate))
/* ENTRY was inserted (possibly after a successful rehash).  */
938 return (void *) entry;
941 /* If ENTRY is already in the table, remove it and return the just-deleted
942 data (the user may want to deallocate its storage). If ENTRY is not in the
943 table, don't modify the table and return NULL. */
946 hash_delete (Hash_table *table, const void *entry)
949 struct hash_entry *bucket;
/* DELETE == true: hash_find_entry unlinks the match itself.  */
951 data = hash_find_entry (table, entry, &bucket, true);
/* The bucket just became empty (its emptiness test is not visible in
   this chunk); account for it.  */
958 table->n_buckets_used--;
960 /* If the shrink threshold of the buckets in use has been reached,
961 rehash into a smaller table. */
963 if (table->n_buckets_used
964 < table->tuning->shrink_threshold * table->n_buckets)
966 /* Check more fully, before starting real work. If tuning arguments
967 became invalid, the second check will rely on proper defaults. */
968 check_tuning (table);
969 if (table->n_buckets_used
970 < table->tuning->shrink_threshold * table->n_buckets)
972 const Hash_tuning *tuning = table->tuning;
/* Mirror image of the growth computation in hash_insert.  */
974 = (unsigned) (tuning->is_n_buckets
975 ? table->n_buckets * tuning->shrink_factor
976 : (table->n_buckets * tuning->shrink_factor
977 * tuning->growth_threshold))
;
/* A failed shrink is harmless, so the return value is ignored.  */
979 hash_rehash (table, candidate);
/* Debugging aid (compiled under TESTING only, per the #endif below):
   dump each bucket index and the entries it chains, treating each entry's
   data as a NUL-terminated string.  */
992 hash_print (const Hash_table *table)
994 struct hash_entry *bucket;
996 for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
998 struct hash_entry *cursor;
/* NOTE(review): `bucket - table->bucket' is a ptrdiff_t but is printed
   with %d; a cast to int (or %td in C99) would be more correct.  */
1001 printf ("%d:\n", bucket - table->bucket);
1003 for (cursor = bucket; cursor; cursor = cursor->next)
/* Assumes entries hold C strings — true only for the test driver.  */
1005 char *s = (char *) cursor->data;
1008 printf (" %s\n", s);
1013 #endif /* TESTING */