summaryrefslogtreecommitdiffstats
path: root/src/kmk/hash.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/kmk/hash.c')
-rw-r--r--src/kmk/hash.c536
1 files changed, 536 insertions, 0 deletions
diff --git a/src/kmk/hash.c b/src/kmk/hash.c
new file mode 100644
index 0000000..070d2e4
--- /dev/null
+++ b/src/kmk/hash.c
@@ -0,0 +1,536 @@
+/* hash.c -- hash table maintenance
+Copyright (C) 1995, 1999, 2002, 2010 Free Software Foundation, Inc.
+Written by Greg McGary <gkm@gnu.org> <greg@mcgary.org>
+
+GNU Make is free software; you can redistribute it and/or modify it under the
+terms of the GNU General Public License as published by the Free Software
+Foundation; either version 3 of the License, or (at your option) any later
+version.
+
+GNU Make is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program. If not, see <http://www.gnu.org/licenses/>. */
+
+#include "makeint.h"
+#include "hash.h"
+#ifdef CONFIG_WITH_STRCACHE2
+# include <assert.h>
+#endif
+
/* Typed allocation helpers.  NOTE: this codebase's xcalloc takes a single
   byte count, unlike ISO C calloc's two arguments.  */
#define CALLOC(t, n) ((t *) xcalloc (sizeof (t) * (n)))
#define MALLOC(t, n) ((t *) xmalloc (sizeof (t) * (n)))
#define REALLOC(o, t, n) ((t *) xrealloc ((o), sizeof (t) * (n)))
/* Allocate and return a fresh copy of the N-element array of T at O.  */
#define CLONE(o, t, n) ((t *) memcpy (MALLOC (t, (n)), (o), sizeof (t) * (n)))

static void hash_rehash __P((struct hash_table* ht));
static unsigned long round_up_2 __P((unsigned long rough));
+
+/* Implement double hashing with open addressing. The table size is
+ always a power of two. The secondary ('increment') hash function
 is forced to return an odd value, in order to be relatively prime
+ to the table size. This guarantees that the increment can
+ potentially hit every slot in the table during collision
+ resolution. */
+
/* Tombstone marker stored in slots whose item was deleted.  It points at
   itself, so it can never compare equal to a real item pointer or to the
   0 that marks a never-used slot.  */
void *hash_deleted_item = &hash_deleted_item;
+
+/* Force the table size to be a power of two, possibly rounding up the
+ given size. */
+
+void
+hash_init (struct hash_table *ht, unsigned long size,
+ hash_func_t hash_1, hash_func_t hash_2, hash_cmp_func_t hash_cmp)
+{
+ ht->ht_size = round_up_2 (size);
+ ht->ht_empty_slots = ht->ht_size;
+ ht->ht_vec = (void**) CALLOC (struct token *, ht->ht_size);
+ if (ht->ht_vec == 0)
+ {
+ fprintf (stderr, _("can't allocate %lu bytes for hash table: memory exhausted"),
+ ht->ht_size * (unsigned long) sizeof (struct token *));
+ exit (MAKE_TROUBLE);
+ }
+
+ ht->ht_capacity = ht->ht_size - (ht->ht_size / 16); /* 93.75% loading factor */
+ ht->ht_fill = 0;
+ ht->ht_collisions = 0;
+ ht->ht_lookups = 0;
+ ht->ht_rehashes = 0;
+ ht->ht_hash_1 = hash_1;
+ ht->ht_hash_2 = hash_2;
+ ht->ht_compare = hash_cmp;
+#ifdef CONFIG_WITH_STRCACHE2
+ ht->ht_strcache = 0;
+ ht->ht_off_string = 0;
+#endif
+}
+
+#ifdef CONFIG_WITH_STRCACHE2
+/* Same as hash_init, except that no callbacks are needed since all
+ keys - including the ones being searched for - are from a string
 cache.  This means that any given string will only have one pointer
 value and that the hash and length can be retrieved very cheaply,
+ thus permitting some nice optimizations.
+
+ STRCACHE points to the string cache, while OFF_STRING gives the
+ offset of the string pointer in the item structures the hash table
 entries point to. */
+void hash_init_strcached (struct hash_table *ht, unsigned long size,
+ struct strcache2 *strcache, unsigned int off_string)
+{
+ hash_init (ht, size, 0, 0, 0);
+ ht->ht_strcache = strcache;
+ ht->ht_off_string = off_string;
+}
+#endif /* CONFIG_WITH_STRCACHE2 */
+
+/* Load an array of items into 'ht'. */
+
/* Insert CARDINALITY items, each SIZE bytes, packed contiguously at
   ITEM_TABLE, into HT.  */
void
hash_load (struct hash_table *ht, void *item_table,
           unsigned long cardinality, unsigned long size)
{
  char *cursor = (char *) item_table;
  unsigned long remaining;
#ifndef CONFIG_WITH_STRCACHE2
  for (remaining = cardinality; remaining != 0; remaining--)
    {
      hash_insert (ht, cursor);
      cursor += size;
    }
#else /* CONFIG_WITH_STRCACHE2 */
  if (ht->ht_strcache)
    for (remaining = cardinality; remaining != 0; remaining--)
      {
        hash_insert_strcached (ht, cursor);
        cursor += size;
      }
  else
    for (remaining = cardinality; remaining != 0; remaining--)
      {
        hash_insert (ht, cursor);
        cursor += size;
      }
#endif /* CONFIG_WITH_STRCACHE2 */
}
+
+/* Returns the address of the table slot matching 'key'. If 'key' is
+ not found, return the address of an empty slot suitable for
+ inserting 'key'. The caller is responsible for incrementing
+ ht_fill on insertion. */
+
void **
hash_find_slot (struct hash_table *ht, const void *key)
{
  void **slot;
  void **deleted_slot = 0;    /* first tombstone seen; reusable for insert */
  unsigned int hash_2 = 0;    /* probe increment, computed lazily below */
  unsigned int hash_1 = (*ht->ht_hash_1) (key);

#ifdef CONFIG_WITH_STRCACHE2
  /* Strcache-backed tables must use hash_find_slot_strcached instead.  */
  assert (ht->ht_strcache == 0);
#endif

  MAKE_STATS (ht->ht_lookups++);
  MAKE_STATS_3 (make_stats_ht_lookups++);
  for (;;)
    {
      /* ht_size is a power of two, so masking is the modulo.  */
      hash_1 &= (ht->ht_size - 1);
      slot = &ht->ht_vec[hash_1];

      if (*slot == 0)
        /* A truly empty slot ends the probe chain: the key is absent.
           Prefer an earlier tombstone so inserts reuse deleted slots.  */
        return (deleted_slot ? deleted_slot : slot);
      if (*slot == hash_deleted_item)
        {
          if (deleted_slot == 0)
            deleted_slot = slot;
        }
      else
        {
          /* Cheap identity test first, full comparison second.  */
          if (key == *slot)
            return slot;
          if ((*ht->ht_compare) (key, *slot) == 0)
            return slot;
          MAKE_STATS (ht->ht_collisions++);
          MAKE_STATS_3 (make_stats_ht_collisions++);
        }
      /* The secondary hash is forced odd so it is relatively prime to
         the power-of-two table size and can reach every slot.  */
      if (!hash_2)
        hash_2 = (*ht->ht_hash_2) (key) | 1;
      hash_1 += hash_2;
    }
}
+
+#ifdef CONFIG_WITH_STRCACHE2
+/* hash_find_slot version for tables created with hash_init_strcached. */
+void **
+hash_find_slot_strcached (struct hash_table *ht, const void *key)
+{
+ void **slot;
+ void **deleted_slot = 0;
+ const char *str1 = *(const char **)((const char *)key + ht->ht_off_string);
+ const char *str2;
+ unsigned int hash_1 = strcache2_calc_ptr_hash (ht->ht_strcache, str1);
+ unsigned int hash_2;
+
+#ifdef CONFIG_WITH_STRCACHE2
+ assert (ht->ht_strcache != 0);
+#endif
+
+ MAKE_STATS (ht->ht_lookups++);
+ MAKE_STATS_3 (make_stats_ht_lookups++);
+
+ /* first iteration unrolled. */
+
+ hash_1 &= (ht->ht_size - 1);
+ slot = &ht->ht_vec[hash_1];
+ if (*slot == 0)
+ return slot;
+ if (*slot != hash_deleted_item)
+ {
+ str2 = *(const char **)((const char *)(*slot) + ht->ht_off_string);
+ if (str1 == str2)
+ return slot;
+
+ MAKE_STATS (ht->ht_collisions++);
+ MAKE_STATS_3 (make_stats_ht_collisions++);
+ }
+ else
+ deleted_slot = slot;
+
+ /* the rest of the loop. */
+
+ hash_2 = strcache2_get_hash (ht->ht_strcache, str1) | 1;
+ hash_1 += hash_2;
+ for (;;)
+ {
+ hash_1 &= (ht->ht_size - 1);
+ slot = &ht->ht_vec[hash_1];
+
+ if (*slot == 0)
+ return (deleted_slot ? deleted_slot : slot);
+ if (*slot == hash_deleted_item)
+ {
+ if (deleted_slot == 0)
+ deleted_slot = slot;
+ }
+ else
+ {
+ str2 = *(const char **)((const char *)(*slot) + ht->ht_off_string);
+ if (str1 == str2)
+ return slot;
+
+ MAKE_STATS (ht->ht_collisions++);
+ MAKE_STATS_3 (make_stats_ht_collisions++);
+ }
+
+ hash_1 += hash_2;
+ }
+}
+#endif /* CONFIG_WITH_STRCACHE2 */
+
/* Return the item stored under KEY, or 0 if there is none.  */
void *
hash_find_item (struct hash_table *ht, const void *key)
{
  void **slot = hash_find_slot (ht, key);
  if (HASH_VACANT (*slot))
    return 0;
  return *slot;
}
+
+#ifdef CONFIG_WITH_STRCACHE2
/* strcache2 variant of hash_find_item.  */
void *
hash_find_item_strcached (struct hash_table *ht, const void *key)
{
  void **slot = hash_find_slot_strcached (ht, key);
  if (HASH_VACANT (*slot))
    return 0;
  return *slot;
}
+#endif /* CONFIG_WITH_STRCACHE2 */
+
/* Insert ITEM into HT; return the item it displaced, or 0 if its key
   was not already present.  */
void *
hash_insert (struct hash_table *ht, const void *item)
{
  void **slot = hash_find_slot (ht, item);
  const void *displaced = *slot;

  hash_insert_at (ht, item, slot);
  if (HASH_VACANT (displaced))
    return 0;
  return (void *) displaced;
}
+
+#ifdef CONFIG_WITH_STRCACHE2
/* strcache2 variant of hash_insert.  */
void *
hash_insert_strcached (struct hash_table *ht, const void *item)
{
  void **slot = hash_find_slot_strcached (ht, item);
  const void *displaced = slot ? *slot : 0;

  hash_insert_at (ht, item, slot);
  if (HASH_VACANT (displaced))
    return 0;
  return (void *) displaced;
}
+#endif /* CONFIG_WITH_STRCACHE2 */
+
/* Store ITEM into SLOT (as returned by a hash_find_slot variant), keeping
   the fill/empty counters consistent and rehashing if the table has run
   low on empty slots.  Returns the slot now holding ITEM, which may
   differ from SLOT after a rehash.  */
void *
hash_insert_at (struct hash_table *ht, const void *item, const void *slot)
{
  const void *old_item = *(void **) slot;
  if (HASH_VACANT (old_item))
    {
      ht->ht_fill++;
      /* Only a never-used slot (not a tombstone) reduces the empty count.  */
      if (old_item == 0)
        ht->ht_empty_slots--;
      old_item = item;
    }
  *(void const **) slot = item;
  if (ht->ht_empty_slots < ht->ht_size - ht->ht_capacity)
    {
      /* Probe chains degrade as empty slots vanish: rebuild the table,
         then re-locate ITEM since every slot address just changed.  */
      hash_rehash (ht);
#ifdef CONFIG_WITH_STRCACHE2
      if (ht->ht_strcache)
        return (void *)hash_find_slot_strcached (ht, item);
#endif /* CONFIG_WITH_STRCACHE2 */
      return (void *) hash_find_slot (ht, item);
    }
  else
    return (void *) slot;
}
+
/* Remove the item matching ITEM's key from HT; return the removed item,
   or 0 if it was not present.  */
void *
hash_delete (struct hash_table *ht, const void *item)
{
  return hash_delete_at (ht, hash_find_slot (ht, item));
}
+
+#ifdef CONFIG_WITH_STRCACHE2
/* strcache2 variant of hash_delete.  */
void *
hash_delete_strcached (struct hash_table *ht, const void *item)
{
  return hash_delete_at (ht, hash_find_slot_strcached (ht, item));
}
+#endif /* CONFIG_WITH_STRCACHE2 */
+
+void *
+hash_delete_at (struct hash_table *ht, const void *slot)
+{
+ void *item = *(void **) slot;
+ if (!HASH_VACANT (item))
+ {
+ *(void const **) slot = hash_deleted_item;
+ ht->ht_fill--;
+ return item;
+ }
+ else
+ return 0;
+}
+
+void
+hash_free_items (struct hash_table *ht)
+{
+ void **vec = ht->ht_vec;
+ void **end = &vec[ht->ht_size];
+ for (; vec < end; vec++)
+ {
+ void *item = *vec;
+ if (!HASH_VACANT (item))
+ free (item);
+ *vec = 0;
+ }
+ ht->ht_fill = 0;
+ ht->ht_empty_slots = ht->ht_size;
+}
+
+#ifdef CONFIG_WITH_ALLOC_CACHES
+void
+hash_free_items_cached (struct hash_table *ht, struct alloccache *cache)
+{
+ void **vec = ht->ht_vec;
+ void **end = &vec[ht->ht_size];
+ for (; vec < end; vec++)
+ {
+ void *item = *vec;
+ if (!HASH_VACANT (item))
+ alloccache_free (cache, item);
+ *vec = 0;
+ }
+ ht->ht_fill = 0;
+ ht->ht_empty_slots = ht->ht_size;
+}
+#endif /* CONFIG_WITH_ALLOC_CACHES */
+
+void
+hash_delete_items (struct hash_table *ht)
+{
+ void **vec = ht->ht_vec;
+ void **end = &vec[ht->ht_size];
+ for (; vec < end; vec++)
+ *vec = 0;
+ ht->ht_fill = 0;
+ ht->ht_collisions = 0;
+ ht->ht_lookups = 0;
+ ht->ht_rehashes = 0;
+ ht->ht_empty_slots = ht->ht_size;
+}
+
+void
+hash_free (struct hash_table *ht, int free_items)
+{
+ if (free_items)
+ hash_free_items (ht);
+ else
+ {
+ ht->ht_fill = 0;
+ ht->ht_empty_slots = ht->ht_size;
+ }
+ free (ht->ht_vec);
+ ht->ht_vec = 0;
+ ht->ht_capacity = 0;
+}
+
+#ifdef CONFIG_WITH_ALLOC_CACHES
+void
+hash_free_cached (struct hash_table *ht, int free_items, struct alloccache *cache)
+{
+ if (free_items)
+ hash_free_items_cached (ht, cache);
+ else
+ {
+ ht->ht_fill = 0;
+ ht->ht_empty_slots = ht->ht_size;
+ }
+ free (ht->ht_vec);
+ ht->ht_vec = 0;
+ ht->ht_capacity = 0;
+}
+#endif /* CONFIG_WITH_ALLOC_CACHES */
+
+void
+hash_map (struct hash_table *ht, hash_map_func_t map)
+{
+ void **slot;
+ void **end = &ht->ht_vec[ht->ht_size];
+
+ for (slot = ht->ht_vec; slot < end; slot++)
+ {
+ if (!HASH_VACANT (*slot))
+ (*map) (*slot);
+ }
+}
+
+void
+hash_map_arg (struct hash_table *ht, hash_map_arg_func_t map, void *arg)
+{
+ void **slot;
+ void **end = &ht->ht_vec[ht->ht_size];
+
+ for (slot = ht->ht_vec; slot < end; slot++)
+ {
+ if (!HASH_VACANT (*slot))
+ (*map) (*slot, arg);
+ }
+}
+
+/* Double the size of the hash table in the event of overflow... */
+
static void
hash_rehash (struct hash_table *ht)
{
  unsigned long old_ht_size = ht->ht_size;
  void **old_vec = ht->ht_vec;
  void **ovp;

  /* Only grow when genuinely full of live items; a table clogged with
     tombstones is rebuilt at the same size, which by itself reclaims
     all the deleted slots.  */
  if (ht->ht_fill >= ht->ht_capacity)
    {
      ht->ht_size *= 2;
      ht->ht_capacity = ht->ht_size - (ht->ht_size >> 4);
    }
  ht->ht_rehashes++;
  ht->ht_vec = (void **) CALLOC (struct token *, ht->ht_size);

#ifndef CONFIG_WITH_STRCACHE2
  for (ovp = old_vec; ovp < &old_vec[old_ht_size]; ovp++)
    {
      if (! HASH_VACANT (*ovp))
        {
          /* The new vector holds no tombstones, so the slot found is
             always truly empty -- a plain store suffices.  */
          void **slot = hash_find_slot (ht, *ovp);
          *slot = *ovp;
        }
    }
#else /* CONFIG_WITH_STRCACHE2 */
  if (ht->ht_strcache)
    for (ovp = old_vec; ovp < &old_vec[old_ht_size]; ovp++)
      {
        if (! HASH_VACANT (*ovp))
          {
            void **slot = hash_find_slot_strcached (ht, *ovp);
            *slot = *ovp;
          }
      }
  else
    for (ovp = old_vec; ovp < &old_vec[old_ht_size]; ovp++)
      {
        if (! HASH_VACANT (*ovp))
          {
            void **slot = hash_find_slot (ht, *ovp);
            *slot = *ovp;
          }
      }
#endif /* CONFIG_WITH_STRCACHE2 */
  /* Tombstones were dropped during the copy, so every unfilled slot is
     empty again.  */
  ht->ht_empty_slots = ht->ht_size - ht->ht_fill;
  free (old_vec);
}
+
/* Print load and collision statistics for HT to OUT_FILE.  The
   collision/lookup counters are only updated under MAKE_STATS builds.  */
void
hash_print_stats (struct hash_table *ht, FILE *out_FILE)
{
  /* GKM FIXME: honor NO_FLOAT */
  /* NOTE(review): ht_fill/ht_size are printed with %ld though they appear
     to be unsigned long elsewhere in this file -- harmless at realistic
     sizes, but %lu would be strictly correct; confirm against hash.h.  */
  fprintf (out_FILE, _("Load=%ld/%ld=%.0f%%, "), ht->ht_fill, ht->ht_size,
           100.0 * (double) ht->ht_fill / (double) ht->ht_size);
  fprintf (out_FILE, _("Rehash=%d, "), ht->ht_rehashes);
  MAKE_STATS(
  fprintf (out_FILE, _("Collisions=%ld/%ld=%.0f%%"), ht->ht_collisions, ht->ht_lookups,
           (ht->ht_lookups
            ? (100.0 * (double) ht->ht_collisions / (double) ht->ht_lookups)
            : 0));
  );
}
+
+/* Dump all items into a NULL-terminated vector. Use the
+ user-supplied vector, or malloc one. */
+
+void **
+hash_dump (struct hash_table *ht, void **vector_0, qsort_cmp_t compare)
+{
+ void **vector;
+ void **slot;
+ void **end = &ht->ht_vec[ht->ht_size];
+
+ if (vector_0 == 0)
+ vector_0 = MALLOC (void *, ht->ht_fill + 1);
+ vector = vector_0;
+
+ for (slot = ht->ht_vec; slot < end; slot++)
+ if (!HASH_VACANT (*slot))
+ *vector++ = *slot;
+ *vector = 0;
+
+ if (compare)
+ qsort (vector_0, ht->ht_fill, sizeof (void *), compare);
+ return vector_0;
+}
+
+/* Round a given number up to the nearest power of 2. */
+
/* Return the smallest power of two strictly greater than ROUGH:
   round_up_2 (0) is 1, round_up_2 (5) is 8, and an exact power of two
   is bumped to the next one (8 -> 16).  Wraps to 0 if ROUGH has its top
   bit set.  */
static unsigned long
round_up_2 (unsigned long rough)
{
  unsigned int shift;

  /* Smear the most significant set bit into every lower position.
     Doubling the shift each pass covers any word width while never
     shifting by the full width (which would be undefined behavior),
     so no #if on ULONG_MAX is needed.  */
  for (shift = 1; shift < sizeof (unsigned long) * 8; shift <<= 1)
    rough |= rough >> shift;

  return rough + 1;
}