/* Copyright (C) 2002, 2004 Christopher Clark <firstname.lastname@cl.cam.ac.uk>
 *
 * Modified 2009 by Barret Rhoden <brho@cs.berkeley.edu>
 * - No longer frees keys or values.  It's up to the client to do that.
 * - Uses the slab allocator for hash entry allocation.
 * - Merges the iterator code with the main hash table code, mostly to avoid
 *   externing the hentry cache. */

#include <ros/common.h>
#include <hashtable.h>
/* for memset, warn, the slab allocator, and kmalloc/krealloc */
#include <string.h>
#include <assert.h>
#include <slab.h>
#include <kmalloc.h>

// File was not annotated and caused ivy based compilation to fail
// Someone should really annotate it.

/*
 * Credit for primes table: Aaron Krowne
 * http://br.endernet.org/~akrowne/
 * http://planetmath.org/encyclopedia/GoodHashTablePrimes.html
 */

static const unsigned int primes[] = {
    53, 97, 193, 389,
    769, 1543, 3079, 6151,
    12289, 24593, 49157, 98317,
    196613, 393241, 786433, 1572869,
    3145739, 6291469, 12582917, 25165843,
    50331653, 100663319, 201326611, 402653189,
    805306457, 1610612741
};
const unsigned int prime_table_length = sizeof(primes)/sizeof(primes[0]);
/* Integer version of the old 0.65 max load factor: 13/20 == 0.65 */
#define APPLY_MAX_LOAD_FACTOR(size) \
    ((size * 13) / 20)
//const float max_load_factor = 0.65;

struct kmem_cache *hentry_cache;

/* Call this once on bootup, after initializing the slab allocator. */
void hashtable_init(void)
{
    hentry_cache = kmem_cache_create("hash_entry", sizeof(struct hash_entry),
                                     __alignof__(struct hash_entry), 0, 0, 0);
}

/* Common hash/equals functions.  Don't call these directly. */
size_t __generic_hash(void *k)
{
    /* 0x9e370001UL used by Linux (32 bit)
     * (prime approx to the golden ratio to the max integer, IAW Knuth) */
    return (size_t)k * 0x9e370001UL;
}

ssize_t __generic_eq(void *k1, void *k2)
{
    return k1 == k2;
}

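/* Illustrative note, not from the original code: since __generic_hash()
 * multiplies the raw pointer bits by a golden-ratio prime and __generic_eq()
 * compares keys by pointer identity, these helpers only suit keys that are
 * themselves pointer-sized integers, e.g. (hypothetical caller):
 *
 *    hashtable_insert(ht, (void*)(uintptr_t)pid, the_proc);
 *    struct proc *p = hashtable_search(ht, (void*)(uintptr_t)pid);
 */
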
/*****************************************************************************/
hashtable_t *
create_hashtable(size_t minsize,
                 size_t (*hashf) (void*),
                 ssize_t (*eqf) (void*,void*))
{
    hashtable_t *h;
    size_t pindex, size = primes[0];

    /* Check requested hashtable isn't too large */
    if (minsize > (1u << 30)) return NULL;
    /* Enforce size as prime */
    for (pindex=0; pindex < prime_table_length; pindex++) {
        if (primes[pindex] > minsize) { size = primes[pindex]; break; }
    }
    h = (hashtable_t *)kmalloc(sizeof(hashtable_t), 0);
    if (NULL == h) return NULL; /*oom*/
    h->table = (hash_entry_t **)kmalloc(sizeof(hash_entry_t*) * size, 0);
    if (NULL == h->table) { kfree(h); return NULL; } /*oom*/
    memset(h->table, 0, size * sizeof(hash_entry_t *));
    h->tablelength = size;
    h->primeindex = pindex;
    h->entrycount = 0;
    h->hashfn = hashf;
    h->eqfn = eqf;
    h->loadlimit = APPLY_MAX_LOAD_FACTOR(size);
    return h;
}

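/* Usage sketch (hypothetical caller, not part of this file): minsize is only
 * a hint; the real size is the next prime above it, so asking for 100 buckets
 * actually gets 193.
 *
 *    struct hashtable *ht;
 *
 *    hashtable_init();    // once, at boot, after the slab allocator is up
 *    ht = create_hashtable(100, __generic_hash, __generic_eq);
 *    if (!ht)
 *        warn("create_hashtable failed (OOM or minsize too large)");
 */
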
/*****************************************************************************/
size_t
hash(hashtable_t *h, void *k)
{
    /* Aim to protect against poor hash functions by adding logic here
     * - logic taken from java 1.4 hashtable source */
    size_t i = h->hashfn(k);
    i += ~(i << 9);
    i ^= ((i >> 14) | (i << 18)); /* >>> */
    i += (i << 4);
    i ^= ((i >> 10) | (i << 22)); /* >>> */
    return i;
}

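/* Note (assumption): indexFor() is presumably the helper from hashtable.h
 * that reduces a hash value to a bucket index, conceptually:
 *
 *    // sketch of the assumed mapping, not the actual definition
 *    #define indexFor(tablelength, hashvalue) ((hashvalue) % (tablelength))
 *
 * Keeping the table size prime makes this modulo spread poorly distributed
 * hash values across the buckets more evenly. */
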
/*****************************************************************************/
ssize_t
hashtable_expand(hashtable_t *h)
{
    /* Double the size of the table to accommodate more entries */
    hash_entry_t **newtable;
    hash_entry_t *e;
    hash_entry_t **pE;
    size_t newsize, i, index;

    /* Check we're not hitting max capacity */
    if (h->primeindex == (prime_table_length - 1)) return 0;
    newsize = primes[++(h->primeindex)];

    newtable = (hash_entry_t **)kmalloc(sizeof(hash_entry_t*) * newsize, 0);
    if (NULL != newtable) {
        memset(newtable, 0, newsize * sizeof(hash_entry_t*));
        /* This algorithm is not 'stable'. ie. it reverses the list
         * when it transfers entries between the tables */
        for (i = 0; i < h->tablelength; i++) {
            while (NULL != (e = h->table[i])) {
                h->table[i] = e->next;
                index = indexFor(newsize,e->h);
                e->next = newtable[index];
                newtable[index] = e;
            }
        }
        kfree(h->table);
        h->table = newtable;
    } else {
        /* Plan B: realloc instead */
        newtable = (hash_entry_t**)
                   krealloc(h->table, newsize*sizeof(hash_entry_t*), 0);
        if (NULL == newtable) { (h->primeindex)--; return 0; }
        h->table = newtable;
        /* Zero only the new tail of the bucket array, counted in bytes */
        memset(&newtable[h->tablelength], 0,
               (newsize - h->tablelength) * sizeof(hash_entry_t*));
        for (i = 0; i < h->tablelength; i++) {
            for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) {
                index = indexFor(newsize,e->h);
                if (index == i) {
                    pE = &(e->next);
                } else {
                    *pE = e->next;
                    e->next = newtable[index];
                    newtable[index] = e;
                }
            }
        }
    }
    h->tablelength = newsize;
    h->loadlimit = APPLY_MAX_LOAD_FACTOR(newsize);
    return -1;
}

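/* Worked example (illustrative, assuming the 13/20 load factor above): the
 * initial 53-bucket table has loadlimit 53 * 13 / 20 = 34, so the 35th
 * insertion triggers hashtable_expand().  The table grows to the next prime,
 * 97, each entry's cached e->h is re-reduced into the larger bucket array,
 * and the new loadlimit becomes 97 * 13 / 20 = 63. */
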
/*****************************************************************************/
size_t
hashtable_count(hashtable_t *h)
{
    return h->entrycount;
}

/*****************************************************************************/
ssize_t
hashtable_insert(hashtable_t *h, void *k, void *v)
{
    /* This method allows duplicate keys - but they shouldn't be used */
    size_t index;
    hash_entry_t *e;

    if (++(h->entrycount) > h->loadlimit) {
        /* Ignore the return value. If expand fails, we should
         * still try cramming just this value into the existing table
         * -- we may not have memory for a larger table, but one more
         * element may be ok. Next time we insert, we'll try expanding again.*/
        hashtable_expand(h);
    }
    e = (hash_entry_t *)kmem_cache_alloc(hentry_cache, 0);
    if (NULL == e) { --(h->entrycount); return 0; } /*oom*/
    e->h = hash(h,k);
    index = indexFor(h->tablelength,e->h);
    e->k = k;
    e->v = v;
    e->next = h->table[index];
    h->table[index] = e;
    return -1;
}

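/* Usage sketch (hypothetical caller): a duplicate key leaves two entries, and
 * search returns the newer one (it sits at the head of the chain), so callers
 * normally check before inserting.  A zero return means the entry could not
 * be allocated.
 *
 *    if (!hashtable_search(ht, key) && !hashtable_insert(ht, key, value))
 *        warn("hashtable_insert failed: out of memory");
 */
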
/*****************************************************************************/
void * /* returns value associated with key */
hashtable_search(hashtable_t *h, void *k)
{
    hash_entry_t *e;
    size_t hashvalue, index;

    hashvalue = hash(h,k);
    index = indexFor(h->tablelength,hashvalue);
    for (e = h->table[index]; NULL != e; e = e->next) {
        /* Check hash value to short circuit heavier comparison */
        if ((hashvalue == e->h) && (h->eqfn(k, e->k))) return e->v;
    }
    return NULL;
}

/*****************************************************************************/
void * /* returns value associated with key */
hashtable_remove(hashtable_t *h, void *k)
{
    /* TODO: consider compacting the table when the load factor drops enough,
     *       or provide a 'compact' method. */
    hash_entry_t *e, **pE;
    void *v;
    size_t hashvalue, index;

    hashvalue = hash(h,k);
    index = indexFor(h->tablelength,hashvalue);
    pE = &(h->table[index]);
    for (e = *pE; NULL != e; pE = &(e->next), e = e->next) {
        /* Check hash value to short circuit heavier comparison */
        if ((hashvalue == e->h) && (h->eqfn(k, e->k))) {
            *pE = e->next;
            h->entrycount--;
            v = e->v;
            kmem_cache_free(hentry_cache, e);
            return v;
        }
    }
    return NULL;
}

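/* Usage sketch (hypothetical caller): remove frees only the hash entry, never
 * the key or the value (see the header comment), so the caller cleans up
 * whatever the returned pointer refers to:
 *
 *    void *v = hashtable_remove(ht, key);    // NULL if the key isn't present
 *
 *    if (v)
 *        kfree(v);    // assuming the value was kmalloc'd by the caller
 */
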
/*****************************************************************************/
void hashtable_destroy(hashtable_t *h)
{
    size_t i;
    hash_entry_t *e, *f, **table = h->table;

    if (hashtable_count(h))
        warn("Destroying a non-empty hashtable, clean up after yourself!\n");
    for (i = 0; i < h->tablelength; i++) {
        for (e = table[i]; e != NULL; e = f) {
            f = e->next;
            kmem_cache_free(hentry_cache, e);
        }
    }
    kfree(h->table);
    kfree(h);
}

/*****************************************************************************/
/* hashtable_iterator - iterator constructor, be sure to kfree this when
 *                      you're done with it.
 *
 * If the htable isn't empty, e and index will refer to the first entry. */
hashtable_itr_t *
hashtable_iterator(hashtable_t *h)
{
    size_t i, tablelength;
    hashtable_itr_t *itr = (hashtable_itr_t *)
        kmalloc(sizeof(hashtable_itr_t), 0);
    if (NULL == itr) return NULL;
    itr->h = h;
    itr->e = NULL;
    itr->parent = NULL;
    tablelength = h->tablelength;
    itr->index = tablelength;
    if (0 == h->entrycount) return itr;

    for (i = 0; i < tablelength; i++) {
        if (NULL != h->table[i]) {
            itr->e = h->table[i];
            itr->index = i;
            break;
        }
    }
    return itr;
}

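/* Usage sketch (hypothetical caller): manual iteration.  The iterator itself
 * is kmalloc'd, so kfree it when done, and only dereference it while the
 * table is non-empty (mirroring hash_for_each() below).
 *
 *    hashtable_itr_t *itr = hashtable_iterator(ht);
 *
 *    if (itr) {
 *        if (hashtable_count(ht)) {
 *            do {
 *                void *k = hashtable_iterator_key(itr);
 *                void *v = hashtable_iterator_value(itr);
 *                // ... use k and v ...
 *            } while (hashtable_iterator_advance(itr));
 *        }
 *        kfree(itr);
 *    }
 */
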
/*****************************************************************************/
/* key   - return the key of the (key,value) pair at the current position */
/* value - return the value of the (key,value) pair at the current position */

void *
hashtable_iterator_key(hashtable_itr_t *i)
{ return i->e->k; }

void *
hashtable_iterator_value(hashtable_itr_t *i)
{ return i->e->v; }

/*****************************************************************************/
/* advance - advance the iterator to the next element
 *           returns zero if advanced to end of table */
ssize_t
hashtable_iterator_advance(hashtable_itr_t *itr)
{
    size_t j, tablelength;
    hash_entry_t **table;
    hash_entry_t *next;

    if (NULL == itr->e) return 0; /* stupidity check */
    next = itr->e->next;
    if (NULL != next) {
        itr->parent = itr->e;
        itr->e = next;
        return -1;
    }
    tablelength = itr->h->tablelength;
    itr->parent = NULL;
    if (tablelength <= (j = ++(itr->index))) {
        itr->e = NULL;
        return 0;
    }
    table = itr->h->table;
    while (NULL == (next = table[j])) {
        if (++j >= tablelength) {
            itr->index = tablelength;
            itr->e = NULL;
            return 0;
        }
    }
    itr->index = j;
    itr->e = next;
    return -1;
}

/*****************************************************************************/
/* remove - remove the entry at the current iterator position
 *          and advance the iterator, if there is a successive element.
 *          If you want the value, read it before you remove:
 *          beware memory leaks if you don't.
 *          Returns zero if end of iteration. */
ssize_t
hashtable_iterator_remove(hashtable_itr_t *itr)
{
    hash_entry_t *remember_e, *remember_parent;
    ssize_t ret;

    /* Do the removal */
    if (NULL == (itr->parent)) {
        /* element is head of a chain */
        itr->h->table[itr->index] = itr->e->next;
    } else {
        /* element is mid-chain */
        itr->parent->next = itr->e->next;
    }
    /* itr->e is now outside the hashtable */
    remember_e = itr->e;
    itr->h->entrycount--;

    /* Advance the iterator, correcting the parent */
    remember_parent = itr->parent;
    ret = hashtable_iterator_advance(itr);
    if (itr->parent == remember_e) { itr->parent = remember_parent; }
    kmem_cache_free(hentry_cache, remember_e);
    return ret;
}

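/* Usage sketch (hypothetical caller): per the comment above, read the value
 * out before removing, since the entry itself is freed by the call:
 *
 *    void *v = hashtable_iterator_value(itr);
 *    ssize_t more = hashtable_iterator_remove(itr);    // also advances
 *
 *    kfree(v);    // assuming the caller owns the value and kmalloc'd it
 */
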
/*****************************************************************************/
ssize_t /* returns zero if not found */
hashtable_iterator_search(hashtable_itr_t *itr,
                          hashtable_t *h, void *k)
{
    hash_entry_t *e, *parent = NULL;
    size_t hashvalue, index;

    hashvalue = hash(h,k);
    index = indexFor(h->tablelength,hashvalue);
    for (e = h->table[index]; NULL != e; parent = e, e = e->next) {
        /* Check hash value to short circuit heavier comparison */
        if ((hashvalue == e->h) && (h->eqfn(k, e->k))) {
            itr->index = index;
            itr->e = e;
            itr->parent = parent;
            itr->h = h;
            return -1;
        }
    }
    return 0;
}

/* Runs func on each member of the hash table */
void hash_for_each(struct hashtable *hash, void func(void*))
{
    if (hashtable_count(hash)) {
        struct hashtable_itr *iter = hashtable_iterator(hash);

        do {
            void *item = hashtable_iterator_value(iter);
            func(item);
        } while (hashtable_iterator_advance(iter));
        kfree(iter);
    }
}

/* Runs func on each member of the hash table, removing the item after
 * processing it.  Make sure func frees the item, o/w you'll leak. */
void hash_for_each_remove(struct hashtable *hash, void func(void*))
{
    if (hashtable_count(hash)) {
        struct hashtable_itr *iter = hashtable_iterator(hash);

        do {
            void *item = hashtable_iterator_value(iter);
            func(item);
        } while (hashtable_iterator_remove(iter));
        kfree(iter);
    }
}

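/* Teardown sketch (hypothetical caller): drain the table with
 * hash_for_each_remove(), whose func must free each item, then destroy the
 * now-empty table so hashtable_destroy() doesn't warn:
 *
 *    static void free_item(void *item)
 *    {
 *        kfree(item);    // assuming values were kmalloc'd by the caller
 *    }
 *
 *    hash_for_each_remove(ht, free_item);
 *    hashtable_destroy(ht);
 */
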
/*
 * Copyright (c) 2002, 2004, Christopher Clark
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * * Neither the name of the original author; nor the names of any contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */