1 /******************************************************************************
2 *
3 * Copyright (c) 2009 Citrix Systems, Inc. (Grzegorz Milos)
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; If not, see <http://www.gnu.org/licenses/>.
17 */
18 #include <assert.h>
19 #include <errno.h>
20 #include <math.h>
21 #include <pthread.h>
22 #include <stddef.h>
23 #include <stdint.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <time.h>
27 #include <unistd.h>
28
29 #include "bidir-hash.h"
30
31 static const uint32_t hash_sizes[] = {53, 97, 193, 389, 769, 1543, 3079, 6151,
32 12289, 24593, 49157, 98317, 196613, 393241, 786433, 1572869, 3145739,
33 6291469, 12582917, 25165843, 50331653, 100663319, 201326611, 402653189,
34 805306457, 1610612741};
35 static const uint16_t hash_sizes_len =
36 sizeof(hash_sizes)/sizeof(hash_sizes[0]);
37 static const float hash_max_load_fact = 0.65;
38 static const float hash_min_load_fact = 0.10;
39
40 /* How many buckets will be covered by a single rw lock */
41 #define BUCKETS_PER_LOCK 64
42 #define nr_locks(_nr_buckets) (1 + (_nr_buckets) / BUCKETS_PER_LOCK)
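/* Worked example (illustrative only): for the 769-bucket size above,
 * __hash_init() sets max_load = ceilf(0.65 * 769) = 500 and
 * min_load = ceilf(0.10 * 769) = 77, so a grow is attempted when an insert
 * would make nr_ent + 1 > 500 and a shrink when a removal leaves
 * nr_ent < 77.  Such a table is covered by nr_locks(769) = 1 + 769/64 = 13
 * bucket rwlocks in each direction. */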
43
44
45 #define HASH_LOCK \
46 pthread_rwlock_t hash_lock
47
48 #define BUCKET_LOCK \
49 pthread_rwlock_t bucket_lock
50
51 struct hash_entry
52 {
53 __k_t key;
54 __v_t value;
55 /* This structure will belong to two buckets, one in each hash table */
56 struct hash_entry *key_next;
57 struct hash_entry *value_next;
58 };
59
60 struct bucket
61 {
62 struct hash_entry *hash_entry;
63 };
64
65 struct bucket_lock
66 {
67 BUCKET_LOCK;
68 };
69
70 struct __hash
71 {
72 int lock_alive;
73 HASH_LOCK; /* protects:
74 * *_tab, tab_size, size_idx, *_load
75 * (all writes with wrlock)
76 */
77 uint32_t nr_ent; /* # entries held in hashtables */
78 struct bucket *key_tab; /* forward mapping hashtable */
79 struct bucket *value_tab; /* backward mapping hashtable */
80 struct bucket_lock *key_lock_tab; /* key table bucket locks */
81 struct bucket_lock *value_lock_tab; /* value table bucket locks */
82     uint32_t tab_size;               /* # buckets in hashtables     */
83 uint16_t size_idx; /* table size index */
84     uint32_t max_load;               /* max # entries before the table grows   */
85     uint32_t min_load;               /* min # entries before the table shrinks */
86 };
87
88 struct __hash *__hash_init (struct __hash *h, uint32_t min_size);
89 int __key_lookup (struct __hash *h, __k_t k, __v_t *vp);
90 int __value_lookup(struct __hash *h, __v_t v, __k_t *kp);
91 int __insert (struct __hash *h, __k_t k, __v_t v);
92 int __key_remove (struct __hash *h, __k_t k, __v_t *vp);
93 int __value_remove(struct __hash *h, __v_t v, __k_t *kp);
94 int __hash_destroy(struct __hash *h,
95 void (*entry_consumer)(__k_t k, __v_t v, void *p),
96 void *d);
97 int __hash_iterator(struct __hash *h,
98 int (*entry_consumer)(__k_t k, __v_t v, void *p),
99 void *d);
100 static void hash_resize(struct __hash *h);
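/*
 * Minimal usage sketch for the API declared above (illustrative only: the
 * concrete __k_t/__v_t types and the __key_hash/__key_cmp/__value_hash/
 * __value_cmp helpers come from bidir-hash.h, and the names used below are
 * hypothetical):
 *
 *   struct __hash *h = __hash_init(mem, 1000);   mem must hold a struct __hash
 *   __insert(h, k, v);                           returns 1 on success, 0 if
 *                                                no entry could be allocated
 *   __v_t v2;
 *   if(__key_lookup(h, k, &v2) == 1)             forward lookup, key -> value
 *       ...
 *   __k_t k2;
 *   if(__value_lookup(h, v, &k2) == 1)           reverse lookup, value -> key
 *       ...
 *   __key_remove(h, k, &v2);                     unlinks from both tables
 *   __hash_destroy(h, NULL, NULL);
 *
 * All of these return -ENOLCK if a hash/bucket lock cannot be taken.
 */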
101
102 #if defined(__arm__)
103 static inline void atomic_inc(uint32_t *v)
104 {
105 unsigned long tmp;
106 int result;
107
108 __asm__ __volatile__("@ atomic_inc\n"
109 "1: ldrex %0, [%3]\n"
110 " add %0, %0, #1\n"
111 " strex %1, %0, [%3]\n"
112 " teq %1, #0\n"
113 " bne 1b"
114 : "=&r" (result), "=&r" (tmp), "+Qo" (*v)
115 : "r" (v)
116 : "cc");
117 }
118 static inline void atomic_dec(uint32_t *v)
119 {
120 unsigned long tmp;
121 int result;
122
123 __asm__ __volatile__("@ atomic_dec\n"
124 "1: ldrex %0, [%3]\n"
125 " sub %0, %0, #1\n"
126 " strex %1, %0, [%3]\n"
127 " teq %1, #0\n"
128 " bne 1b"
129 : "=&r" (result), "=&r" (tmp), "+Qo" (*v)
130 : "r" (v)
131 : "cc");
132 }
133
134 #elif defined(__aarch64__)
135
136 static inline void atomic_inc(uint32_t *v)
137 {
138 unsigned long tmp;
139 int result;
140
141 asm volatile("// atomic_inc\n"
142 "1: ldxr %w0, [%3]\n"
143 " add %w0, %w0, #1\n"
144 " stxr %w1, %w0, [%3]\n"
145 " cbnz %w1, 1b"
146 : "=&r" (result), "=&r" (tmp), "+o" (v)
147 : "r" (v)
148 : "cc");
149 }
150
151 static inline void atomic_dec(uint32_t *v)
152 {
153 unsigned long tmp;
154 int result;
155
156 asm volatile("// atomic_dec\n"
157 "1: ldxr %w0, [%3]\n"
158 " sub %w0, %w0, #1\n"
159 " stxr %w1, %w0, [%3]\n"
160 " cbnz %w1, 1b"
161 : "=&r" (result), "=&r" (tmp), "+o" (v)
162 : "r" (v)
163 : "cc");
164 }
165
166 #else /* __x86__ */
167 static inline void atomic_inc(uint32_t *v)
168 {
169 asm volatile (
170 "lock ; incl %0"
171 : "=m" (*(volatile uint32_t *)v)
172 : "m" (*(volatile uint32_t *)v) );
173 }
174 static inline void atomic_dec(uint32_t *v)
175 {
176 asm volatile (
177 "lock ; decl %0"
178 : "=m" (*(volatile uint32_t *)v)
179 : "m" (*(volatile uint32_t *)v) );
180 }
181 #endif
182
183 #ifdef BIDIR_USE_STDMALLOC
184
185 static void* alloc_entry(struct __hash *h, int size)
186 {
187 return malloc(size);
188 }
189
190 static void alloc_buckets(struct __hash *h,
191 int nr_buckets,
192 struct bucket **bucket_tab,
193 struct bucket_lock **bucket_locks_tab)
194 {
195 *bucket_tab = (struct bucket*)
196 malloc(nr_buckets * sizeof(struct bucket));
197 *bucket_locks_tab = (struct bucket_lock*)
198 malloc(nr_locks(nr_buckets) * sizeof(struct bucket_lock));
199 }
200
201 static void free_entry(struct __hash *h, void *p)
202 {
203 free(p);
204 }
205
206 static void free_buckets(struct __hash *h,
207 struct bucket *buckets,
208 struct bucket_lock *bucket_locks)
209 {
210 free(buckets);
211 free(bucket_locks);
212 }
213
214 static int max_entries(struct __hash *h)
215 {
216     /* There are no explicit restrictions on how many entries we can store */
217 return -1;
218 }
219
220 #else
221
222 /*****************************************************************************/
223 /** Memory allocator for shared memory region **/
224 /*****************************************************************************/
225 #define SHM_TABLE_SLOTS 4
226
227 struct shm_hdr
228 {
229 int hash_allocated;
230 pthread_mutex_t mutex;
231 int free_tab_slots[SHM_TABLE_SLOTS];
232
233 unsigned long freelist_offset;
234
235 unsigned long entries_offset;
236 unsigned long nr_entries;
237
238 unsigned long tabs_offset;
239 unsigned long max_tab_size;
240 unsigned long max_lock_tab_size;
241
242 struct __hash hash;
243 };
244
245 static unsigned long get_shm_baddr(void *hdr)
246 {
247 return ((unsigned long)hdr - offsetof(struct shm_hdr, hash));
248 }
249
250
251 /** Locations of various structures/memory areas **/
252 static struct shm_hdr* get_shm_hdr(struct __hash *h)
253 {
254 return (struct shm_hdr *)
255 ((unsigned long)h - offsetof(struct shm_hdr, hash));
256 }
257
258 static uint32_t* get_shm_freelist(struct shm_hdr *hdr)
259 {
260 unsigned long shm_baddr = (unsigned long)hdr;
261 return ((uint32_t *)(shm_baddr + hdr->freelist_offset));
262 }
263
264 static struct hash_entry* get_shm_entries(struct shm_hdr *hdr)
265 {
266 unsigned long shm_baddr = (unsigned long)hdr;
267 return ((struct hash_entry *)(shm_baddr + hdr->entries_offset));
268 }
269
270 static struct bucket* get_shm_tab(struct shm_hdr *hdr, int i)
271 {
272 unsigned long shm_baddr = (unsigned long)hdr;
273 return ((struct bucket *)
274 ((shm_baddr + hdr->tabs_offset) +
275 i * (hdr->max_tab_size + hdr->max_lock_tab_size)));
276 }
277
278 static struct bucket_lock* get_shm_lock_tab(struct shm_hdr *hdr, int i)
279 {
280 unsigned long shm_baddr = (unsigned long)hdr;
281 return ((struct bucket_lock *)
282 ((shm_baddr + hdr->tabs_offset) +
283 i * (hdr->max_tab_size + hdr->max_lock_tab_size) +
284 hdr->max_tab_size));
285 }
286
287 static int get_shm_slot(struct shm_hdr *hdr, void *p)
288 {
289 unsigned long shm_baddr = (unsigned long)hdr;
290 return ((unsigned long)p - (shm_baddr + hdr->tabs_offset)) /
291 (hdr->max_tab_size + hdr->max_lock_tab_size);
292 }
293
294 /* Shared memory allocator locks */
295 static int shm_mutex_init(struct shm_hdr *h)
296 {
297 int ret;
298 pthread_mutexattr_t _attr;
299
300 ret = pthread_mutexattr_init(&_attr);
301 if(ret == 0)
302 ret = pthread_mutexattr_setpshared(&_attr, PTHREAD_PROCESS_SHARED);
303 if(ret == 0)
304 ret = pthread_mutex_init(&h->mutex, &_attr);
305 if(ret == 0)
306 ret = pthread_mutexattr_destroy(&_attr);
307
308 return ret;
309 }
310
311 static int shm_mutex_lock(struct shm_hdr *h)
312 {
313 return pthread_mutex_lock(&h->mutex);
314 }
315
316 static int shm_mutex_unlock(struct shm_hdr *h)
317 {
318 return pthread_mutex_unlock(&h->mutex);
319 }
320
321
322 /* Shared memory allocator freelist */
323 static void shm_add_to_freelist(struct shm_hdr *hdr, uint32_t sl)
324 {
325 uint32_t *freelist = get_shm_freelist(hdr);
326
327 shm_mutex_lock(hdr);
328 freelist[sl+1] = freelist[0];
329 freelist[0] = sl;
330 shm_mutex_unlock(hdr);
331 }
332
333 static uint32_t shm_get_from_freelist(struct shm_hdr *hdr)
334 {
335 uint32_t *freelist = get_shm_freelist(hdr);
336 uint32_t slot;
337
338 shm_mutex_lock(hdr);
339 slot = freelist[0];
340 freelist[0] = freelist[slot+1];
341 shm_mutex_unlock(hdr);
342
343 return (slot == 0 ? -1 : slot);
344 }
345
346
347 #define SHM_ALLOC_MAIN(_n)
348
349 static unsigned long shm_init_offsets(
350 struct shm_hdr *hdr, int nr_entries)
351 {
352 hdr->freelist_offset = sizeof(struct shm_hdr);
353
354 /* Freelist needs one extra slot in the array for the freelist head */
355 hdr->entries_offset =
356 hdr->freelist_offset + (nr_entries + 1) * sizeof(uint32_t);
357 hdr->nr_entries = nr_entries;
358
359 hdr->tabs_offset = hdr->entries_offset +
360 nr_entries * sizeof(struct hash_entry);
361     /* We want to allocate tables 1.5x larger than the number of entries
362        we want to hold in them */
363 hdr->max_tab_size =
364 (nr_entries * 3 / 2) * sizeof(struct bucket);
365 hdr->max_lock_tab_size =
366 nr_locks(hdr->max_tab_size) * sizeof(struct bucket_lock);
367
368 return hdr->tabs_offset + (hdr->max_tab_size + hdr->max_lock_tab_size) * 4;
369 }
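/* Resulting layout of the shared memory region (all offsets relative to the
 * region base, as computed above -- descriptive comment only):
 *
 *   [ struct shm_hdr                                             ]
 *   [ freelist: (nr_entries + 1) * uint32_t, slot 0 = list head  ]
 *   [ entries:  nr_entries * struct hash_entry                   ]
 *   [ SHM_TABLE_SLOTS (4) x (bucket table + bucket lock table)   ]
 */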
370
371 struct __hash* __shm_hash_init(unsigned long shm_baddr, unsigned long shm_size)
372 {
373 uint32_t i;
374 struct shm_hdr *hdr;
375
376 /* Some sanity checks */
377 hdr = (struct shm_hdr *)shm_baddr;
378 memset(hdr, 0, sizeof(struct shm_hdr));
379
380 /* Find the maximum number of entries we can store in the given shm_size */
381 for(i=1; shm_init_offsets(hdr, i) < shm_size; i++){};
382 shm_init_offsets(hdr, (i-1));
383
384 memset(get_shm_freelist(hdr), 0,
385 (hdr->nr_entries + 1) * sizeof(uint32_t));
386 if(shm_mutex_init(hdr) != 0)
387 return NULL;
388 for(i=0; i<hdr->nr_entries; i++)
389 shm_add_to_freelist(hdr, i);
390 for(i=0; i<SHM_TABLE_SLOTS; i++)
391 hdr->free_tab_slots[i] = 1;
392
393 shm_mutex_lock(hdr);
394 assert(!hdr->hash_allocated);
395 hdr->hash_allocated = 1;
396 shm_mutex_unlock(hdr);
397
398 return __hash_init(&hdr->hash, 1000);
399 }
400
401 struct __hash* __shm_hash_get(unsigned long shm_baddr)
402 {
403 struct shm_hdr *hdr = (struct shm_hdr *)shm_baddr;
404
405 return (hdr->hash_allocated ? &hdr->hash : NULL);
406 }
407
408 static void* alloc_entry(struct __hash *h, int size)
409 {
410 struct shm_hdr *hdr = get_shm_hdr(h);
411 uint32_t slot = shm_get_from_freelist(hdr);
412
413 assert(size == sizeof(struct hash_entry));
414 if(slot == -1)
415 return NULL;
416
417 return (get_shm_entries(hdr) + slot);
418 }
419
420 static void alloc_buckets(struct __hash *h,
421 int nr_buckets,
422 struct bucket **buckets_tab,
423 struct bucket_lock **bucket_locks_tab)
424 {
425 struct shm_hdr *hdr = get_shm_hdr(h);
426 int free_slot;
427
428 *buckets_tab = NULL;
429 *bucket_locks_tab = NULL;
430
431 if(((nr_buckets * sizeof(struct bucket)) > hdr->max_tab_size) ||
432 ((nr_locks(nr_buckets) * sizeof(struct bucket_lock)) >
433 hdr->max_lock_tab_size))
434 return;
435
436 shm_mutex_lock(hdr);
437 for(free_slot=0; free_slot<SHM_TABLE_SLOTS; free_slot++)
438 if(hdr->free_tab_slots[free_slot])
439 break;
440 if(free_slot == SHM_TABLE_SLOTS)
441 {
442 shm_mutex_unlock(hdr);
443 return;
444 }
445 hdr->free_tab_slots[free_slot] = 0;
446 shm_mutex_unlock(hdr);
447 *buckets_tab = get_shm_tab(hdr, free_slot);
448 *bucket_locks_tab = get_shm_lock_tab(hdr, free_slot);
449 }
450
451 static void free_entry(struct __hash *h, void *p)
452 {
453 struct shm_hdr *hdr = get_shm_hdr(h);
454 uint32_t slot;
455
456 slot = ((uint32_t)((struct hash_entry *)p -
457 get_shm_entries(hdr)));
458 shm_add_to_freelist(hdr, slot);
459 }
460
461 static void free_buckets(struct __hash *h,
462 struct bucket *buckets,
463 struct bucket_lock *bucket_locks)
464 {
465 struct shm_hdr *hdr = get_shm_hdr(h);
466 int slot;
467
468 if(!buckets || !bucket_locks)
469 {
470 assert(!buckets && !bucket_locks);
471 return;
472 }
473 slot = get_shm_slot(hdr, buckets);
474 assert(slot < SHM_TABLE_SLOTS);
475 assert((char *)bucket_locks == (char *)buckets + hdr->max_tab_size);
476 shm_mutex_lock(hdr);
477 assert(hdr->free_tab_slots[slot] == 0);
478 hdr->free_tab_slots[slot] = 1;
479 shm_mutex_unlock(hdr);
480 }
481
482 static int max_entries(struct __hash *h)
483 {
484 struct shm_hdr *hdr = get_shm_hdr(h);
485
486 return hdr->nr_entries;
487 }
488
489 #endif /* !BIDIR_USE_STDMALLOC */
490
491
492 /* The structures may be stored in a shared memory region, with the base   */
493 /* address stored in shm_base_addr. All the pointers in the above          */
494 /* structures need to be relative to this base address (otherwise they     */
495 /* would not make sense to other processes). The accessor macros below are */
496 /* used to convert between canonical (base address relative) and local     */
497 /* addresses. C2L stands for CANONICAL_TO_LOCAL, and vice versa.           */
498 #define C2L(_h, _p) ((typeof(_p))((unsigned long)(_p) + \
499 get_shm_baddr(_h)))
500 #define L2C(_h, _p) ((typeof(_p))((unsigned long)(_p) - \
501 get_shm_baddr(_h)))
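/* Illustrative example of the convention (no extra functionality): a
 * canonical pointer stored in the shared region, e.g. h->key_tab, has to be
 * converted before it is dereferenced, and converted back before it is
 * stored again:
 *
 *   struct bucket *local = C2L(h, h->key_tab);    local = base + offset
 *   h->key_tab = L2C(h, local);                   store base-relative form
 *
 * NULL is special: bucket chains are terminated by a canonical NULL, which
 * is why the lookup loops below test the raw pointer against NULL before
 * applying C2L to it. */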
502
503
504 #define HASH_LOCK_INIT(_h) ({ \
505 int _ret; \
506 pthread_rwlockattr_t _attr; \
507 \
508 h->lock_alive = 1; \
509 _ret = pthread_rwlockattr_init(&_attr); \
510 if(_ret == 0) \
511 _ret = pthread_rwlockattr_setpshared(&_attr, \
512 PTHREAD_PROCESS_SHARED); \
513 if(_ret == 0) \
514 _ret = pthread_rwlock_init(&(_h)->hash_lock, &_attr); \
515 if(_ret == 0) \
516 _ret = pthread_rwlockattr_destroy(&_attr); \
517 \
518 _ret; \
519 })
520
521 #define HASH_LOCK_RDLOCK(_h) ({ \
522 int _ret; \
523 \
524 if(!_h->lock_alive) _ret = ENOLCK; \
525 else \
526 { \
527 struct timespec _ts; \
528 /* 10s timeout, long but ~matches disk spin-up times */ \
529 _ts.tv_sec = time(NULL) + 10; \
530 _ts.tv_nsec = 0; \
531 _ret = pthread_rwlock_timedrdlock(&(_h)->hash_lock, &_ts); \
532 if(_ret == ETIMEDOUT) _h->lock_alive = 0; \
533 } \
534 _ret; \
535 })
536
537 #define HASH_LOCK_RDUNLOCK(_h) \
538 pthread_rwlock_unlock(&(_h)->hash_lock)
539
540 #define HASH_LOCK_WRLOCK(_h) ({ \
541 int _ret; \
542 \
543 if(!_h->lock_alive) _ret = ENOLCK; \
544 else \
545 { \
546 struct timespec _ts; \
547 _ts.tv_sec = time(NULL) + 10; \
548 _ts.tv_nsec = 0UL; \
549 _ret = pthread_rwlock_timedwrlock(&(_h)->hash_lock, &_ts); \
550 if(_ret == ETIMEDOUT) _h->lock_alive = 0; \
551 } \
552 _ret; \
553 })
554
555 #define HASH_LOCK_TRYWRLOCK(_h) ({ \
556 int _ret = (_h->lock_alive ? \
557 pthread_rwlock_trywrlock(&(_h)->hash_lock) : \
558 ENOLCK); \
559 _ret; \
560 })
561
562 #define HASH_LOCK_WRUNLOCK(_h) \
563 pthread_rwlock_unlock(&(_h)->hash_lock)
564
565
566 #define BUCKET_LOCK_INIT(_h, _b) ({ \
567 int _ret; \
568 pthread_rwlockattr_t _attr; \
569 \
570 _ret = pthread_rwlockattr_init(&_attr); \
571 if(_ret == 0) \
572 _ret = pthread_rwlockattr_setpshared(&_attr, \
573 PTHREAD_PROCESS_SHARED); \
574 if(_ret == 0) \
575 _ret = pthread_rwlock_init(&(_b)->bucket_lock, &_attr); \
576 if(_ret == 0) \
577 _ret = pthread_rwlockattr_destroy(&_attr); \
578 \
579 _ret; \
580 })
581
582
583 #define BUCKET_LOCK_RDLOCK(_h, _lock_tab, _idx) ({ \
584 int _ret; \
585 struct timespec _ts; \
586 struct bucket_lock *_lock = &(_lock_tab)[(_idx) / BUCKETS_PER_LOCK]; \
587 \
588 _ts.tv_sec = time(NULL) + 10; \
589 _ts.tv_nsec = 0; \
590 _ret = pthread_rwlock_timedrdlock(&(_lock)->bucket_lock, &_ts); \
591 if(_ret == ETIMEDOUT) (_h)->lock_alive = 0; \
592 _ret; \
593 })
594
595
596 #define BUCKET_LOCK_RDUNLOCK(_h, _lock_tab, _idx) ({ \
597 struct bucket_lock *_lock = &(_lock_tab)[(_idx) / BUCKETS_PER_LOCK]; \
598 pthread_rwlock_unlock(&(_lock)->bucket_lock); \
599 })
600
601 #define BUCKET_LOCK_WRLOCK(_h, _lock_tab, _idx) ({ \
602 int _ret; \
603 struct timespec _ts; \
604 struct bucket_lock *_lock = &(_lock_tab)[(_idx) / BUCKETS_PER_LOCK]; \
605 \
606 _ts.tv_sec = time(NULL) + 10; \
607 _ts.tv_nsec = 0; \
608 _ret = pthread_rwlock_timedwrlock(&(_lock)->bucket_lock, &_ts); \
609 if(_ret == ETIMEDOUT) (_h)->lock_alive = 0; \
610 _ret; \
611 })
612
613 #define BUCKET_LOCK_WRUNLOCK(_h, _lock_tab, _idx) ({ \
614 struct bucket_lock *_lock = &(_lock_tab)[(_idx) / BUCKETS_PER_LOCK]; \
615 pthread_rwlock_unlock(&(_lock)->bucket_lock); \
616 })
617
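/* The two-bucket write lock below is used when an entry has to be linked
 * into (or unlinked from) one key-table bucket and one value-table bucket
 * at the same time.  To avoid deadlock against a concurrent caller locking
 * the same pair, the two rwlocks are always taken in ascending address
 * order, regardless of which table they belong to (descriptive comment
 * only). */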
618 #define TWO_BUCKETS_LOCK_WRLOCK(_h, _blt1, _idx1, _blt2, _idx2) ({ \
619 int _ret; \
620 pthread_rwlock_t *_l1, *_l2; \
621 struct timespec _ts; \
622 struct bucket_lock *_bl1 = &(_blt1)[(_idx1) / BUCKETS_PER_LOCK]; \
623 struct bucket_lock *_bl2 = &(_blt2)[(_idx2) / BUCKETS_PER_LOCK]; \
624 \
625 assert((_bl1) != (_bl2)); \
626 if((_bl1) < (_bl2)) \
627 { \
628 _l1 = &(_bl1)->bucket_lock; \
629 _l2 = &(_bl2)->bucket_lock; \
630 } \
631 else \
632 { \
633 _l1 = &(_bl2)->bucket_lock; \
634 _l2 = &(_bl1)->bucket_lock; \
635 } \
636 _ts.tv_sec = time(NULL) + 10; \
637 _ts.tv_nsec = 0; \
638 _ret = pthread_rwlock_timedwrlock(_l1, &_ts); \
639 _ts.tv_sec = time(NULL) + 10; \
640 _ts.tv_nsec = 0; \
641 if(_ret == 0) \
642 _ret = pthread_rwlock_timedwrlock(_l2, &_ts); \
643 if(_ret == ETIMEDOUT) (_h)->lock_alive = 0; \
644 \
645 _ret; \
646 })
647
648 #define TWO_BUCKETS_LOCK_WRUNLOCK(_h, _blt1, _idx1, _blt2, _idx2) ({ \
649 int _ret; \
650 struct bucket_lock *_bl1 = &(_blt1)[(_idx1) / BUCKETS_PER_LOCK]; \
651 struct bucket_lock *_bl2 = &(_blt2)[(_idx2) / BUCKETS_PER_LOCK]; \
652 \
653 _ret = pthread_rwlock_unlock(&(_bl1)->bucket_lock); \
654 if(_ret == 0) \
655 _ret = pthread_rwlock_unlock(&(_bl2)->bucket_lock); \
656 \
657 _ret; \
658 })
659
660
661
662
663 static uint32_t hash_to_idx(struct __hash *h, uint32_t hash)
664 {
665 return (hash % h->tab_size);
666 }
667
668 static void alloc_tab(struct __hash *h,
669 int size,
670 struct bucket **buckets_tab,
671 struct bucket_lock **bucket_locks_tab)
672 {
673 int i;
674
675 alloc_buckets(h, size, buckets_tab, bucket_locks_tab);
676 if(!(*buckets_tab) || !(*bucket_locks_tab))
677 goto error_out;
678 memset(*buckets_tab, 0, size * sizeof(struct bucket));
679 memset(*bucket_locks_tab, 0, nr_locks(size) * sizeof(struct bucket_lock));
680 for(i=0; i<nr_locks(size); i++)
681 if(BUCKET_LOCK_INIT(h, *bucket_locks_tab + i) != 0)
682 goto error_out;
683
684 return;
685 error_out:
686 free_buckets(h, *buckets_tab, *bucket_locks_tab);
687 *buckets_tab = NULL;
688 *bucket_locks_tab = NULL;
689 return;
690 }
691
692
693 struct __hash *__hash_init(struct __hash *h, uint32_t min_size)
694 {
695 uint32_t size;
696 uint16_t size_idx;
697 struct bucket *buckets;
698 struct bucket_lock *bucket_locks;
699
700 /* Sanity check on args */
701 if (min_size > hash_sizes[hash_sizes_len-1]) return NULL;
702     /* Find the smallest table size that is >= min_size */
703 for(size_idx = 0; size_idx < hash_sizes_len; size_idx++)
704 if(hash_sizes[size_idx] >= min_size)
705 break;
706 size = hash_sizes[size_idx];
707
708 if(!h) return NULL;
709 alloc_tab(h, size, &buckets, &bucket_locks);
710 if(!buckets || !bucket_locks) goto alloc_fail;
711 h->key_tab = L2C(h, buckets);
712 h->key_lock_tab = L2C(h, bucket_locks);
713 alloc_tab(h, size, &buckets, &bucket_locks);
714 if(!buckets || !bucket_locks) goto alloc_fail;
715 h->value_tab = L2C(h, buckets);
716 h->value_lock_tab = L2C(h, bucket_locks);
717 /* Init all h variables */
718 if(HASH_LOCK_INIT(h) != 0) goto alloc_fail;
719 h->nr_ent = 0;
720 h->tab_size = size;
721 h->size_idx = size_idx;
722 h->max_load = (uint32_t)ceilf(hash_max_load_fact * size);
723 h->min_load = (uint32_t)ceilf(hash_min_load_fact * size);
724
725 return h;
726
727 alloc_fail:
728 if(h->key_tab || h->key_lock_tab)
729 free_buckets(h, C2L(h, h->key_tab), C2L(h, h->key_lock_tab));
730 return NULL;
731 }
732
733 #undef __prim
734 #undef __prim_t
735 #undef __prim_tab
736 #undef __prim_lock_tab
737 #undef __prim_hash
738 #undef __prim_cmp
739 #undef __prim_next
740 #undef __sec
741 #undef __sec_t
742
743 #define __prim key
744 #define __prim_t __k_t
745 #define __prim_tab key_tab
746 #define __prim_lock_tab key_lock_tab
747 #define __prim_hash __key_hash
748 #define __prim_cmp __key_cmp
749 #define __prim_next key_next
750 #define __sec value
751 #define __sec_t __v_t
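/* With the definitions above, the generic body below expands into the
 * key -> value specialisation: __prim_tab/__prim_lock_tab select the
 * key-side bucket and lock tables, __prim_hash/__prim_cmp the key hash and
 * compare helpers, and __sec names the field returned through *vp.  The
 * same macros are re-pointed at the value side further down to generate
 * __value_lookup from identical text (descriptive comment only). */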
752 int __key_lookup(struct __hash *h, __prim_t k, __sec_t *vp)
753 {
754 struct hash_entry *entry;
755 struct bucket *b;
756 struct bucket_lock *blt;
757 uint32_t idx;
758
759 if(HASH_LOCK_RDLOCK(h) != 0) return -ENOLCK;
760 idx = hash_to_idx(h, __prim_hash(k));
761 b = C2L(h, &h->__prim_tab[idx]);
762 blt = C2L(h, h->__prim_lock_tab);
763 if(BUCKET_LOCK_RDLOCK(h, blt, idx) != 0) return -ENOLCK;
764 entry = b->hash_entry;
765 while(entry != NULL)
766 {
767 entry = C2L(h, entry);
768 if(__prim_cmp(k, entry->__prim))
769 {
770 /* Unlock here */
771 *vp = entry->__sec;
772 BUCKET_LOCK_RDUNLOCK(h, blt, idx);
773 HASH_LOCK_RDUNLOCK(h);
774 return 1;
775 }
776 entry = entry->__prim_next;
777 }
778 BUCKET_LOCK_RDUNLOCK(h, blt, idx);
779 HASH_LOCK_RDUNLOCK(h);
780 return 0;
781 }
782
783 /* value lookup is an almost exact copy of key lookup */
784 #undef __prim
785 #undef __prim_t
786 #undef __prim_tab
787 #undef __prim_lock_tab
788 #undef __prim_hash
789 #undef __prim_cmp
790 #undef __prim_next
791 #undef __sec
792 #undef __sec_t
793
794 #define __prim value
795 #define __prim_t __v_t
796 #define __prim_tab value_tab
797 #define __prim_lock_tab value_lock_tab
798 #define __prim_hash __value_hash
799 #define __prim_cmp __value_cmp
800 #define __prim_next value_next
801 #define __sec key
802 #define __sec_t __k_t
803 int __value_lookup(struct __hash *h, __prim_t k, __sec_t *vp)
804 {
805 struct hash_entry *entry;
806 struct bucket *b;
807 struct bucket_lock *blt;
808 uint32_t idx;
809
810 if(HASH_LOCK_RDLOCK(h) != 0) return -ENOLCK;
811 idx = hash_to_idx(h, __prim_hash(k));
812 b = C2L(h, &h->__prim_tab[idx]);
813 blt = C2L(h, h->__prim_lock_tab);
814 if(BUCKET_LOCK_RDLOCK(h, blt, idx) != 0) return -ENOLCK;
815 entry = b->hash_entry;
816 while(entry != NULL)
817 {
818 entry = C2L(h, entry);
819 if(__prim_cmp(k, entry->__prim))
820 {
821 /* Unlock here */
822 *vp = entry->__sec;
823 BUCKET_LOCK_RDUNLOCK(h, blt, idx);
824 HASH_LOCK_RDUNLOCK(h);
825 return 1;
826 }
827 entry = entry->__prim_next;
828 }
829 BUCKET_LOCK_RDUNLOCK(h, blt, idx);
830 HASH_LOCK_RDUNLOCK(h);
831 return 0;
832 }
833
834 int __insert(struct __hash *h, __k_t k, __v_t v)
835 {
836 uint32_t k_idx, v_idx;
837 struct hash_entry *entry;
838 struct bucket *bk, *bv;
839 struct bucket_lock *bltk, *bltv;
840
841 /* Allocate new entry before any locks (in case it fails) */
842 entry = (struct hash_entry*)
843 alloc_entry(h, sizeof(struct hash_entry));
844 if(!entry) return 0;
845
846 if(HASH_LOCK_RDLOCK(h) != 0) return -ENOLCK;
847     /* Read of nr_ent is atomic (TODO: check), no need for fancy accessors */
848 if(h->nr_ent+1 > h->max_load)
849 {
850 /* Resize needs the write lock, drop read lock temporarily */
851 HASH_LOCK_RDUNLOCK(h);
852 hash_resize(h);
853 if(HASH_LOCK_RDLOCK(h) != 0) return -ENOLCK;
854 }
855
856 /* Init the entry */
857 entry->key = k;
858 entry->value = v;
859
860     /* Work out the indices */
861 k_idx = hash_to_idx(h, __key_hash(k));
862 v_idx = hash_to_idx(h, __value_hash(v));
863
864 /* Insert */
865 bk = C2L(h, &h->key_tab[k_idx]);
866 bv = C2L(h, &h->value_tab[v_idx]);
867 bltk = C2L(h, h->key_lock_tab);
868 bltv = C2L(h, h->value_lock_tab);
869 if(TWO_BUCKETS_LOCK_WRLOCK(h, bltk, k_idx, bltv, v_idx) != 0)
870 return -ENOLCK;
871 entry->key_next = bk->hash_entry;
872 bk->hash_entry = L2C(h, entry);
873 entry->value_next = bv->hash_entry;
874 bv->hash_entry = L2C(h, entry);
875 TWO_BUCKETS_LOCK_WRUNLOCK(h, bltk, k_idx, bltv, v_idx);
876
877     /* Bookkeeping */
878 atomic_inc(&h->nr_ent);
879
880 HASH_LOCK_RDUNLOCK(h);
881
882 return 1;
883 }
884
885
886 #undef __prim
887 #undef __prim_t
888 #undef __prim_tab
889 #undef __prim_lock_tab
890 #undef __prim_hash
891 #undef __prim_cmp
892 #undef __prim_next
893 #undef __sec
894 #undef __sec_t
895 #undef __sec_tab
896 #undef __sec_lock_tab
897 #undef __sec_hash
898 #undef __sec_next
899
900 #define __prim key
901 #define __prim_t __k_t
902 #define __prim_tab key_tab
903 #define __prim_lock_tab key_lock_tab
904 #define __prim_hash __key_hash
905 #define __prim_cmp __key_cmp
906 #define __prim_next key_next
907 #define __sec value
908 #define __sec_t __v_t
909 #define __sec_tab value_tab
910 #define __sec_lock_tab value_lock_tab
911 #define __sec_hash __value_hash
912 #define __sec_next value_next
913
914 int __key_remove(struct __hash *h, __prim_t k, __sec_t *vp)
915 {
916 struct hash_entry *e, *es, **pek, **pev;
917 struct bucket *bk, *bv;
918 struct bucket_lock *bltk, *bltv;
919 uint32_t old_kidx, kidx, vidx, min_load, nr_ent;
920 __prim_t ks;
921 __sec_t vs;
922
923 if(HASH_LOCK_RDLOCK(h) != 0) return -ENOLCK;
924
925 again:
926 old_kidx = kidx = hash_to_idx(h, __prim_hash(k));
927 bk = C2L(h, &h->__prim_tab[kidx]);
928 bltk = C2L(h, h->__prim_lock_tab);
929 if(BUCKET_LOCK_RDLOCK(h, bltk, kidx) != 0) return -ENOLCK;
930 pek = &(bk->hash_entry);
931 e = *pek;
932 while(e != NULL)
933 {
934 e = C2L(h, e);
935 if(__prim_cmp(k, e->__prim))
936 {
937 goto found;
938 }
939 pek = &(e->__prim_next);
940 e = *pek;
941 }
942
943 BUCKET_LOCK_RDUNLOCK(h, bltk, kidx);
944 HASH_LOCK_RDUNLOCK(h);
945
946 return 0;
947
948 found:
949 /*
950 * Make local copy of key and value.
951 */
952 es = e;
953 ks = e->__prim;
954 vs = e->__sec;
955 kidx = hash_to_idx(h, __prim_hash(ks));
956     /* Being paranoid: check that kidx has not changed, so that we unlock
957      * the right bucket */
958 assert(old_kidx == kidx);
959 vidx = hash_to_idx(h, __sec_hash(vs));
960 bk = C2L(h, &h->__prim_tab[kidx]);
961 bv = C2L(h, &h->__sec_tab[vidx]);
962 bltk = C2L(h, h->__prim_lock_tab);
963 bltv = C2L(h, h->__sec_lock_tab);
964 BUCKET_LOCK_RDUNLOCK(h, bltk, kidx);
965 if(TWO_BUCKETS_LOCK_WRLOCK(h, bltk, kidx, bltv, vidx) != 0) return -ENOLCK;
966 pek = &(bk->hash_entry);
967 pev = &(bv->hash_entry);
968
969 /* Find the entry in both tables */
970 e = *pek;
971 while(e != NULL)
972 {
973 e = C2L(h, e);
974 if(e == es)
975 {
976 /* Being paranoid: make sure that the key and value are
977              * still the same. This is still not 100% safe because, in
978              * principle, the entry could have been deleted while we
979              * didn't hold the locks for a little while, and exactly
980              * the same entry reinserted. If __k_t & __v_t are
981              * simple types then it probably doesn't matter, but if
982              * either is a pointer type, the actual structure might
983              * now be different. The chances of that happening are very
984              * slim, but still, if that's a problem, the user needs to
985              * pay attention to structure re-allocation */
986 if((memcmp(&(e->__prim), &ks, sizeof(__prim_t))) ||
987 (memcmp(&(e->__sec), &vs, sizeof(__sec_t))))
988 break;
989 goto found_again;
990 }
991 pek = &(e->__prim_next);
992 e = *pek;
993 }
994
995 TWO_BUCKETS_LOCK_WRUNLOCK(h, bltk, kidx, bltv, vidx);
996
997 /* Entry got removed in the meantime, try again */
998 goto again;
999
1000 found_again:
1001     /* We are now committed to the removal */
1002 e = *pev;
1003 while(e != NULL)
1004 {
1005 e = C2L(h, e);
1006 if(e == es)
1007 {
1008 /* Both pek and pev are pointing to the right place, remove */
1009 *pek = e->__prim_next;
1010 *pev = e->__sec_next;
1011
1012 atomic_dec(&h->nr_ent);
1013 nr_ent = h->nr_ent;
1014 /* read min_load still under the hash lock! */
1015 min_load = h->min_load;
1016
1017 TWO_BUCKETS_LOCK_WRUNLOCK(h, bltk, kidx, bltv, vidx);
1018 HASH_LOCK_RDUNLOCK(h);
1019
1020 if(nr_ent < min_load)
1021 hash_resize(h);
1022 if(vp != NULL)
1023 *vp = e->__sec;
1024 free_entry(h, e);
1025 return 1;
1026 }
1027 pev = &(e->__sec_next);
1028 e = *pev;
1029 }
1030
1031     /* We should never get here; no need to unlock anything */
1032 return -ENOLCK;
1033 }
1034
1035 #undef __prim
1036 #undef __prim_t
1037 #undef __prim_tab
1038 #undef __prim_lock_tab
1039 #undef __prim_hash
1040 #undef __prim_cmp
1041 #undef __prim_next
1042 #undef __sec
1043 #undef __sec_t
1044 #undef __sec_tab
1045 #undef __sec_lock_tab
1046 #undef __sec_hash
1047 #undef __sec_next
1048
1049 #define __prim value
1050 #define __prim_t __v_t
1051 #define __prim_tab value_tab
1052 #define __prim_lock_tab value_lock_tab
1053 #define __prim_hash __value_hash
1054 #define __prim_cmp __value_cmp
1055 #define __prim_next value_next
1056 #define __sec key
1057 #define __sec_t __k_t
1058 #define __sec_tab key_tab
1059 #define __sec_lock_tab key_lock_tab
1060 #define __sec_hash __key_hash
1061 #define __sec_next key_next
1062
1063 int __value_remove(struct __hash *h, __prim_t k, __sec_t *vp)
1064 {
1065 struct hash_entry *e, *es, **pek, **pev;
1066 struct bucket *bk, *bv;
1067 struct bucket_lock *bltk, *bltv;
1068 uint32_t old_kidx, kidx, vidx, min_load, nr_ent;
1069 __prim_t ks;
1070 __sec_t vs;
1071
1072 if(HASH_LOCK_RDLOCK(h) != 0) return -ENOLCK;
1073
1074 again:
1075 old_kidx = kidx = hash_to_idx(h, __prim_hash(k));
1076 bk = C2L(h, &h->__prim_tab[kidx]);
1077 bltk = C2L(h, h->__prim_lock_tab);
1078 if(BUCKET_LOCK_RDLOCK(h, bltk, kidx) != 0) return -ENOLCK;
1079 pek = &(bk->hash_entry);
1080 e = *pek;
1081 while(e != NULL)
1082 {
1083 e = C2L(h, e);
1084 if(__prim_cmp(k, e->__prim))
1085 {
1086 goto found;
1087 }
1088 pek = &(e->__prim_next);
1089 e = *pek;
1090 }
1091
1092 BUCKET_LOCK_RDUNLOCK(h, bltk, kidx);
1093 HASH_LOCK_RDUNLOCK(h);
1094
1095 return 0;
1096
1097 found:
1098 /*
1099 * Make local copy of key and value.
1100 */
1101 es = e;
1102 ks = e->__prim;
1103 vs = e->__sec;
1104 kidx = hash_to_idx(h, __prim_hash(ks));
1105     /* Being paranoid: check that kidx has not changed, so that we unlock
1106      * the right bucket */
1107 assert(old_kidx == kidx);
1108 vidx = hash_to_idx(h, __sec_hash(vs));
1109 bk = C2L(h, &h->__prim_tab[kidx]);
1110 bv = C2L(h, &h->__sec_tab[vidx]);
1111 bltk = C2L(h, h->__prim_lock_tab);
1112 bltv = C2L(h, h->__sec_lock_tab);
1113 BUCKET_LOCK_RDUNLOCK(h, bltk, kidx);
1114 if(TWO_BUCKETS_LOCK_WRLOCK(h, bltk, kidx, bltv, vidx) != 0) return -ENOLCK;
1115 pek = &(bk->hash_entry);
1116 pev = &(bv->hash_entry);
1117
1118 /* Find the entry in both tables */
1119 e = *pek;
1120 while(e != NULL)
1121 {
1122 e = C2L(h, e);
1123 if(e == es)
1124 {
1125 /* Being paranoid: make sure that the key and value are
1126              * still the same. This is still not 100% safe because, in
1127              * principle, the entry could have been deleted while we
1128              * didn't hold the locks for a little while, and exactly
1129              * the same entry reinserted. If __k_t & __v_t are
1130              * simple types then it probably doesn't matter, but if
1131              * either is a pointer type, the actual structure might
1132              * now be different. The chances of that happening are very
1133              * slim, but still, if that's a problem, the user needs to
1134              * pay attention to structure re-allocation */
1135 if((memcmp(&(e->__prim), &ks, sizeof(__prim_t))) ||
1136 (memcmp(&(e->__sec), &vs, sizeof(__sec_t))))
1137 break;
1138 goto found_again;
1139 }
1140 pek = &(e->__prim_next);
1141 e = *pek;
1142 }
1143
1144 TWO_BUCKETS_LOCK_WRUNLOCK(h, bltk, kidx, bltv, vidx);
1145
1146 /* Entry got removed in the meantime, try again */
1147 goto again;
1148
1149 found_again:
1150     /* We are now committed to the removal */
1151 e = *pev;
1152 while(e != NULL)
1153 {
1154 e = C2L(h, e);
1155 if(e == es)
1156 {
1157 /* Both pek and pev are pointing to the right place, remove */
1158 *pek = e->__prim_next;
1159 *pev = e->__sec_next;
1160
1161 atomic_dec(&h->nr_ent);
1162 nr_ent = h->nr_ent;
1163 /* read min_load still under the hash lock! */
1164 min_load = h->min_load;
1165
1166 TWO_BUCKETS_LOCK_WRUNLOCK(h, bltk, kidx, bltv, vidx);
1167 HASH_LOCK_RDUNLOCK(h);
1168
1169 if(nr_ent < min_load)
1170 hash_resize(h);
1171 if(vp != NULL)
1172 *vp = e->__sec;
1173 free_entry(h, e);
1174 return 1;
1175 }
1176 pev = &(e->__sec_next);
1177 e = *pev;
1178 }
1179
1180     /* We should never get here; no need to unlock anything */
1181 return -ENOLCK;
1182 }
1183
1184
1185 int __hash_destroy(struct __hash *h,
1186 void (*entry_consumer)(__k_t k, __v_t v, void *p),
1187 void *d)
1188 {
1189 struct hash_entry *e, *n;
1190 struct bucket *b;
1191 int i;
1192
1193 if(HASH_LOCK_WRLOCK(h) != 0) return -ENOLCK;
1194
1195     /* No need to lock individual buckets when holding the hash write lock */
1196 for(i=0; i < h->tab_size; i++)
1197 {
1198 b = C2L(h, &h->key_tab[i]);
1199 e = b->hash_entry;
1200 while(e != NULL)
1201 {
1202 e = C2L(h, e);
1203 n = e->key_next;
1204 if(entry_consumer)
1205 entry_consumer(e->key, e->value, d);
1206 free_entry(h, e);
1207 e = n;
1208 }
1209 }
1210 free_buckets(h, C2L(h, h->key_tab), C2L(h, h->key_lock_tab));
1211 free_buckets(h, C2L(h, h->value_tab), C2L(h, h->value_lock_tab));
1212
1213 HASH_LOCK_WRUNLOCK(h);
1214 h->lock_alive = 0;
1215
1216 return 0;
1217 }
1218
1219 static void hash_resize(struct __hash *h)
1220 {
1221 int new_size_idx, i, lock_ret;
1222 uint32_t size, old_size, kidx, vidx;
1223 struct bucket *t1, *t2, *b;
1224 struct bucket_lock *l1, *l2;
1225 struct hash_entry *e, *n;
1226
1227     /* We may fail to acquire the lock if a resize is triggered while
1228        we are iterating (under the read lock) */
1229 lock_ret = HASH_LOCK_TRYWRLOCK(h);
1230 if(lock_ret != 0) return;
1231
1232 new_size_idx = h->size_idx;
1233 /* Work out the new size */
1234 if(h->nr_ent >= h->max_load)
1235 new_size_idx = h->size_idx+1;
1236 if(h->nr_ent < h->min_load)
1237 new_size_idx = h->size_idx-1;
1238 if((new_size_idx == h->size_idx) ||
1239 (new_size_idx >= hash_sizes_len) ||
1240 (new_size_idx < 0))
1241 {
1242 HASH_LOCK_WRUNLOCK(h);
1243 return;
1244 }
1245
1246 size = hash_sizes[new_size_idx];
1247
1248 /* Allocate the new sizes */
1249 t1 = t2 = NULL;
1250 l1 = l2 = NULL;
1251 alloc_tab(h, size, &t1, &l1);
1252 if(!t1 || !l1) goto alloc_fail;
1253 alloc_tab(h, size, &t2, &l2);
1254 if(!t2 || !l2) goto alloc_fail;
1255
1256 old_size = h->tab_size;
1257 h->tab_size = size;
1258 h->size_idx = new_size_idx;
1259 h->max_load = (uint32_t)ceilf(hash_max_load_fact * size);
1260 h->min_load = (uint32_t)ceilf(hash_min_load_fact * size);
1261
1262 /* Move the entries */
1263 for(i=0; i < old_size; i++)
1264 {
1265 b = C2L(h, &h->key_tab[i]);
1266 e = b->hash_entry;
1267 while(e != NULL)
1268 {
1269 e = C2L(h, e);
1270 n = e->key_next;
1271             kidx = hash_to_idx(h, __key_hash(e->key));
1272             vidx = hash_to_idx(h, __value_hash(e->value));
1273 /* Move to the correct bucket */
1274 e->key_next = t1[kidx].hash_entry;
1275 t1[kidx].hash_entry = L2C(h, e);
1276 e->value_next = t2[vidx].hash_entry;
1277 t2[vidx].hash_entry = L2C(h, e);
1278 e = n;
1279 }
1280 }
1281 free_buckets(h, C2L(h, h->key_tab), C2L(h, h->key_lock_tab));
1282 free_buckets(h, C2L(h, h->value_tab), C2L(h, h->value_lock_tab));
1283 h->key_tab = L2C(h, t1);
1284 h->key_lock_tab = L2C(h, l1);
1285 h->value_tab = L2C(h, t2);
1286 h->value_lock_tab = L2C(h, l2);
1287
1288 HASH_LOCK_WRUNLOCK(h);
1289
1290 return;
1291
1292 alloc_fail:
1293 /* If we failed to resize, adjust max/min load. This will stop us from
1294 * retrying resize too frequently */
1295 if(new_size_idx > h->size_idx)
1296 h->max_load = (h->max_load + 2 * h->tab_size) / 2 + 1;
1297 else
1298 if (new_size_idx < h->size_idx)
1299 h->min_load = h->min_load / 2;
1300 HASH_LOCK_WRUNLOCK(h);
1301 if(t1 || l1) free_buckets(h, t1, l1);
1302 if(t2 || l2) free_buckets(h, t2, l2);
1303 return;
1304 }
1305
1306 int __hash_iterator(struct __hash *h,
1307 int (*entry_consumer)(__k_t k, __v_t v, void *p),
1308 void *d)
1309 {
1310 struct hash_entry *e, *n;
1311 struct bucket *b;
1312 struct bucket_lock *blt;
1313 int i, brk_early;
1314
1315 if(HASH_LOCK_RDLOCK(h) != 0) return -ENOLCK;
1316
1317 for(i=0; i < h->tab_size; i++)
1318 {
1319 b = C2L(h, &h->key_tab[i]);
1320 blt = C2L(h, h->key_lock_tab);
1321 if(BUCKET_LOCK_RDLOCK(h, blt, i) != 0) return -ENOLCK;
1322 e = b->hash_entry;
1323 while(e != NULL)
1324 {
1325 e = C2L(h, e);
1326 n = e->key_next;
1327 brk_early = entry_consumer(e->key, e->value, d);
1328 if(brk_early)
1329 {
1330 BUCKET_LOCK_RDUNLOCK(h, blt, i);
1331 goto out;
1332 }
1333 e = n;
1334 }
1335 BUCKET_LOCK_RDUNLOCK(h, blt, i);
1336 }
1337 out:
1338 HASH_LOCK_RDUNLOCK(h);
1339 return 0;
1340 }
1341
1342 void __hash_sizes(struct __hash *h,
1343 uint32_t *nr_ent,
1344 uint32_t *max_nr_ent,
1345 uint32_t *tab_size,
1346 uint32_t *max_load,
1347 uint32_t *min_load)
1348 {
1349 if(nr_ent != NULL) *nr_ent = h->nr_ent;
1350 if(max_nr_ent != NULL) *max_nr_ent = max_entries(h);
1351 if(tab_size != NULL) *tab_size = h->tab_size;
1352 if(max_load != NULL) *max_load = h->max_load;
1353 if(min_load != NULL) *min_load = h->min_load;
1354 }
1355
1356