1 /* Copyright (C) 2000-2019 Red Hat, Inc.
2 This file is part of elfutils.
3 Written by Srdan Milakovic <sm108@rice.edu>, 2019.
4 Derived from Ulrich Drepper <drepper@redhat.com>, 2000.
6 This file is free software; you can redistribute it and/or modify
7 it under the terms of either
9 * the GNU Lesser General Public License as published by the Free
10 Software Foundation; either version 3 of the License, or (at
11 your option) any later version
15 * the GNU General Public License as published by the Free
16 Software Foundation; either version 2 of the License, or (at
17 your option) any later version
19 or both in parallel, as here.
21 elfutils is distributed in the hope that it will be useful, but
22 WITHOUT ANY WARRANTY; without even the implied warranty of
23 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 General Public License for more details.
26 You should have received copies of the GNU General Public License and
27 the GNU Lesser General Public License along with this program. If
28 not, see <http://www.gnu.org/licenses/>. */
35 /* Before including this file the following macros must be defined:
37 NAME name of the hash table structure.
38 TYPE data type of the hash table entries
/* lookup: find the table slot whose stored hash matches HVAL, using
   open addressing with double hashing (first hash picks the start
   slot, second hash supplies the probe stride).
   NOTE(review): this chunk is elided -- the probe loop, the comparison
   of `hash' against `hval', the handling of empty slots and the return
   statement are not visible here; comments describe only the visible
   fragments.  */
43 lookup (NAME *htab, HASHTYPE hval)
45 /* First hash function: simply take the modulus but prevent zero. Small values
46 can skip the division, which helps performance when this is common. */
/* Slot indices are 1-based: idx is always in [1, size], matching the
   (size + 1)-entry table allocated in INIT/resize_master.  */
47 size_t idx = 1 + (hval < htab->size ? hval : hval % htab->size);
/* Acquire load pairs with the release store of hashval in
   insert_helper, so a reader that sees a published hashval also sees
   the val_ptr written before it.  */
51 hash = atomic_load_explicit(&htab->table[idx].hashval,
52 memory_order_acquire);
58 /* Second hash function as suggested in [Knuth]. */
/* second_hash is in [1, size - 2]: never zero, and with size prime the
   probe sequence visits every slot before repeating.  */
59 HASHTYPE second_hash = 1 + hval % (htab->size - 2);
/* Step backwards by second_hash, wrapping within [1, size].  */
63 if (idx <= second_hash)
64 idx = htab->size + idx - second_hash;
68 hash = atomic_load_explicit(&htab->table[idx].hashval,
69 memory_order_acquire);
/* insert_helper: insert VAL under hash HVAL using the same double-hash
   probe sequence as lookup.  A slot is claimed by CAS-ing its val_ptr
   from empty, after which the hashval is published with a release
   store; concurrent readers doing an acquire load of hashval therefore
   see a fully written entry.
   NOTE(review): this chunk is elided -- the retry loops, the
   empty/duplicate-slot tests, the CAS expected/desired arguments and
   the return value are not all visible; comments describe only the
   visible fragments.  */
78 insert_helper (NAME *htab, HASHTYPE hval, TYPE val)
80 /* First hash function: simply take the modulus but prevent zero. Small values
81 can skip the division, which helps performance when this is common. */
/* 1-based slot index in [1, size], as in lookup.  */
82 size_t idx = 1 + (hval < htab->size ? hval : hval % htab->size);
/* Acquire load of the candidate slot's hash; pairs with the release
   store of hashval below.  */
87 hash = atomic_load_explicit(&htab->table[idx].hashval,
88 memory_order_acquire);
/* Attempt to claim the slot by installing val's pointer into val_ptr.
   NOTE(review): the success memory order and desired-value argument of
   this CAS are elided from this chunk.  */
94 atomic_compare_exchange_strong_explicit(&htab->table[idx].val_ptr,
95 (uintptr_t *) &val_ptr,
98 memory_order_acquire);
/* Publish the hash last (release) so readers that observe it also
   observe the val_ptr stored above.  */
102 atomic_store_explicit(&htab->table[idx].hashval, hval,
103 memory_order_release);
/* Re-read the slot hash -- presumably the CAS-failed path, re-checking
   whether another thread inserted the same hash; confirm against the
   full upstream source.  */
110 hash = atomic_load_explicit(&htab->table[idx].hashval,
111 memory_order_acquire);
119 /* Second hash function as suggested in [Knuth]. */
/* Stride in [1, size - 2]; with a prime table size the probe sequence
   covers every slot.  */
120 HASHTYPE second_hash = 1 + hval % (htab->size - 2);
/* Step backwards by second_hash, wrapping within [1, size].  */
124 if (idx <= second_hash)
125 idx = htab->size + idx - second_hash;
129 hash = atomic_load_explicit(&htab->table[idx].hashval,
130 memory_order_acquire);
/* Same claim-by-CAS on the probed slot; here both success and failure
   orders are visible (acquire/acquire).  */
136 atomic_compare_exchange_strong_explicit(&htab->table[idx].val_ptr,
137 (uintptr_t *) &val_ptr,
139 memory_order_acquire,
140 memory_order_acquire);
/* Publish hashval after val_ptr, as above.  */
144 atomic_store_explicit(&htab->table[idx].hashval, hval,
145 memory_order_release);
152 hash = atomic_load_explicit(&htab->table[idx].hashval,
153 memory_order_acquire);
/* Resize state machine.  htab->resizing_state packs two fields:
   the low STATE_BITS hold the current phase, the remaining high bits
   count the worker threads currently helping the resize.
   NOTE(review): CLEANING is referenced by resize_master/resize_worker
   below but its #define is not visible in this chunk -- by the
   IS_NO_RESIZE_OR_CLEANING test (low bit clear) it must be an even
   value; confirm against the full source (upstream uses 2u).  */
163 #define NO_RESIZING 0u
164 #define ALLOCATING_MEMORY 1u
165 #define MOVING_DATA 3u
/* Number of low bits reserved for the phase; the worker count is
   incremented in units of STATE_INCREMENT so it never touches them.  */
168 #define STATE_BITS 2u
169 #define STATE_INCREMENT (1u << STATE_BITS)
170 #define STATE_MASK (STATE_INCREMENT - 1)
171 #define GET_STATE(A) ((A) & STATE_MASK)
/* NO_RESIZING and CLEANING both have the low bit clear, so one test
   covers "nothing to help with".  */
173 #define IS_NO_RESIZE_OR_CLEANING(A) (((A) & 0x1u) == 0)
175 #define GET_ACTIVE_WORKERS(A) ((A) >> STATE_BITS)
/* Work is handed out to threads in fixed-size blocks of records.  */
177 #define INITIALIZATION_BLOCK_SIZE 256
178 #define MOVE_BLOCK_SIZE 256
/* Ceiling division; assumes A + B does not overflow.  */
179 #define CEIL(A, B) (((A) + (B) - 1) / (B))
181 /* Initializes records and copies the data from the old table.
182 It can share work with other threads */
/* Two phases, each parallelized by handing out block indices via an
   atomic fetch-add counter: (1) zero-initialize the new table's
   records, (2) rehash every occupied record of the old table into the
   new one via insert_helper.  Both phases end with a barrier: each
   thread adds its finished-block count (release) and spins until the
   global count reaches the total (acquire).
   NOTE(review): this chunk is elided -- the loop-termination
   comparisons of my_block against num_new_blocks/num_old_blocks, the
   braces, the use of `blocking', and the skip of empty old records are
   not all visible here.  */
183 static void resize_helper(NAME *htab, int blocking)
185 size_t num_old_blocks = CEIL(htab->old_size, MOVE_BLOCK_SIZE);
186 size_t num_new_blocks = CEIL(htab->size, INITIALIZATION_BLOCK_SIZE);
189 size_t num_finished_blocks = 0;
/* Claim initialization blocks one at a time; fetch-add makes each
   block index unique across threads.  */
191 while ((my_block = atomic_fetch_add_explicit(&htab->next_init_block, 1,
192 memory_order_acquire))
195 size_t record_it = my_block * INITIALIZATION_BLOCK_SIZE;
196 size_t record_end = (my_block + 1) * INITIALIZATION_BLOCK_SIZE;
197 if (record_end > htab->size)
198 record_end = htab->size;
/* Subtle: the condition tests record_it BEFORE the increment, so the
   body sees the incremented value -- this block initializes slots
   my_block*BLOCK_SIZE + 1 .. record_end, matching the 1-based slot
   indexing used by lookup/insert_helper.  */
200 while (record_it++ != record_end)
202 atomic_init(&htab->table[record_it].hashval, (uintptr_t) NULL);
203 atomic_init(&htab->table[record_it].val_ptr, (uintptr_t) NULL);
206 num_finished_blocks++;
/* Phase-1 barrier: publish our contribution, then wait for all new
   blocks to be initialized before anyone starts moving data.  */
209 atomic_fetch_add_explicit(&htab->num_initialized_blocks,
210 num_finished_blocks, memory_order_release);
211 while (atomic_load_explicit(&htab->num_initialized_blocks,
212 memory_order_acquire) != num_new_blocks);
214 /* All block are initialized, start moving */
215 num_finished_blocks = 0;
216 while ((my_block = atomic_fetch_add_explicit(&htab->next_move_block, 1,
217 memory_order_acquire))
220 size_t record_it = my_block * MOVE_BLOCK_SIZE;
221 size_t record_end = (my_block + 1) * MOVE_BLOCK_SIZE;
222 if (record_end > htab->old_size)
223 record_end = htab->old_size;
/* Same post-increment pattern: processes old-table slots
   my_block*BLOCK_SIZE + 1 .. record_end.  */
225 while (record_it++ != record_end)
227 TYPE val_ptr = (TYPE) atomic_load_explicit(
228 &htab->old_table[record_it].val_ptr,
229 memory_order_acquire);
/* NOTE(review): the guard skipping empty old records (val_ptr == 0) is
   elided between these loads -- confirm against the full source.  */
233 HASHTYPE hashval = atomic_load_explicit(
234 &htab->old_table[record_it].hashval,
235 memory_order_acquire);
238 insert_helper(htab, hashval, val_ptr);
241 num_finished_blocks++;
/* Phase-2 barrier, same shape as phase 1.  */
244 atomic_fetch_add_explicit(&htab->num_moved_blocks, num_finished_blocks,
245 memory_order_release);
248 while (atomic_load_explicit(&htab->num_moved_blocks,
249 memory_order_acquire) != num_old_blocks);
/* resize_master: run by the single thread that won the CAS in INSERT
   and therefore owns the resize.  Allocates the doubled table, drives
   the shared copy via resize_helper, waits for helper threads to
   drain, resets the work counters for the next resize, and frees the
   old table.  Phase transitions are done with fetch_xor of
   OLD ^ NEW, which rewrites only the low state bits and leaves the
   worker count intact.
   NOTE(review): callers presumably hold resize_rwl for writing here
   (INSERT takes the write lock before resizing) -- confirm against the
   full source.  */
253 resize_master(NAME *htab)
255 htab->old_size = htab->size;
256 htab->old_table = htab->table;
/* Roughly double, then round up to a prime (required by the
   second-hash stride to cover all slots).  */
258 htab->size = next_prime(htab->size * 2);
/* +1 because slot indices are 1-based (idx in [1, size]).
   NOTE(review): malloc result is not checked in the visible lines; a
   failure here would crash in resize_helper -- verify whether the
   elided lines handle it.  */
259 htab->table = malloc((1 + htab->size) * sizeof(htab->table[0]));
262 /* Change state from ALLOCATING_MEMORY to MOVING_DATA */
/* Release: workers that observe MOVING_DATA must also see the new
   table pointer/size written above.  */
263 atomic_fetch_xor_explicit(&htab->resizing_state,
264 ALLOCATING_MEMORY ^ MOVING_DATA,
265 memory_order_release);
267 resize_helper(htab, 1);
269 /* Change state from MOVING_DATA to CLEANING */
270 size_t resize_state = atomic_fetch_xor_explicit(&htab->resizing_state,
271 MOVING_DATA ^ CLEANING,
272 memory_order_acq_rel);
/* Spin until every registered worker has deregistered; only then is it
   safe to reset the shared counters and free the old table.  */
273 while (GET_ACTIVE_WORKERS(resize_state) != 0)
274 resize_state = atomic_load_explicit(&htab->resizing_state,
275 memory_order_acquire);
277 /* There are no more active workers */
278 atomic_store_explicit(&htab->next_init_block, 0, memory_order_relaxed);
279 atomic_store_explicit(&htab->num_initialized_blocks, 0,
280 memory_order_relaxed);
282 atomic_store_explicit(&htab->next_move_block, 0, memory_order_relaxed);
283 atomic_store_explicit(&htab->num_moved_blocks, 0, memory_order_relaxed);
285 free(htab->old_table);
287 /* Change state to NO_RESIZING */
288 atomic_fetch_xor_explicit(&htab->resizing_state, CLEANING ^ NO_RESIZING,
289 memory_order_relaxed);
/* resize_worker: run by a thread that found the table being resized.
   It registers itself as a helper (worker count lives in the high bits
   of resizing_state), helps move data via resize_helper, then
   deregisters.  It backs out early whenever the state shows the resize
   is already finished or in its CLEANING phase.
   NOTE(review): the early-return statements after the IS_NO_RESIZE...
   tests and the enclosing braces are elided from this chunk.  */
294 resize_worker(NAME *htab)
296 size_t resize_state = atomic_load_explicit(&htab->resizing_state,
297 memory_order_acquire);
299 /* If the resize has finished */
300 if (IS_NO_RESIZE_OR_CLEANING(resize_state))
303 /* Register as worker and check if the resize has finished in the meantime*/
/* fetch_add of STATE_INCREMENT bumps the worker count without touching
   the low state bits; the returned old value is re-checked below.
   NOTE(review): the STATE_INCREMENT argument line is elided here.  */
304 resize_state = atomic_fetch_add_explicit(&htab->resizing_state,
306 memory_order_acquire);
307 if (IS_NO_RESIZE_OR_CLEANING(resize_state))
/* Resize ended between the load and the registration: undo it.  */
309 atomic_fetch_sub_explicit(&htab->resizing_state, STATE_INCREMENT,
310 memory_order_relaxed);
314 /* Wait while the new table is being allocated. */
315 while (GET_STATE(resize_state) == ALLOCATING_MEMORY)
316 resize_state = atomic_load_explicit(&htab->resizing_state,
317 memory_order_acquire);
319 /* Check if the resize is done */
320 assert(GET_STATE(resize_state) != NO_RESIZING);
321 if (GET_STATE(resize_state) == CLEANING)
/* Master is already cleaning up; just deregister and leave.  */
323 atomic_fetch_sub_explicit(&htab->resizing_state, STATE_INCREMENT,
324 memory_order_relaxed);
/* State is MOVING_DATA: help copy records (non-blocking role).  */
328 resize_helper(htab, 0);
330 /* Deregister worker */
/* Release so the master's acquire spin in resize_master observes all
   our table writes before it frees the old table.  */
331 atomic_fetch_sub_explicit(&htab->resizing_state, STATE_INCREMENT,
332 memory_order_release);
/* INIT(NAME): initialize an empty table with at least init_size slots.
   The two-level macro expands NAME before pasting the function name.
   NOTE(review): the body braces, the out-of-memory return after the
   NULL check, and the final return are elided from this chunk.  */
337 #define INIT(name) _INIT (name)
338 #define _INIT(name) \
340 INIT(NAME) (NAME *htab, size_t init_size)
342 /* We need the size to be a prime. */
343 init_size = next_prime (init_size);
345 /* Initialize the data structure. */
346 htab->size = init_size;
347 atomic_init(&htab->filled, 0);
348 atomic_init(&htab->resizing_state, 0);
/* Work-distribution counters used by resize_helper.  */
350 atomic_init(&htab->next_init_block, 0);
351 atomic_init(&htab->num_initialized_blocks, 0);
353 atomic_init(&htab->next_move_block, 0);
354 atomic_init(&htab->num_moved_blocks, 0);
/* Readers (FIND/INSERT) take this read-locked; a resize takes it
   write-locked.  */
356 pthread_rwlock_init(&htab->resize_rwl, NULL);
/* +1 entry because slot indices are 1-based (idx in [1, size]).  */
358 htab->table = malloc ((init_size + 1) * sizeof (htab->table[0]));
359 if (htab->table == NULL)
/* Mark every slot empty (hashval == 0 means unused).  */
362 for (size_t i = 0; i <= init_size; i++)
364 atomic_init(&htab->table[i].hashval, (uintptr_t) NULL);
365 atomic_init(&htab->table[i].val_ptr, (uintptr_t) NULL);
/* FREE(NAME): destroy the table's lock and release its storage.
   NOTE(review): the free(htab->table) call and return are elided from
   this chunk; only the rwlock destruction is visible.  */
373 #define FREE(name) _FREE (name)
374 #define _FREE(name) \
376 FREE(NAME) (NAME *htab)
378 pthread_rwlock_destroy(&htab->resize_rwl);
/* INSERT(NAME): insert DATA under hash HVAL.  Takes the resize rwlock
   for reading (multiple inserters run concurrently); if the table is
   over 90% full, one thread CAS-es resizing_state out of NO_RESIZING,
   upgrades to the write lock and becomes resize master while the
   losers help via resize_worker.
   NOTE(review): heavily elided -- the tryrdlock-failure path (which
   presumably calls resize_worker), the CAS desired value
   (ALLOCATING_MEMORY), the resize_master call, the retry after
   resizing, and the duplicate-insert condition guarding the filled
   decrement are not visible in this chunk.  */
385 #define INSERT(name) _INSERT (name)
386 #define _INSERT(name) \
388 INSERT(NAME) (NAME *htab, HASHTYPE hval, TYPE data)
/* If the read lock is contended a resize may be running; spin/help.  */
394 while (pthread_rwlock_tryrdlock(&htab->resize_rwl) != 0)
/* Optimistically count this insert toward the fill level.  */
400 filled = atomic_fetch_add_explicit(&htab->filled, 1,
401 memory_order_acquire);
406 filled = atomic_load_explicit(&htab->filled,
407 memory_order_acquire);
/* Integer form of filled/size > 0.90 without floating point.  */
411 if (100 * filled > 90 * htab->size)
413 /* Table is filled more than 90%. Resize the table. */
415 size_t resizing_state = atomic_load_explicit(&htab->resizing_state,
416 memory_order_acquire);
/* Exactly one thread wins this CAS and becomes the resize master.  */
417 if (resizing_state == 0 &&
418 atomic_compare_exchange_strong_explicit(&htab->resizing_state,
421 memory_order_acquire,
422 memory_order_acquire))
/* Drop the read lock before taking the write lock to avoid
   self-deadlock; the write lock excludes all readers during the
   resize.  */
425 pthread_rwlock_unlock(&htab->resize_rwl);
427 pthread_rwlock_wrlock(&htab->resize_rwl);
429 pthread_rwlock_unlock(&htab->resize_rwl);
435 pthread_rwlock_unlock(&htab->resize_rwl);
441 /* Lock acquired, no need for resize*/
446 int ret_val = insert_helper(htab, hval, data);
/* Undo the optimistic filled increment -- presumably only on the
   duplicate/failed-insert path; the guarding condition is elided.  */
448 atomic_fetch_sub_explicit(&htab->filled, 1, memory_order_relaxed);
449 pthread_rwlock_unlock(&htab->resize_rwl);
/* FIND(NAME): look up HVAL and return the stored value, or the
   not-found result (elided) when no slot matches.  Holds the resize
   read lock across the probe so the table cannot be swapped or freed
   underneath the reader.
   NOTE(review): elided -- the tryrdlock-failure path (presumably
   resize_worker), the hval adjustment under "Make the hash data
   nonzero", the idx == 0 not-found test before the first unlock, and
   the returns are not visible in this chunk.  */
456 #define FIND(name) _FIND (name)
457 #define _FIND(name) \
459 FIND(NAME) (NAME *htab, HASHTYPE hval)
461 while (pthread_rwlock_tryrdlock(&htab->resize_rwl) != 0)
466 /* Make the hash data nonzero. */
/* lookup presumably returns 0 for "not found" (slot 0 is unused since
   indices are 1-based) -- confirm against the full source.  */
468 idx = lookup(htab, hval);
472 pthread_rwlock_unlock(&htab->resize_rwl);
476 /* get a copy before unlocking the lock */
/* Relaxed is sufficient: the acquire load of hashval inside lookup
   already ordered this val_ptr read after its publication.  */
477 TYPE ret_val = (TYPE) atomic_load_explicit(&htab->table[idx].val_ptr,
478 memory_order_relaxed);
480 pthread_rwlock_unlock(&htab->resize_rwl);