/* Memory handling for libdw.
   Copyright (C) 2003, 2004, 2006 Red Hat, Inc.
   This file is part of elfutils.
   Written by Ulrich Drepper <drepper@redhat.com>, 2003.

   This file is free software; you can redistribute it and/or modify
   it under the terms of either

     * the GNU Lesser General Public License as published by the Free
       Software Foundation; either version 3 of the License, or (at
       your option) any later version

   or

     * the GNU General Public License as published by the Free
       Software Foundation; either version 2 of the License, or (at
       your option) any later version

   or both in parallel, as here.

   elfutils is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received copies of the GNU General Public License and
   the GNU Lesser General Public License along with this program.  If
   not, see <http://www.gnu.org/licenses/>.  */
/* Happens-before/after annotations for Valgrind's Helgrind tool.
   They compile to nothing when Valgrind support is not enabled.
   The original #if here was unterminated; restore the #else/#endif.  */
#if USE_VG_ANNOTATIONS == 1
#include <helgrind.h>
#else
#define ANNOTATE_HAPPENS_BEFORE(X)
#define ANNOTATE_HAPPENS_AFTER(X)
#endif

/* Each thread lazily claims a stable index into the per-Dwarf
   mem_tails array the first time it allocates; indices come from a
   global monotonically increasing counter.  */
#define THREAD_ID_UNSET ((size_t) -1)
static __thread size_t thread_id = THREAD_ID_UNSET;
static atomic_size_t next_id = ATOMIC_VAR_INIT(0);
50 struct libdw_memblock *
51 __libdw_alloc_tail (Dwarf *dbg)
53 if (thread_id == THREAD_ID_UNSET)
54 thread_id = atomic_fetch_add (&next_id, 1);
56 pthread_rwlock_rdlock (&dbg->mem_rwl);
57 if (thread_id >= dbg->mem_stacks)
59 pthread_rwlock_unlock (&dbg->mem_rwl);
60 pthread_rwlock_wrlock (&dbg->mem_rwl);
62 /* Another thread may have already reallocated. In theory using an
63 atomic would be faster, but given that this only happens once per
64 thread per Dwarf, some minor slowdown should be fine. */
65 if (thread_id >= dbg->mem_stacks)
67 dbg->mem_tails = realloc (dbg->mem_tails, (thread_id+1)
68 * sizeof (struct libdw_memblock *));
69 if (dbg->mem_tails == NULL)
71 pthread_rwlock_unlock (&dbg->mem_rwl);
74 for (size_t i = dbg->mem_stacks; i <= thread_id; i++)
75 dbg->mem_tails[i] = NULL;
76 dbg->mem_stacks = thread_id + 1;
77 ANNOTATE_HAPPENS_BEFORE (&dbg->mem_tails);
80 pthread_rwlock_unlock (&dbg->mem_rwl);
81 pthread_rwlock_rdlock (&dbg->mem_rwl);
84 /* At this point, we have an entry in the tail array. */
85 ANNOTATE_HAPPENS_AFTER (&dbg->mem_tails);
86 struct libdw_memblock *result = dbg->mem_tails[thread_id];
89 result = malloc (dbg->mem_default_size);
92 pthread_rwlock_unlock (&dbg->mem_rwl);
95 result->size = dbg->mem_default_size
96 - offsetof (struct libdw_memblock, mem);
97 result->remaining = result->size;
99 dbg->mem_tails[thread_id] = result;
101 pthread_rwlock_unlock (&dbg->mem_rwl);
105 /* Can only be called after a allocation for this thread has already
106 been done, to possibly undo it. */
107 struct libdw_memblock *
108 __libdw_thread_tail (Dwarf *dbg)
110 struct libdw_memblock *result;
111 pthread_rwlock_rdlock (&dbg->mem_rwl);
112 result = dbg->mem_tails[thread_id];
113 pthread_rwlock_unlock (&dbg->mem_rwl);
118 __libdw_allocate (Dwarf *dbg, size_t minsize, size_t align)
120 size_t size = MAX (dbg->mem_default_size,
122 2 * minsize + offsetof (struct libdw_memblock, mem)));
123 struct libdw_memblock *newp = malloc (size);
127 uintptr_t result = ((uintptr_t) newp->mem + align - 1) & ~(align - 1);
129 newp->size = size - offsetof (struct libdw_memblock, mem);
130 newp->remaining = (uintptr_t) newp + size - (result + minsize);
132 pthread_rwlock_rdlock (&dbg->mem_rwl);
133 newp->prev = dbg->mem_tails[thread_id];
134 dbg->mem_tails[thread_id] = newp;
135 pthread_rwlock_unlock (&dbg->mem_rwl);
137 return (void *) result;
142 dwarf_new_oom_handler (Dwarf *dbg, Dwarf_OOM handler)
144 Dwarf_OOM old = dbg->oom_handler;
145 dbg->oom_handler = handler;
151 __attribute ((noreturn)) attribute_hidden
155 error (EXIT_FAILURE, ENOMEM, "libdw");