+2010-02-02 Andreas Schwab <schwab@redhat.com>
+
+ * sysdeps/generic/ldsodefs.h (struct rtld_global): Add
+ _dl_load_write_lock.
+ * elf/rtld.c (_rtld_global): Initialize it.
+ * elf/dl-support.c (_dl_load_write_lock): Define.
+ * elf/dl-close.c (_dl_close_worker): Lock GL(dl_load_write_lock)
+ when modifying the list of loaded objects.
+ * elf/dl-load.c (lose): Likewise.
+ * elf/dl-object.c (_dl_new_object): Likewise.
+ * elf/dl-iteratephdr.c (__dl_iterate_phdr): Lock
+ GL(dl_load_write_lock) instead of GL(dl_load_lock).
+
2010-05-03 Ulrich Drepper <drepper@redhat.com>
* elf/dl-iteratephdr.c (__dl_iterate_phdr): Remove unnecessary
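
The scheme the new entry describes is simple: every path that links an object into or unlinks it from the per-namespace list takes GL(dl_load_write_lock) for the duration of the modification, and __dl_iterate_phdr takes the same lock while it walks the list, so the callbacks never see a half-updated chain. A minimal self-contained sketch of that discipline, using a plain pthread mutex and a made-up node type in place of the internal __rtld_lock_* macros and struct link_map:

#include <pthread.h>
#include <stddef.h>

/* Stand-ins for GL(dl_load_write_lock) and the list of loaded objects.  */
struct node { struct node *next; };

static pthread_mutex_t write_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *loaded;

/* Writer side, as in dl-object.c/dl-close.c/dl-load.c: hold the lock
   across the insert/unlink so the list is never seen half-updated.  */
static void
add_object (struct node *n)
{
  pthread_mutex_lock (&write_lock);
  n->next = loaded;
  loaded = n;
  pthread_mutex_unlock (&write_lock);
}

/* Reader side, as in dl-iteratephdr.c: hold the same lock while walking
   so concurrent additions and removals cannot race with the traversal.  */
static void
walk_objects (void (*cb) (struct node *))
{
  pthread_mutex_lock (&write_lock);
  for (struct node *n = loaded; n != NULL; n = n->next)
    cb (n);
  pthread_mutex_unlock (&write_lock);
}
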
size_t tls_free_end;
tls_free_start = tls_free_end = NO_TLS_OFFSET;
+ /* We modify the list of loaded objects. */
+ __rtld_lock_lock_recursive (GL(dl_load_write_lock));
+
/* Check each element of the search list to see if all references to
it are gone. */
for (unsigned int i = first_loaded; i < nloaded; ++i)
}
}
+ __rtld_lock_unlock_recursive (GL(dl_load_write_lock));
+
/* If we removed any object which uses TLS bump the generation counter. */
if (any_tls)
{
static void
cancel_handler (void *arg __attribute__((unused)))
{
- __rtld_lock_unlock_recursive (GL(dl_load_lock));
+ __rtld_lock_unlock_recursive (GL(dl_load_write_lock));
}
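
cancel_handler exists because the cleanup handler pushed below must drop the lock if a thread is cancelled inside one of the callbacks. Outside ld.so the same pattern can be sketched with the public pthread cleanup API; the mutex name and the traversal body here are placeholders:

#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void
unlock_handler (void *arg)
{
  pthread_mutex_unlock (arg);
}

static void
walk_list (void)
{
  pthread_mutex_lock (&list_lock);
  pthread_cleanup_push (unlock_handler, &list_lock);
  /* ... invoke user callbacks; a cancellation point in here still
     releases list_lock via unlock_handler ... */
  pthread_cleanup_pop (0);            /* 0: do not run the handler now.  */
  pthread_mutex_unlock (&list_lock);  /* Normal path unlocks explicitly.  */
}
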
hidden_proto (__dl_iterate_phdr)
struct dl_phdr_info info;
int ret = 0;
- /* Make sure we are alone. */
- __rtld_lock_lock_recursive (GL(dl_load_lock));
+ /* Make sure nobody modifies the list of loaded objects. */
+ __rtld_lock_lock_recursive (GL(dl_load_write_lock));
__libc_cleanup_push (cancel_handler, 0);
/* We have to determine the namespace of the caller since this determines
/* Release the lock. */
__libc_cleanup_pop (0);
- __rtld_lock_unlock_recursive (GL(dl_load_lock));
+ __rtld_lock_unlock_recursive (GL(dl_load_write_lock));
return ret;
}
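
For reference, the public entry point built on top of __dl_iterate_phdr is dl_iterate_phdr(3); a minimal caller looks like the sketch below, and with this patch the object list can no longer change while its callbacks run:

#define _GNU_SOURCE
#include <link.h>
#include <stdio.h>

/* Called once per loaded object; the prototype is dictated by <link.h>.  */
static int
print_object (struct dl_phdr_info *info, size_t size __attribute__((unused)),
              void *data __attribute__((unused)))
{
  printf ("%s loaded at %#lx with %u program headers\n",
          info->dlpi_name[0] != '\0' ? info->dlpi_name : "(main program)",
          (unsigned long) info->dlpi_addr, (unsigned int) info->dlpi_phnum);
  return 0;   /* Return nonzero to stop the iteration early.  */
}

int
main (void)
{
  dl_iterate_phdr (print_object, NULL);
  return 0;
}
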
(void) __close (fd);
if (l != NULL)
{
+ /* We modify the list of loaded objects. */
+ __rtld_lock_lock_recursive (GL(dl_load_write_lock));
/* Remove the stillborn object from the list and free it. */
assert (l->l_next == NULL);
if (l->l_prev == NULL)
l->l_prev->l_next = NULL;
--GL(dl_ns)[l->l_ns]._ns_nloaded;
free (l);
+ __rtld_lock_unlock_recursive (GL(dl_load_write_lock));
}
free (realname);
new->l_scope = new->l_scope_mem;
new->l_scope_max = sizeof (new->l_scope_mem) / sizeof (new->l_scope_mem[0]);
+ /* We modify the list of loaded objects. */
+ __rtld_lock_lock_recursive (GL(dl_load_write_lock));
+
/* Counter for the scopes we have to handle. */
idx = 0;
new->l_serial = GL(dl_load_adds);
++GL(dl_load_adds);
+ __rtld_lock_unlock_recursive (GL(dl_load_write_lock));
+
/* If we have no loader the new object acts as it. */
if (loader == NULL)
loader = new;
the loaded object might as well require a call to this function.
At this time it is not anymore a problem to modify the tables. */
__rtld_lock_define_initialized_recursive (, _dl_load_lock)
+/* This lock is used to keep __dl_iterate_phdr from inspecting the
+ list of loaded objects while an object is added to or removed from
+ that list. */
+__rtld_lock_define_initialized_recursive (, _dl_load_write_lock)
#ifdef HAVE_AUX_VECTOR
._dl_stack_flags = PF_R|PF_W|PF_X,
#ifdef _LIBC_REENTRANT
._dl_load_lock = _RTLD_LOCK_RECURSIVE_INITIALIZER,
+ ._dl_load_write_lock = _RTLD_LOCK_RECURSIVE_INITIALIZER,
#endif
._dl_nns = 1,
._dl_ns =
the loaded object might as well require a call to this function.
At this time it is not anymore a problem to modify the tables. */
__rtld_lock_define_recursive (EXTERN, _dl_load_lock)
+ /* This lock is used to keep __dl_iterate_phdr from inspecting the
+ list of loaded objects while an object is added to or removed
+ from that list. */
+ __rtld_lock_define_recursive (EXTERN, _dl_load_write_lock)
/* Incremented whenever something may have been added to dl_loaded. */
EXTERN unsigned long long _dl_load_adds;