/* Load a shared object at runtime, relocate it, and run its initializer.
- Copyright (C) 1996-2004, 2005, 2006 Free Software Foundation, Inc.
+ Copyright (C) 1996-2007, 2009 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
#include <bp-sym.h>
#include <caller.h>
#include <sysdep-cancel.h>
+#include <tls.h>
#include <dl-dst.h>
in a realloc() call. Therefore we allocate a completely new
array the first time we have to add something to the global scope. */
- if (GL(dl_ns)[new->l_ns]._ns_global_scope_alloc == 0)
+ struct link_namespaces *ns = &GL(dl_ns)[new->l_ns];
+ if (ns->_ns_global_scope_alloc == 0)
{
/* This is the first dynamic object given global scope. */
- GL(dl_ns)[new->l_ns]._ns_global_scope_alloc
- = GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_nlist + to_add + 8;
+ ns->_ns_global_scope_alloc
+ = ns->_ns_main_searchlist->r_nlist + to_add + 8;
new_global = (struct link_map **)
- malloc (GL(dl_ns)[new->l_ns]._ns_global_scope_alloc
- * sizeof (struct link_map *));
+ malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *));
if (new_global == NULL)
{
- GL(dl_ns)[new->l_ns]._ns_global_scope_alloc = 0;
+ ns->_ns_global_scope_alloc = 0;
nomem:
_dl_signal_error (ENOMEM, new->l_libname->name, NULL,
N_("cannot extend global scope"));
}
/* Copy over the old entries. */
- GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list
- = memcpy (new_global,
- GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list,
- (GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_nlist
+ ns->_ns_main_searchlist->r_list
+ = memcpy (new_global, ns->_ns_main_searchlist->r_list,
+ (ns->_ns_main_searchlist->r_nlist
* sizeof (struct link_map *)));
}
- else if (GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_nlist + to_add
- > GL(dl_ns)[new->l_ns]._ns_global_scope_alloc)
+ else if (ns->_ns_main_searchlist->r_nlist + to_add
+ > ns->_ns_global_scope_alloc)
{
/* We have to extend the existing array of link maps in the
main map. */
+ struct link_map **old_global = ns->_ns_main_searchlist->r_list;
+ size_t new_nalloc = 2 * (ns->_ns_global_scope_alloc + to_add);
+
new_global = (struct link_map **)
- realloc (GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list,
- ((GL(dl_ns)[new->l_ns]._ns_global_scope_alloc + to_add + 8)
- * sizeof (struct link_map *)));
+ malloc (new_nalloc * sizeof (struct link_map *));
if (new_global == NULL)
goto nomem;
- GL(dl_ns)[new->l_ns]._ns_global_scope_alloc += to_add + 8;
- GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list = new_global;
+ memcpy (new_global, old_global,
+ ns->_ns_global_scope_alloc * sizeof (struct link_map *));
+
+ ns->_ns_global_scope_alloc = new_nalloc;
+ ns->_ns_main_searchlist->r_list = new_global;
+
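+ /* Threads performing lazy symbol lookups may still be iterating
+ over the old array inside their GSCOPE critical sections; wait
+ for all of them to leave before releasing it. */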
+ if (!RTLD_SINGLE_THREAD_P)
+ THREAD_GSCOPE_WAIT ();
+
+ free (old_global);
}
/* Now add the new entries. */
+ unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
{
struct link_map *map = new->l_searchlist.r_list[cnt];
if (map->l_global == 0)
{
map->l_global = 1;
- GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list[GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_nlist]
- = map;
- ++GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_nlist;
+ ns->_ns_main_searchlist->r_list[new_nlist++] = map;
}
}
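+ /* Publish the new list entries before the updated count so that a
+ concurrent reader never sees r_nlist cover uninitialized slots. */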
+ atomic_write_barrier ();
+ ns->_ns_main_searchlist->r_nlist = new_nlist;
return 0;
}
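+/* OLD is a scope array that has just been replaced. Queue it for
+ deallocation: in a single-threaded process it can be freed right
+ away, otherwise it is parked on a small free list so a single
+ THREAD_GSCOPE_WAIT can cover several retired arrays and nothing is
+ freed while another thread may still be walking it. Returns
+ nonzero when a GSCOPE wait was performed. */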
+int
+_dl_scope_free (void *old)
+{
+ struct dl_scope_free_list *fsl;
+#define DL_SCOPE_FREE_LIST_SIZE (sizeof (fsl->list) / sizeof (fsl->list[0]))
+
+ if (RTLD_SINGLE_THREAD_P)
+ free (old);
+ else if ((fsl = GL(dl_scope_free_list)) == NULL)
+ {
+ GL(dl_scope_free_list) = fsl = malloc (sizeof (*fsl));
+ if (fsl == NULL)
+ {
+ THREAD_GSCOPE_WAIT ();
+ free (old);
+ return 1;
+ }
+ else
+ {
+ fsl->list[0] = old;
+ fsl->count = 1;
+ }
+ }
+ else if (fsl->count < DL_SCOPE_FREE_LIST_SIZE)
+ fsl->list[fsl->count++] = old;
+ else
+ {
+ THREAD_GSCOPE_WAIT ();
+ while (fsl->count > 0)
+ free (fsl->list[--fsl->count]);
+ return 1;
+ }
+ return 0;
+}
static void
dl_open_worker (void *a)
struct dl_open_args *args = a;
const char *file = args->file;
int mode = args->mode;
- struct link_map *new, *l;
- int lazy;
- unsigned int i;
-#ifdef USE_TLS
- bool any_tls = false;
-#endif
struct link_map *call_map = NULL;
/* Check whether _dl_open() has been called from a valid DSO. */
By default we assume this is the main application. */
call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
- for (Lmid_t ns = 0; ns < DL_NNS; ++ns)
+ struct link_map *l;
+ for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
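+ /* A mapping may contain holes where unrelated objects live, so
+ unless the map is known to be contiguous also check the address
+ against the object's PT_LOAD segments. */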
if (caller_dlopen >= (const void *) l->l_map_start
- && caller_dlopen < (const void *) l->l_map_end)
+ && caller_dlopen < (const void *) l->l_map_end
+ && (l->l_contiguous
+ || _dl_addr_inside_object (l, (ElfW(Addr)) caller_dlopen)))
{
- /* There must be exactly one DSO for the range of the virtual
- memory. Otherwise something is really broken. */
assert (ns == l->l_ns);
call_map = l;
goto found_caller;
if (__builtin_expect (dst != NULL, 0))
{
size_t len = strlen (file);
- size_t required;
- char *new_file;
/* Determine how much space we need. We have to allocate the
memory locally. */
- required = DL_DST_REQUIRED (call_map, file, len, _dl_dst_count (dst, 0));
+ size_t required = DL_DST_REQUIRED (call_map, file, len,
+ _dl_dst_count (dst, 0));
/* Get space for the new file name. */
- new_file = (char *) alloca (required + 1);
+ char *new_file = (char *) alloca (required + 1);
/* Generate the new file name. */
_dl_dst_substitute (call_map, file, new_file, 0);
}
/* Load the named object. */
+ struct link_map *new;
args->map = new = _dl_map_object (call_map, file, 0, lt_loaded, 0,
mode | __RTLD_CALLMAP, args->nsid);
mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));
/* So far, so good. Now check the versions. */
- for (i = 0; i < new->l_searchlist.r_nlist; ++i)
+ for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
(void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
0, 0);
_dl_debug_state ();
/* Only do lazy relocation if `LD_BIND_NOW' is not set. */
- lazy = (mode & RTLD_BINDING_MASK) == RTLD_LAZY && GLRO(dl_lazy);
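+ /* Hand the audit flag through to the relocation code together
+ with the laziness request. */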
+ int reloc_mode = mode & __RTLD_AUDIT;
+ if (GLRO(dl_lazy))
+ reloc_mode |= mode & RTLD_LAZY;
/* Relocate the objects loaded. We do this in reverse order so that copy
relocs of earlier objects overwrite the data written by later objects. */
- l = new;
+ struct link_map *l = new;
while (l->l_next)
l = l->l_next;
while (1)
start the profiling. */
struct link_map *old_profile_map = GL(dl_profile_map);
- _dl_relocate_object (l, l->l_scope, 1, 1);
+ _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);
if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
{
}
else
#endif
- _dl_relocate_object (l, l->l_scope, lazy, 0);
+ _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
}
if (l == new)
/* If the file is not loaded now as a dependency, add the search
list of the newly loaded object to the scope. */
- for (i = 0; i < new->l_searchlist.r_nlist; ++i)
+ bool any_tls = false;
+ for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
{
struct link_map *imap = new->l_searchlist.r_list[i];
memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
struct r_scope_elem **old = imap->l_scope;
- if (SINGLE_THREAD_P)
- imap->l_scope = newp;
- else
- {
- __rtld_mrlock_change (imap->l_scope_lock);
- imap->l_scope = newp;
- __rtld_mrlock_done (imap->l_scope_lock);
- }
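+ /* Concurrent lookups run under the GSCOPE protocol and keep using
+ whichever scope array they picked up, so the old array is retired
+ via _dl_scope_free instead of being freed immediately. */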
+ imap->l_scope = newp;
if (old != imap->l_scope_mem)
- free (old);
+ _dl_scope_free (old);
imap->l_scope_max = new_size;
}
atomic_write_barrier ();
imap->l_scope[cnt] = &new->l_searchlist;
}
-#if USE_TLS
/* Only add TLS memory if this object is loaded now and
therefore is not yet initialized. */
else if (! imap->l_init_called
if (imap->l_need_tls_init)
{
imap->l_need_tls_init = 0;
-# ifdef SHARED
+#ifdef SHARED
/* Update the slot information data for at least the
generation of the DSO we are allocating data for. */
_dl_update_slotinfo (imap->l_tls_modid);
-# endif
+#endif
GL(dl_init_static_tls) (imap);
assert (imap->l_need_tls_init == 0);
/* We have to bump the generation counter. */
any_tls = true;
}
-#endif
}
-#if USE_TLS
/* Bump the generation number if necessary. */
if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
_dl_fatal_printf (N_("\
TLS generation counter wrapped! Please report this."));
-#endif
/* Run the initializer functions of new objects. */
_dl_init (new, args->argc, args->argv, args->env);
/* Make sure we are alone. */
__rtld_lock_lock_recursive (GL(dl_load_lock));
- if (nsid == LM_ID_NEWLM)
+ if (__builtin_expect (nsid == LM_ID_NEWLM, 0))
{
/* Find a new namespace. */
- for (nsid = 1; nsid < DL_NNS; ++nsid)
+ for (nsid = 1; nsid < GL(dl_nns); ++nsid)
if (GL(dl_ns)[nsid]._ns_loaded == NULL)
break;
no more namespaces available for dlmopen()"));
}
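+ /* The slot we picked lies past the namespaces used so far; count
+ it as in use. */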
+ if (nsid == GL(dl_nns))
+ ++GL(dl_nns);
+
_dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
}
/* Never allow loading a DSO in a namespace which is empty. Such
direct placement only causes problems. Also don't allow
loading into a namespace used for auditing. */
- else if (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER
+ else if (__builtin_expect (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER, 0)
&& (GL(dl_ns)[nsid]._ns_nloaded == 0
|| GL(dl_ns)[nsid]._ns_loaded->l_auditing))
_dl_signal_error (EINVAL, file, NULL,
N_("invalid target namespace in dlmopen()"));
+#ifndef SHARED
+ else if ((nsid == LM_ID_BASE || nsid == __LM_ID_CALLER)
+ && GL(dl_ns)[LM_ID_BASE]._ns_loaded == NULL
+ && GL(dl_nns) == 0)
+ GL(dl_nns) = 1;
+#endif
struct dl_open_args args;
args.file = file;
state if relocation failed, for example. */
if (args.map)
{
-#ifdef USE_TLS
/* Maybe some of the modules which were loaded use TLS.
Since it will be removed in the following _dl_close call
we have to mark the dtv array as having gaps to be filled
up. */
if ((mode & __RTLD_AUDIT) == 0)
GL(dl_tls_dtv_gaps) = true;
-#endif
- _dl_close (args.map);
+ _dl_close_worker (args.map);
}
assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);
}
}
#endif
+
+#ifdef IS_IN_rtld
+/* Return non-zero if ADDR lies within one of L's segments. */
+int
+internal_function
+_dl_addr_inside_object (struct link_map *l, const ElfW(Addr) addr)
+{
+ int n = l->l_phnum;
+ const ElfW(Addr) reladdr = addr - l->l_addr;
+
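+ /* Check each PT_LOAD segment. The arithmetic is unsigned, so an
+ address below p_vaddr wraps around and fails the p_memsz bound. */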
+ while (--n >= 0)
+ if (l->l_phdr[n].p_type == PT_LOAD
+ && reladdr - l->l_phdr[n].p_vaddr >= 0
+ && reladdr - l->l_phdr[n].p_vaddr < l->l_phdr[n].p_memsz)
+ return 1;
+ return 0;
+}
+#endif