1 /* Load a shared object at runtime, relocate it, and run its initializer.
2 Copyright (C) 1996-2007, 2009, 2010, 2011 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, write to the Free
17 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
28 #include <sys/mman.h> /* Check whether MAP_COPY is defined. */
29 #include <sys/param.h>
30 #include <bits/libc-lock.h>
34 #include <sysdep-cancel.h>
40 extern ElfW(Addr) _dl_sysdep_start (void **start_argptr,
41 void (*dl_main) (const ElfW(Phdr) *phdr,
43 ElfW(Addr) *user_entry,
45 weak_extern (BP_SYM (_dl_sysdep_start))
47 extern int __libc_multiple_libcs; /* Defined in init-first.c. */
49 /* Undefine the following for debugging. */
50 /* #define SCOPE_DEBUG 1 */
52 static void show_scope (struct link_map *new);
55 /* We must be careful not to leave us in an inconsistent state.  Thus we
56 catch any error and re-raise it after cleaning up. */
62 /* This is the caller of the dlopen() function. */
63 const void *caller_dlopen;
64 /* This is the caller of _dl_open(). */
65 const void *caller_dl_open;
69 /* Original parameters to the program and the current environment. */
/* Add the object NEW and all of its not-yet-global dependencies to the
   global symbol scope of NEW's namespace.  On allocation failure an
   error is raised via _dl_signal_error (which does not return to the
   caller here).  NOTE(review): several lines of the original function
   are elided in this excerpt.  */
77 add_to_global (struct link_map *new)
79 struct link_map **new_global;
80 unsigned int to_add = 0;
83 /* Count the objects we have to put in the global scope. */
84 for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
85 if (new->l_searchlist.r_list[cnt]->l_global == 0)
88 /* The symbols of the new object and its dependencies are to be
89 introduced into the global scope that will be used to resolve
90 references from other dynamically-loaded objects.
92 The global scope is the searchlist in the main link map. We
93 extend this list if necessary. There is one problem though:
94 since this structure was allocated very early (before the libc
95 is loaded) the memory it uses is allocated by the malloc()-stub
96 in the ld.so. When we come here these functions are not used
97 anymore. Instead the malloc() implementation of the libc is
98 used. But this means the block from the main map cannot be used
99 in a realloc() call. Therefore we allocate a completely new
100 array the first time we have to add something to the global scope. */
102 struct link_namespaces *ns = &GL(dl_ns)[new->l_ns];
103 if (ns->_ns_global_scope_alloc == 0)
105 /* This is the first dynamic object given global scope. */
/* Over-allocate by 8 slots so a few subsequent additions need no
   further reallocation.  */
106 ns->_ns_global_scope_alloc
107 = ns->_ns_main_searchlist->r_nlist + to_add + 8;
108 new_global = (struct link_map **)
109 malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *));
110 if (new_global == NULL)
112 ns->_ns_global_scope_alloc = 0;
114 _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
115 N_("cannot extend global scope"));
119 /* Copy over the old entries. */
120 ns->_ns_main_searchlist->r_list
121 = memcpy (new_global, ns->_ns_main_searchlist->r_list,
122 (ns->_ns_main_searchlist->r_nlist
123 * sizeof (struct link_map *)));
125 else if (ns->_ns_main_searchlist->r_nlist + to_add
126 > ns->_ns_global_scope_alloc)
128 /* We have to extend the existing array of link maps in the
130 struct link_map **old_global
131 = GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list;
/* Grow geometrically (doubling) to keep amortized cost low.  */
132 size_t new_nalloc = ((ns->_ns_global_scope_alloc + to_add) * 2);
134 new_global = (struct link_map **)
135 malloc (new_nalloc * sizeof (struct link_map *));
136 if (new_global == NULL)
139 memcpy (new_global, old_global,
140 ns->_ns_global_scope_alloc * sizeof (struct link_map *));
142 ns->_ns_global_scope_alloc = new_nalloc;
143 ns->_ns_main_searchlist->r_list = new_global;
/* NOTE(review): presumably this waits until no thread can still be
   using the old scope array before it may be reused/freed — confirm
   against the THREAD_GSCOPE_WAIT definition.  */
145 if (!RTLD_SINGLE_THREAD_P)
146 THREAD_GSCOPE_WAIT ();
151 /* Now add the new entries. */
152 unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
153 for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
155 struct link_map *map = new->l_searchlist.r_list[cnt];
157 if (map->l_global == 0)
160 ns->_ns_main_searchlist->r_list[new_nlist++] = map;
/* Publish the new entries before making the larger count visible, so
   concurrent readers never see r_nlist cover uninitialized slots.  */
163 atomic_write_barrier ();
164 ns->_ns_main_searchlist->r_nlist = new_nlist;
/* Worker function run under _dl_catch_error from _dl_open: load the
   requested object and its dependencies, check symbol versions,
   relocate everything, set up TLS, run initializers, and optionally
   add the object to the global scope.  Errors are reported through
   _dl_signal_error, which unwinds back to the catcher in _dl_open.
   NOTE(review): many lines of the original function are elided in
   this excerpt.  */
170 dl_open_worker (void *a)
172 struct dl_open_args *args = a;
173 const char *file = args->file;
174 int mode = args->mode;
175 struct link_map *call_map = NULL;
177 /* Check whether _dl_open() has been called from a valid DSO. */
178 if (__check_caller (args->caller_dl_open,
179 allow_libc|allow_libdl|allow_ldso) != 0)
180 _dl_signal_error (0, "dlopen", NULL, N_("invalid caller"));
182 /* Determine the caller's map if necessary. This is needed in case
183 we have a DST, when we don't know the namespace ID we have to put
184 the new object in, or when the file name has no path in which
185 case we need to look along the RUNPATH/RPATH of the caller. */
186 const char *dst = strchr (file, '$');
187 if (dst != NULL || args->nsid == __LM_ID_CALLER
188 || strchr (file, '/') == NULL)
190 const void *caller_dlopen = args->caller_dlopen;
192 /* We have to find out from which object the caller is calling.
193 By default we assume this is the main application. */
194 call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
/* Search all namespaces for the link map containing the caller's
   return address.  */
197 for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
198 for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
199 if (caller_dlopen >= (const void *) l->l_map_start
200 && caller_dlopen < (const void *) l->l_map_end
202 || _dl_addr_inside_object (l, (ElfW(Addr)) caller_dlopen)))
204 assert (ns == l->l_ns);
210 if (args->nsid == __LM_ID_CALLER)
213 /* In statically linked apps there might be no loaded object. */
214 if (call_map == NULL)
215 args->nsid = LM_ID_BASE;
218 args->nsid = call_map->l_ns;
222 assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);
224 /* Load the named object. */
225 struct link_map *new;
226 args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
227 mode | __RTLD_CALLMAP, args->nsid);
229 /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
230 set and the object is not already loaded. */
233 assert (mode & RTLD_NOLOAD);
237 if (__builtin_expect (mode & __RTLD_SPROF, 0))
238 /* This happens only if we load a DSO for 'sprof'. */
241 /* This object is directly loaded. */
242 ++new->l_direct_opencount;
244 /* It was already open. */
245 if (__builtin_expect (new->l_searchlist.r_list != NULL, 0))
247 /* Let the user know about the opencount. */
248 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
249 _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
250 new->l_name, new->l_ns, new->l_direct_opencount);
252 /* If the user requested the object to be in the global namespace
253 but it is not so far, add it now. */
254 if ((mode & RTLD_GLOBAL) && new->l_global == 0)
255 (void) add_to_global (new);
257 assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);
262 /* Load that object's dependencies. */
263 _dl_map_object_deps (new, NULL, 0, 0,
264 mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));
266 /* So far, so good. Now check the versions. */
267 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
268 if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
269 (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
277 /* Auditing checkpoint: we have added all objects. */
278 if (__builtin_expect (GLRO(dl_naudit) > 0, 0))
280 struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
281 /* Do not call the functions for any auditing object. */
282 if (head->l_auditing == 0)
284 struct audit_ifaces *afct = GLRO(dl_audit);
285 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
287 if (afct->activity != NULL)
288 afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);
296 /* Notify the debugger all new objects are now ready to go. */
297 struct r_debug *r = _dl_debug_initialize (0, args->nsid);
298 r->r_state = RT_CONSISTENT;
301 /* Only do lazy relocation if `LD_BIND_NOW' is not set. */
302 int reloc_mode = mode & __RTLD_AUDIT;
304 reloc_mode |= mode & RTLD_LAZY;
306 /* Relocate the objects loaded. We do this in reverse order so that copy
307 relocs of earlier objects overwrite the data written by later objects. */
309 struct link_map *l = new;
314 if (! l->l_real->l_relocated)
317 if (__builtin_expect (GLRO(dl_profile) != NULL, 0))
319 /* If this here is the shared object which we want to profile
320 make sure the profile is started. We can find out whether
321 this is necessary or not by observing the `_dl_profile_map'
322 variable. If it was NULL before but is not NULL afterwards we must
323 start the profiling. */
324 struct link_map *old_profile_map = GL(dl_profile_map);
/* Profiling requires the PLT to stay lazy, hence forcing RTLD_LAZY.  */
326 _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);
328 if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
330 /* We must prepare the profiling. */
331 _dl_start_profile ();
333 /* Prevent unloading the object. */
334 GL(dl_profile_map)->l_flags_1 |= DF_1_NODELETE;
339 _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
347 /* If the file is not loaded now as a dependency, add the search
348 list of the newly loaded object to the scope. */
349 bool any_tls = false;
350 unsigned int first_static_tls = new->l_searchlist.r_nlist;
351 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
353 struct link_map *imap = new->l_searchlist.r_list[i];
355 /* If the initializer has been called already, the object has
356 not been loaded here and now. */
357 if (imap->l_init_called && imap->l_type == lt_loaded)
359 struct r_scope_elem **runp = imap->l_scope;
362 while (*runp != NULL)
364 if (*runp == &new->l_searchlist)
371 /* Avoid duplicates. */
/* cnt + 1 because the scope array is NULL-terminated; we need room
   for the new entry and the terminator.  */
374 if (__builtin_expect (cnt + 1 >= imap->l_scope_max, 0))
376 /* The 'r_scope' array is too small. Allocate a new one
379 struct r_scope_elem **newp;
381 #define SCOPE_ELEMS(imap) \
382 (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))
/* Reuse the statically-sized l_scope_mem buffer if the current scope
   lives elsewhere and the static buffer is big enough.  */
384 if (imap->l_scope != imap->l_scope_mem
385 && imap->l_scope_max < SCOPE_ELEMS (imap))
387 new_size = SCOPE_ELEMS (imap);
388 newp = imap->l_scope_mem;
392 new_size = imap->l_scope_max * 2;
393 newp = (struct r_scope_elem **)
394 malloc (new_size * sizeof (struct r_scope_elem *));
396 _dl_signal_error (ENOMEM, "dlopen", NULL,
397 N_("cannot create scope list"));
400 memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
401 struct r_scope_elem **old = imap->l_scope;
403 imap->l_scope = newp;
/* The static buffer is never freed; only heap-allocated arrays go
   through _dl_scope_free (which defers freeing until no lookup can
   still be using the old array).  */
405 if (old != imap->l_scope_mem)
406 _dl_scope_free (old);
408 imap->l_scope_max = new_size;
411 /* First terminate the extended list. Otherwise a thread
412 might use the new last element and then use the garbage
414 imap->l_scope[cnt + 1] = NULL;
/* Make the terminator visible before exposing the new element to
   concurrent lock-free scope readers.  */
415 atomic_write_barrier ();
416 imap->l_scope[cnt] = &new->l_searchlist;
418 /* Only add TLS memory if this object is loaded now and
419 therefore is not yet initialized. */
420 else if (! imap->l_init_called
421 /* Only if the module defines thread local data. */
422 && __builtin_expect (imap->l_tls_blocksize > 0, 0))
424 /* Now that we know the object is loaded successfully add
425 modules containing TLS data to the slot info table. We
426 might have to increase its size. */
427 _dl_add_to_slotinfo (imap);
/* Remember the first module that still needs static TLS setup so the
   second pass below can start there.  */
429 if (imap->l_need_tls_init
430 && first_static_tls == new->l_searchlist.r_nlist)
431 first_static_tls = i;
433 /* We have to bump the generation counter. */
438 /* Bump the generation number if necessary. */
439 if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
440 _dl_fatal_printf (N_("\
441 TLS generation counter wrapped! Please report this."));
443 /* We need a second pass for static tls data, because _dl_update_slotinfo
444 must not be run while calls to _dl_add_to_slotinfo are still pending. */
445 for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
447 struct link_map *imap = new->l_searchlist.r_list[i];
449 if (imap->l_need_tls_init
450 && ! imap->l_init_called
451 && imap->l_tls_blocksize > 0)
453 /* For static TLS we have to allocate the memory here and
454 now. This includes allocating memory in the DTV. But we
455 cannot change any DTV other than our own. So, if we
456 cannot guarantee that there is room in the DTV we don't
457 even try it and fail the load.
459 XXX We could track the minimum DTV slots allocated in
461 if (! RTLD_SINGLE_THREAD_P && imap->l_tls_modid > DTV_SURPLUS)
462 _dl_signal_error (0, "dlopen", NULL, N_("\
463 cannot load any more object with static TLS"));
465 imap->l_need_tls_init = 0;
467 /* Update the slot information data for at least the
468 generation of the DSO we are allocating data for. */
469 _dl_update_slotinfo (imap->l_tls_modid);
472 GL(dl_init_static_tls) (imap);
473 assert (imap->l_need_tls_init == 0);
477 /* Run the initializer functions of new objects. */
478 _dl_init (new, args->argc, args->argv, args->env);
480 /* Now we can make the new map available in the global scope. */
481 if (mode & RTLD_GLOBAL)
482 /* Move the object in the global namespace. */
483 if (add_to_global (new) != 0)
487 /* Mark the object as not deletable if the RTLD_NODELETE flags was
489 if (__builtin_expect (mode & RTLD_NODELETE, 0))
490 new->l_flags_1 |= DF_1_NODELETE;
491 
493 /* We must be the static _dl_open in libc.a. A static program that
494 has loaded a dynamic object now has competition. */
495 __libc_multiple_libcs = 1;
498 /* Let the user know about the opencount. */
499 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
500 _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
501 new->l_name, new->l_ns, new->l_direct_opencount);
/* Public entry point used by dlopen/dlmopen: validate MODE and the
   namespace id NSID, take the loader lock, run dl_open_worker under
   _dl_catch_error, and on failure close the partially-loaded object
   and re-raise the error.  NOTE(review): several lines of the
   original function are elided in this excerpt.  */
506 _dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
507 int argc, char *argv[], char *env[])
509 if ((mode & RTLD_BINDING_MASK) == 0)
510 /* One of the flags must be set. */
511 _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));
513 /* Make sure we are alone. */
514 __rtld_lock_lock_recursive (GL(dl_load_lock));
516 if (__builtin_expect (nsid == LM_ID_NEWLM, 0))
518 /* Find a new namespace. */
519 for (nsid = 1; nsid < GL(dl_nns); ++nsid)
520 if (GL(dl_ns)[nsid]._ns_loaded == NULL)
523 if (__builtin_expect (nsid == DL_NNS, 0))
525 /* No more namespace available. */
/* Release the lock before signalling; _dl_signal_error unwinds and
   would otherwise leave dl_load_lock held.  */
526 __rtld_lock_unlock_recursive (GL(dl_load_lock));
528 _dl_signal_error (EINVAL, file, NULL, N_("\
529 no more namespaces available for dlmopen()"));
532 if (nsid == GL(dl_nns))
534 __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
538 _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
540 /* Never allow loading a DSO in a namespace which is empty. Such
541 direct placement only causes problems. Also don't allow
542 loading into a namespace used for auditing. */
543 else if (__builtin_expect (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER, 0)
544 && (GL(dl_ns)[nsid]._ns_nloaded == 0
545 || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
546 _dl_signal_error (EINVAL, file, NULL,
547 N_("invalid target namespace in dlmopen()"));
549 else if ((nsid == LM_ID_BASE || nsid == __LM_ID_CALLER)
550 && GL(dl_ns)[LM_ID_BASE]._ns_loaded == NULL
555 struct dl_open_args args;
558 args.caller_dlopen = caller_dlopen;
559 args.caller_dl_open = RETURN_ADDRESS (0);
567 const char *errstring;
/* Run the actual work with error catching; a non-NULL errstring on
   return means dl_open_worker raised an error.  */
569 int errcode = _dl_catch_error (&objname, &errstring, &malloced,
570 dl_open_worker, &args);
573 /* We must munmap() the cache file. */
577 /* See if an error occurred during loading. */
578 if (__builtin_expect (errstring != NULL, 0))
580 /* Remove the object from memory. It may be in an inconsistent
581 state if relocation failed, for example. */
584 /* Maybe some of the modules which were loaded use TLS.
585 Since it will be removed in the following _dl_close call
586 we have to mark the dtv array as having gaps to fill the
587 holes. This is a pessimistic assumption which won't hurt
588 if not true. There is no need to do this when we are
589 loading the auditing DSOs since TLS has not yet been set
591 if ((mode & __RTLD_AUDIT) == 0)
592 GL(dl_tls_dtv_gaps) = true;
594 _dl_close_worker (args.map);
597 assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);
599 /* Release the lock. */
600 __rtld_lock_unlock_recursive (GL(dl_load_lock));
602 /* Make a local copy of the error string so that we can release the
603 memory allocated for it. */
604 size_t len_errstring = strlen (errstring) + 1;
605 char *local_errstring;
/* If objname was allocated in the same block, directly after the
   error string, copy both in one go and repoint objname.  */
606 if (objname == errstring + len_errstring)
608 size_t total_len = len_errstring + strlen (objname) + 1;
609 local_errstring = alloca (total_len);
610 memcpy (local_errstring, errstring, total_len);
611 objname = local_errstring + len_errstring;
615 local_errstring = alloca (len_errstring);
616 memcpy (local_errstring, errstring, len_errstring);
620 free ((char *) errstring);
622 /* Reraise the error. */
623 _dl_signal_error (errcode, objname, NULL, local_errstring);
626 assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);
628 /* Release the lock. */
629 __rtld_lock_unlock_recursive (GL(dl_load_lock));
632 DL_STATIC_INIT (args.map);
/* Debugging helper (compiled in when SCOPE_DEBUG is defined): print
   each search scope of NEW and the names of the objects in it; an
   object with an empty l_name (the main program) is shown as
   "<main>".  NOTE(review): parts of the body are elided here.  */
643 show_scope (struct link_map *new)
647 for (scope_cnt = 0; new->l_scope[scope_cnt] != NULL; ++scope_cnt)
/* Single-digit scope index formatted into a small buffer.  */
652 numbuf[0] = '0' + scope_cnt;
654 _dl_printf ("scope %s:", numbuf);
656 for (cnt = 0; cnt < new->l_scope[scope_cnt]->r_nlist; ++cnt)
657 if (*new->l_scope[scope_cnt]->r_list[cnt]->l_name)
658 _dl_printf (" %s", new->l_scope[scope_cnt]->r_list[cnt]->l_name);
660 _dl_printf (" <main>");
668 /* Return non-zero if ADDR lies within one of L's segments. */
671 _dl_addr_inside_object (struct link_map *l, const ElfW(Addr) addr)
/* Convert the absolute address to a load-bias-relative one so it can
   be compared against the program headers' p_vaddr values.  */
674 const ElfW(Addr) reladdr = addr - l->l_addr;
/* NOTE(review): ElfW(Addr) is an unsigned type per the ELF spec, so
   the ">= 0" test below is always true; the effective containment
   check is the "< p_memsz" comparison, which relies on unsigned
   wraparound when reladdr < p_vaddr.  Worth simplifying upstream —
   confirm against the ELF type definitions before changing.  */
677 if (l->l_phdr[n].p_type == PT_LOAD
678 && reladdr - l->l_phdr[n].p_vaddr >= 0
679 && reladdr - l->l_phdr[n].p_vaddr < l->l_phdr[n].p_memsz)