1 /* Load a shared object at runtime, relocate it, and run its initializer.
2 Copyright (C) 1996-2013 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
27 #include <sys/mman.h> /* Check whether MAP_COPY is defined. */
28 #include <sys/param.h>
29 #include <bits/libc-lock.h>
32 #include <sysdep-cancel.h>
34 #include <stap-probe.h>
40 extern int __libc_multiple_libcs; /* Defined in init-first.c. */
42 /* We must be careful not to leave ourselves in an inconsistent state.  Thus
43 we catch any error and re-raise it after cleaning up. */
49 /* This is the caller of the dlopen() function. */
/* NOTE(review): return address recorded so the library that invoked
   dlopen() can be identified (used for DST expansion / namespace and
   RUNPATH lookup below) -- confirm against the full, unelided source.  */
50 const void *caller_dlopen;
51 /* This is the caller of _dl_open(). */
/* NOTE(review): checked by __check_caller() in dl_open_worker to make
   sure _dl_open was entered from libc/libdl/ld.so only.  */
52 const void *caller_dl_open;
56 /* Original parameters to the program and the current environment. */
64 add_to_global (struct link_map *new)
/* Add NEW and its dependencies (NEW's searchlist) to the global scope of
   NEW's namespace, as requested by RTLD_GLOBAL.  Raises an error through
   _dl_signal_error on allocation failure; callers treat a nonzero return
   as failure (see the dl_open_worker call site).
   NOTE(review): this listing is elided -- the embedded source line
   numbers jump, so the return type, braces, the declaration of CNT, the
   ++to_add increment and the return statements are missing from view.  */
66 struct link_map **new_global;
67 unsigned int to_add = 0;
70 /* Count the objects we have to put in the global scope. */
71 for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
72 if (new->l_searchlist.r_list[cnt]->l_global == 0)
75 /* The symbols of the new objects and its dependencies are to be
76 introduced into the global scope that will be used to resolve
77 references from other dynamically-loaded objects.
79 The global scope is the searchlist in the main link map. We
80 extend this list if necessary. There is one problem though:
81 since this structure was allocated very early (before the libc
82 is loaded) the memory it uses is allocated by the malloc()-stub
83 in the ld.so. When we come here these functions are not used
84 anymore. Instead the malloc() implementation of the libc is
85 used. But this means the block from the main map cannot be used
86 in a realloc() call. Therefore we allocate a completely new
87 array the first time we have to add something to the local scope. */
89 struct link_namespaces *ns = &GL(dl_ns)[new->l_ns];
90 if (ns->_ns_global_scope_alloc == 0)
92 /* This is the first dynamic object given global scope. */
/* Allocate with headroom (+8) so the next few additions need no copy.  */
93 ns->_ns_global_scope_alloc
94 = ns->_ns_main_searchlist->r_nlist + to_add + 8;
95 new_global = (struct link_map **)
96 malloc (ns->_ns_global_scope_alloc * sizeof (struct link_map *));
97 if (new_global == NULL)
99 ns->_ns_global_scope_alloc = 0;
101 _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
102 N_("cannot extend global scope"));
106 /* Copy over the old entries. */
107 ns->_ns_main_searchlist->r_list
108 = memcpy (new_global, ns->_ns_main_searchlist->r_list,
109 (ns->_ns_main_searchlist->r_nlist
110 * sizeof (struct link_map *)));
112 else if (ns->_ns_main_searchlist->r_nlist + to_add
113 > ns->_ns_global_scope_alloc)
115 /* We have to extend the existing array of link maps in the
117 struct link_map **old_global
118 = GL(dl_ns)[new->l_ns]._ns_main_searchlist->r_list;
/* Grow geometrically so repeated dlopen(RTLD_GLOBAL) stays amortized O(1).  */
119 size_t new_nalloc = ((ns->_ns_global_scope_alloc + to_add) * 2);
121 new_global = (struct link_map **)
122 malloc (new_nalloc * sizeof (struct link_map *));
123 if (new_global == NULL)
126 memcpy (new_global, old_global,
127 ns->_ns_global_scope_alloc * sizeof (struct link_map *));
129 ns->_ns_global_scope_alloc = new_nalloc;
130 ns->_ns_main_searchlist->r_list = new_global;
/* NOTE(review): wait until no thread still holds a GSCOPE reference to
   the old array before it can be reused/freed; the free itself is
   elided from this view -- confirm against the full source.  */
132 if (!RTLD_SINGLE_THREAD_P)
133 THREAD_GSCOPE_WAIT ();
138 /* Now add the new entries. */
139 unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
140 for (cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
142 struct link_map *map = new->l_searchlist.r_list[cnt];
144 if (map->l_global == 0)
147 ns->_ns_main_searchlist->r_list[new_nlist++] = map;
149 /* We modify the global scope. Report this. */
150 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
151 _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
152 map->l_name, map->l_ns);
/* Publish: the barrier orders the slot stores above before the count
   update, so concurrent lock-free readers never see a count that covers
   uninitialized entries.  */
155 atomic_write_barrier ();
156 ns->_ns_main_searchlist->r_nlist = new_nlist;
161 /* Search link maps in all namespaces for the DSO that contains the object at
162 address ADDR. Returns the pointer to the link map of the matching DSO, or
163 NULL if a match is not found. */
166 _dl_find_dso_for_object (const ElfW(Addr) addr)
/* Walk every namespace's list of loaded objects and return the link map
   whose mapped range (or one of whose PT_LOAD segments, via
   _dl_addr_inside_object) contains ADDR; NULL when nothing matches.
   NOTE(review): elided listing -- the declaration of L, the return
   statements and part of the matching condition (source line 174) are
   missing from view.  */
170 /* Find the highest-addressed object that ADDR is not below. */
171 for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
172 for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
173 if (addr >= l->l_map_start && addr < l->l_map_end
/* NOTE(review): source line 174 is elided here; presumably it excludes
   objects whose contiguous range does not apply before falling back to
   the per-segment check -- confirm against the full source.  */
175 || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
177 assert (ns == l->l_ns);
182 rtld_hidden_def (_dl_find_dso_for_object);
185 dl_open_worker (void *a)
/* The body of dlopen: map the object, load dependencies, check symbol
   versions, relocate everything, wire up scopes and TLS, and run the
   initializers.  A points to a struct dl_open_args (file, mode, nsid,
   caller addresses, argc/argv/env); the resulting map is returned via
   args->map.  Runs under _dl_catch_error, so any _dl_signal_error below
   unwinds to _dl_open, which performs the cleanup.
   NOTE(review): this listing is elided -- embedded source line numbers
   jump, so braces and many statements are missing from view.  */
187 struct dl_open_args *args = a;
188 const char *file = args->file;
189 int mode = args->mode;
190 struct link_map *call_map = NULL;
192 /* Check whether _dl_open() has been called from a valid DSO. */
193 if (__check_caller (args->caller_dl_open,
194 allow_libc|allow_libdl|allow_ldso) != 0)
195 _dl_signal_error (0, "dlopen", NULL, N_("invalid caller"));
197 /* Determine the caller's map if necessary. This is needed in case
198 we have a DST, when we don't know the namespace ID we have to put
199 the new object in, or when the file name has no path in which
200 case we need to look along the RUNPATH/RPATH of the caller. */
201 const char *dst = strchr (file, '$');
202 if (dst != NULL || args->nsid == __LM_ID_CALLER
203 || strchr (file, '/') == NULL)
205 const void *caller_dlopen = args->caller_dlopen;
208 /* We have to find out from which object the caller is calling.
209 By default we assume this is the main application. */
210 call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
213 struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);
218 if (args->nsid == __LM_ID_CALLER)
221 /* In statically linked apps there might be no loaded object. */
222 if (call_map == NULL)
223 args->nsid = LM_ID_BASE;
226 args->nsid = call_map->l_ns;
230 assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);
232 /* Load the named object. */
233 struct link_map *new;
234 args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
235 mode | __RTLD_CALLMAP, args->nsid);
237 /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
238 set and the object is not already loaded. */
241 assert (mode & RTLD_NOLOAD);
245 if (__builtin_expect (mode & __RTLD_SPROF, 0))
246 /* This happens only if we load a DSO for 'sprof'. */
249 /* This object is directly loaded. */
250 ++new->l_direct_opencount;
252 /* It was already open. */
/* NOTE(review): a non-NULL searchlist means the object was opened
   before; only the global-scope promotion below is still needed.  */
253 if (__builtin_expect (new->l_searchlist.r_list != NULL, 0))
255 /* Let the user know about the opencount. */
256 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
257 _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
258 new->l_name, new->l_ns, new->l_direct_opencount);
260 /* If the user requested the object to be in the global namespace
261 but it is not so far, add it now. */
262 if ((mode & RTLD_GLOBAL) && new->l_global == 0)
263 (void) add_to_global (new);
265 assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);
270 /* Load that object's dependencies. */
271 _dl_map_object_deps (new, NULL, 0, 0,
272 mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));
274 /* So far, so good. Now check the versions. */
275 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
276 if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
277 (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
281 /* Auditing checkpoint: we have added all objects. */
282 if (__builtin_expect (GLRO(dl_naudit) > 0, 0))
284 struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
285 /* Do not call the functions for any auditing object. */
286 if (head->l_auditing == 0)
288 struct audit_ifaces *afct = GLRO(dl_audit);
289 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
291 if (afct->activity != NULL)
292 afct->activity (&head->l_audit[cnt].cookie, LA_ACT_CONSISTENT);
300 /* Notify the debugger all new objects are now ready to go. */
301 struct r_debug *r = _dl_debug_initialize (0, args->nsid);
302 r->r_state = RT_CONSISTENT;
304 LIBC_PROBE (map_complete, 3, args->nsid, r, new);
306 /* Print scope information. */
307 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
308 _dl_show_scope (new, 0);
310 /* Only do lazy relocation if `LD_BIND_NOW' is not set. */
311 int reloc_mode = mode & __RTLD_AUDIT;
313 reloc_mode |= mode & RTLD_LAZY;
315 /* Sort the objects by dependency for the relocation process. This
316 allows IFUNC relocations to work and it also means copy
317 relocations of dependencies are overwritten if necessary. */
/* NOTE(review): MAPS collects the not-yet-relocated objects and is then
   reordered so that each object comes after everything that depends on
   it; the SEEN counters bound how often an entry may be moved, guarding
   against endless cycling on circular dependencies.  Several lines of
   this sort (loop heads, nmaps computation) are elided from view.  */
319 struct link_map *l = new;
322 if (! l->l_real->l_relocated)
327 struct link_map *maps[nmaps];
332 if (! l->l_real->l_relocated)
339 uint16_t seen[nmaps];
340 memset (seen, '\0', sizeof (seen));
345 struct link_map *thisp = maps[i];
347 /* Find the last object in the list for which the current one is
348 a dependency and move the current object behind the object
349 with the dependency. */
350 size_t k = nmaps - 1;
353 struct link_map **runp = maps[k]->l_initfini;
355 /* Look through the dependencies of the object. */
356 while (*runp != NULL)
357 if (__builtin_expect (*runp++ == thisp, 0))
359 /* Move the current object to the back past the last
360 object with it as the dependency. */
361 memmove (&maps[i], &maps[i + 1],
362 (k - i) * sizeof (maps[0]));
365 if (seen[i + 1] > nmaps - i)
371 uint16_t this_seen = seen[i];
372 memmove (&seen[i], &seen[i + 1],
373 (k - i) * sizeof (seen[0]));
385 memset (&seen[i], 0, (nmaps - i) * sizeof (seen[0]));
390 int relocation_in_progress = 0;
/* Relocate in reverse order so dependencies are processed before the
   objects that use them.  */
392 for (size_t i = nmaps; i-- > 0; )
396 if (! relocation_in_progress)
398 /* Notify the debugger that relocations are about to happen. */
399 LIBC_PROBE (reloc_start, 2, args->nsid, r);
400 relocation_in_progress = 1;
404 if (__builtin_expect (GLRO(dl_profile) != NULL, 0))
406 /* If this here is the shared object which we want to profile
407 make sure the profile is started. We can find out whether
408 this is necessary or not by observing the `_dl_profile_map'
409 variable. If it was NULL but is not NULL afterwards we must
410 start the profiling. */
411 struct link_map *old_profile_map = GL(dl_profile_map);
/* Profiling requires lazy PLT relocation, hence the forced RTLD_LAZY.  */
413 _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);
415 if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
417 /* We must prepare the profiling. */
418 _dl_start_profile ();
420 /* Prevent unloading the object. */
421 GL(dl_profile_map)->l_flags_1 |= DF_1_NODELETE;
426 _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
429 /* If the file is not loaded now as a dependency, add the search
430 list of the newly loaded object to the scope. */
431 bool any_tls = false;
432 unsigned int first_static_tls = new->l_searchlist.r_nlist;
433 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
435 struct link_map *imap = new->l_searchlist.r_list[i];
438 /* If the initializer has been called already, the object has
439 not been loaded here and now. */
440 if (imap->l_init_called && imap->l_type == lt_loaded)
442 struct r_scope_elem **runp = imap->l_scope;
445 while (*runp != NULL)
447 if (*runp == &new->l_searchlist)
454 /* Avoid duplicates. */
457 if (__builtin_expect (cnt + 1 >= imap->l_scope_max, 0))
459 /* The 'r_scope' array is too small. Allocate a new one
462 struct r_scope_elem **newp;
464 #define SCOPE_ELEMS(imap) \
465 (sizeof (imap->l_scope_mem) / sizeof (imap->l_scope_mem[0]))
/* Prefer the statically reserved l_scope_mem before falling back to
   a doubled heap allocation.  */
467 if (imap->l_scope != imap->l_scope_mem
468 && imap->l_scope_max < SCOPE_ELEMS (imap))
470 new_size = SCOPE_ELEMS (imap);
471 newp = imap->l_scope_mem;
475 new_size = imap->l_scope_max * 2;
476 newp = (struct r_scope_elem **)
477 malloc (new_size * sizeof (struct r_scope_elem *));
479 _dl_signal_error (ENOMEM, "dlopen", NULL,
480 N_("cannot create scope list"));
483 memcpy (newp, imap->l_scope, cnt * sizeof (imap->l_scope[0]));
484 struct r_scope_elem **old = imap->l_scope;
486 imap->l_scope = newp;
/* _dl_scope_free defers the actual free until no thread can still be
   traversing the old array -- confirm against the full source.  */
488 if (old != imap->l_scope_mem)
489 _dl_scope_free (old);
491 imap->l_scope_max = new_size;
494 /* First terminate the extended list. Otherwise a thread
495 might use the new last element and then use the garbage
497 imap->l_scope[cnt + 1] = NULL;
/* Order matters for lock-free readers: terminator first, barrier,
   then the new element becomes visible.  */
498 atomic_write_barrier ();
499 imap->l_scope[cnt] = &new->l_searchlist;
501 /* Print only new scope information. */
504 /* Only add TLS memory if this object is loaded now and
505 therefore is not yet initialized. */
506 else if (! imap->l_init_called
507 /* Only if the module defines thread local data. */
508 && __builtin_expect (imap->l_tls_blocksize > 0, 0))
510 /* Now that we know the object is loaded successfully add
511 modules containing TLS data to the slot info table. We
512 might have to increase its size. */
513 _dl_add_to_slotinfo (imap);
/* Remember the first module needing static TLS setup for pass two.  */
515 if (imap->l_need_tls_init
516 && first_static_tls == new->l_searchlist.r_nlist)
517 first_static_tls = i;
519 /* We have to bump the generation counter. */
523 /* Print scope information. */
524 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES, 0))
525 _dl_show_scope (imap, from_scope);
528 /* Bump the generation number if necessary. */
529 if (any_tls && __builtin_expect (++GL(dl_tls_generation) == 0, 0))
530 _dl_fatal_printf (N_("\
531 TLS generation counter wrapped! Please report this."));
533 /* We need a second pass for static tls data, because _dl_update_slotinfo
534 must not be run while calls to _dl_add_to_slotinfo are still pending. */
535 for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
537 struct link_map *imap = new->l_searchlist.r_list[i];
539 if (imap->l_need_tls_init
540 && ! imap->l_init_called
541 && imap->l_tls_blocksize > 0)
543 /* For static TLS we have to allocate the memory here and
544 now. This includes allocating memory in the DTV. But we
545 cannot change any DTV other than our own. So, if we
546 cannot guarantee that there is room in the DTV we don't
547 even try it and fail the load.
549 XXX We could track the minimum DTV slots allocated in
551 if (! RTLD_SINGLE_THREAD_P && imap->l_tls_modid > DTV_SURPLUS)
552 _dl_signal_error (0, "dlopen", NULL, N_("\
553 cannot load any more object with static TLS"));
555 imap->l_need_tls_init = 0;
557 /* Update the slot information data for at least the
558 generation of the DSO we are allocating data for. */
559 _dl_update_slotinfo (imap->l_tls_modid);
562 GL(dl_init_static_tls) (imap);
563 assert (imap->l_need_tls_init == 0);
567 /* Notify the debugger all new objects have been relocated. */
568 if (relocation_in_progress)
569 LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);
572 DL_STATIC_INIT (new);
575 /* Run the initializer functions of new objects. */
576 _dl_init (new, args->argc, args->argv, args->env);
578 /* Now we can make the new map available in the global scope. */
/* Done only after _dl_init so other threads cannot resolve symbols in
   the object before its constructors ran.  */
579 if (mode & RTLD_GLOBAL)
580 /* Move the object in the global namespace. */
581 if (add_to_global (new) != 0)
585 /* Mark the object as not deletable if the RTLD_NODELETE flag was
587 if (__builtin_expect (mode & RTLD_NODELETE, 0))
588 new->l_flags_1 |= DF_1_NODELETE;
591 /* We must be the static _dl_open in libc.a. A static program that
592 has loaded a dynamic object now has competition. */
593 __libc_multiple_libcs = 1;
596 /* Let the user know about the opencount. */
597 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
598 _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
599 new->l_name, new->l_ns, new->l_direct_opencount);
604 _dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
605 int argc, char *argv[], char *env[])
/* Entry point behind dlopen/dlmopen: validate MODE and NSID, take the
   loader lock, run dl_open_worker under _dl_catch_error, and on failure
   tear the partially loaded object down with _dl_close_worker before
   re-raising the error.  NOTE(review): elided listing -- embedded source
   line numbers jump, so braces, several args.* assignments and the
   return statement are missing from view.  */
607 if ((mode & RTLD_BINDING_MASK) == 0)
608 /* One of the flags must be set. */
609 _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));
611 /* Make sure we are alone. */
612 __rtld_lock_lock_recursive (GL(dl_load_lock));
614 if (__builtin_expect (nsid == LM_ID_NEWLM, 0))
616 /* Find a new namespace. */
617 for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
618 if (GL(dl_ns)[nsid]._ns_loaded == NULL)
621 if (__builtin_expect (nsid == DL_NNS, 0))
623 /* No more namespace available. */
/* Unlock before signalling: _dl_signal_error unwinds past this frame.  */
624 __rtld_lock_unlock_recursive (GL(dl_load_lock));
626 _dl_signal_error (EINVAL, file, NULL, N_("\
627 no more namespaces available for dlmopen()"));
629 else if (nsid == GL(dl_nns))
631 __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
635 _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
637 /* Never allow loading a DSO in a namespace which is empty. Such
638 direct placement is only causing problems. Also don't allow
639 loading into a namespace used for auditing. */
640 else if (__builtin_expect (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER, 0)
641 && (GL(dl_ns)[nsid]._ns_nloaded == 0
642 || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
643 _dl_signal_error (EINVAL, file, NULL,
644 N_("invalid target namespace in dlmopen()"));
646 else if ((nsid == LM_ID_BASE || nsid == __LM_ID_CALLER)
647 && GL(dl_ns)[LM_ID_BASE]._ns_loaded == NULL
652 struct dl_open_args args;
655 args.caller_dlopen = caller_dlopen;
/* Our own return address identifies the DSO that called _dl_open; the
   worker rejects calls that do not come from libc/libdl/ld.so.  */
656 args.caller_dl_open = RETURN_ADDRESS (0);
664 const char *errstring;
666 int errcode = _dl_catch_error (&objname, &errstring, &malloced,
667 dl_open_worker, &args);
669 #if defined USE_LDCONFIG && !defined MAP_COPY
670 /* We must unmap the cache file. */
674 /* See if an error occurred during loading. */
675 if (__builtin_expect (errstring != NULL, 0))
677 /* Remove the object from memory. It may be in an inconsistent
678 state if relocation failed, for example. */
681 /* Maybe some of the modules which were loaded use TLS.
682 Since it will be removed in the following _dl_close call
683 we have to mark the dtv array as having gaps to fill the
684 holes. This is a pessimistic assumption which won't hurt
685 if not true. There is no need to do this when we are
686 loading the auditing DSOs since TLS has not yet been set
688 if ((mode & __RTLD_AUDIT) == 0)
689 GL(dl_tls_dtv_gaps) = true;
691 _dl_close_worker (args.map);
694 assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);
696 /* Release the lock. */
697 __rtld_lock_unlock_recursive (GL(dl_load_lock));
699 /* Make a local copy of the error string so that we can release the
700 memory allocated for it. */
/* The heap copy is freed below before re-raising, so the message (and,
   when OBJNAME was allocated contiguously after it, the object name
   too) must first be moved to the stack with alloca.  */
701 size_t len_errstring = strlen (errstring) + 1;
702 char *local_errstring;
703 if (objname == errstring + len_errstring)
705 size_t total_len = len_errstring + strlen (objname) + 1;
706 local_errstring = alloca (total_len);
707 memcpy (local_errstring, errstring, total_len);
708 objname = local_errstring + len_errstring;
712 local_errstring = alloca (len_errstring);
713 memcpy (local_errstring, errstring, len_errstring);
717 free ((char *) errstring);
719 /* Reraise the error. */
720 _dl_signal_error (errcode, objname, NULL, local_errstring);
723 assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);
725 /* Release the lock. */
726 __rtld_lock_unlock_recursive (GL(dl_load_lock));
733 _dl_show_scope (struct link_map *l, int from)
/* Debug helper (LD_DEBUG=scopes): print each scope list of L starting
   at index FROM via _dl_debug_printf.  Objects with an empty l_name
   (the main program) are printed as RTLD_PROGNAME.  NOTE(review):
   elided listing -- braces and the else-branch structure around the
   "no scope" line are missing from view.  */
735 _dl_debug_printf ("object=%s [%lu]\n",
736 DSO_FILENAME (l->l_name), l->l_ns);
737 if (l->l_scope != NULL)
738 for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
740 _dl_debug_printf (" scope %u:", scope_cnt);
742 for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
743 if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
744 _dl_debug_printf_c (" %s",
745 l->l_scope[scope_cnt]->r_list[cnt]->l_name);
747 _dl_debug_printf_c (" %s", RTLD_PROGNAME);
749 _dl_debug_printf_c ("\n");
752 _dl_debug_printf (" no scope\n");
753 _dl_debug_printf ("\n");
757 /* Return non-zero if ADDR lies within one of L's segments. */
760 _dl_addr_inside_object (struct link_map *l, const ElfW(Addr) addr)
763 const ElfW(Addr) reladdr = addr - l->l_addr;
766 if (l->l_phdr[n].p_type == PT_LOAD
767 && reladdr - l->l_phdr[n].p_vaddr >= 0
768 && reladdr - l->l_phdr[n].p_vaddr < l->l_phdr[n].p_memsz)