1 /* Load a shared object at runtime, relocate it, and run its initializer.
2 Copyright (C) 1996-2020 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <https://www.gnu.org/licenses/>. */
27 #include <sys/mman.h> /* Check whether MAP_COPY is defined. */
28 #include <sys/param.h>
29 #include <libc-lock.h>
31 #include <sysdep-cancel.h>
33 #include <stap-probe.h>
35 #include <libc-internal.h>
36 #include <array_length.h>
42 /* We must be careful not to leave ourselves in an inconsistent state.  Thus
43 we catch any error and re-raise it after cleaning up. */
49 /* This is the caller of the dlopen() function. */
50 const void *caller_dlopen;
55 /* Original value of _ns_global_scope_pending_adds. Set by
56 dl_open_worker. Only valid if nsid is a real namespace
58 unsigned int original_global_scope_pending_adds;
60 /* Original parameters to the program and the current environment. */
66 /* Called in case the global scope cannot be extended. */
/* Signals an ENOMEM error for NEW via _dl_signal_error, which raises
   an exception and unwinds; hence the noreturn attribute.  */
67 static void __attribute__ ((noreturn))
68 add_to_global_resize_failure (struct link_map *new)
70 _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
71 N_ ("cannot extend global scope"));
74 /* Grow the global scope array for the namespace, so that all the new
75 global objects can be added later in add_to_global_update, without
76 risk of memory allocation failure. add_to_global_resize raises
77 exceptions for memory allocation errors. */
79 add_to_global_resize (struct link_map *new)
81 struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];
83 /* Count the objects we have to put in the global scope. */
84 unsigned int to_add = 0;
85 for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
86 if (new->l_searchlist.r_list[cnt]->l_global == 0)
89 /* The symbols of the new object and its dependencies are to be
90 introduced into the global scope that will be used to resolve
91 references from other dynamically-loaded objects.
93 The global scope is the searchlist in the main link map. We
94 extend this list if necessary. There is one problem though:
95 since this structure was allocated very early (before the libc
96 is loaded) the memory it uses is allocated by the malloc()-stub
97 in the ld.so. When we come here these functions are not used
98 anymore. Instead the malloc() implementation of the libc is
99 used. But this means the block from the main map cannot be used
100 in a realloc() call. Therefore we allocate a completely new
101 array the first time we have to add something to the local scope. */
/* All size computations below use overflow-checked builtins; any
   overflow is treated the same as an allocation failure.  */
103 if (__builtin_add_overflow (ns->_ns_global_scope_pending_adds, to_add,
104 &ns->_ns_global_scope_pending_adds))
105 add_to_global_resize_failure (new);
107 unsigned int new_size = 0; /* 0 means no new allocation. */
108 void *old_global = NULL; /* Old allocation if free-able. */
110 /* Minimum required element count for resizing. Adjusted below for
111 an exponential resizing policy. */
112 size_t required_new_size;
113 if (__builtin_add_overflow (ns->_ns_main_searchlist->r_nlist,
114 ns->_ns_global_scope_pending_adds,
116 add_to_global_resize_failure (new);
118 if (ns->_ns_global_scope_alloc == 0)
120 if (__builtin_add_overflow (required_new_size, 8, &new_size))
121 add_to_global_resize_failure (new);
123 else if (required_new_size > ns->_ns_global_scope_alloc)
125 if (__builtin_mul_overflow (required_new_size, 2, &new_size))
126 add_to_global_resize_failure (new);
128 /* The old array was allocated with our malloc, not the minimal
130 old_global = ns->_ns_main_searchlist->r_list;
135 size_t allocation_size;
136 if (__builtin_mul_overflow (new_size, sizeof (struct link_map *),
138 add_to_global_resize_failure (new);
139 struct link_map **new_global = malloc (allocation_size);
140 if (new_global == NULL)
141 add_to_global_resize_failure (new);
143 /* Copy over the old entries. */
144 memcpy (new_global, ns->_ns_main_searchlist->r_list,
145 ns->_ns_main_searchlist->r_nlist * sizeof (struct link_map *));
147 ns->_ns_global_scope_alloc = new_size;
148 ns->_ns_main_searchlist->r_list = new_global;
/* NOTE(review): THREAD_GSCOPE_WAIT presumably waits for all threads to
   leave their scope-lookup critical sections before the old array is
   retired; the actual free of old_global is not visible in this
   excerpt — confirm against the full source.  */
150 if (!RTLD_SINGLE_THREAD_P)
151 THREAD_GSCOPE_WAIT ();
157 /* Actually add the new global objects to the global scope. Must be
158 called after add_to_global_resize. This function cannot fail. */
160 add_to_global_update (struct link_map *new)
162 struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];
164 /* Now add the new entries. */
165 unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
166 for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
168 struct link_map *map = new->l_searchlist.r_list[cnt];
170 if (map->l_global == 0)
174 /* The array has been resized by add_to_global_resize. */
175 assert (new_nlist < ns->_ns_global_scope_alloc);
177 ns->_ns_main_searchlist->r_list[new_nlist++] = map;
179 /* We modify the global scope. Report this. */
180 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
181 _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
182 map->l_name, map->l_ns);
186 /* Some of the pending adds have been performed by the loop above.
187 Adjust the counter accordingly. */
188 unsigned int added = new_nlist - ns->_ns_main_searchlist->r_nlist;
189 assert (added <= ns->_ns_global_scope_pending_adds);
190 ns->_ns_global_scope_pending_adds -= added;
/* Publish the new entries before the count: the write barrier makes
   sure concurrent readers who observe the larger r_nlist also see the
   pointers stored into r_list above.  */
192 atomic_write_barrier ();
193 ns->_ns_main_searchlist->r_nlist = new_nlist;
196 /* Search link maps in all namespaces for the DSO that contains the object at
197 address ADDR. Returns the pointer to the link map of the matching DSO, or
198 NULL if a match is not found. */
/* Linear scan over every namespace's link-map list; a map matches when
   ADDR falls inside [l_map_start, l_map_end) and (per the clause on the
   next lines) _dl_addr_inside_object confirms the hit.  */
200 _dl_find_dso_for_object (const ElfW(Addr) addr)
204 /* Find the highest-addressed object that ADDR is not below. */
205 for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
206 for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
207 if (addr >= l->l_map_start && addr < l->l_map_end
209 || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
211 assert (ns == l->l_ns);
/* rtld_hidden_def creates the internal (hidden) alias used for calls
   from within the dynamic loader itself.  */
216 rtld_hidden_def (_dl_find_dso_for_object);
218 /* Return true if NEW is found in the scope for MAP. */
/* Walks MAP's NULL-terminated l_scope array, comparing each element
   against NEW's search list by pointer identity.  */
220 scope_has_map (struct link_map *map, struct link_map *new)
223 for (cnt = 0; map->l_scope[cnt] != NULL; ++cnt)
224 if (map->l_scope[cnt] == &new->l_searchlist)
229 /* Return the length of the scope for MAP. */
/* Counts entries up to (not including) the terminating NULL; the
   increment of CNT is on a line not visible in this excerpt.  */
231 scope_size (struct link_map *map)
234 for (cnt = 0; map->l_scope[cnt] != NULL; )
239 /* Resize the scopes of depended-upon objects, so that the new object
240 can be added later without further allocation of memory. This
241 function can raise an exception due to malloc failure. */
243 resize_scopes (struct link_map *new)
245 /* If the file is not loaded now as a dependency, add the search
246 list of the newly loaded object to the scope. */
247 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
249 struct link_map *imap = new->l_searchlist.r_list[i];
251 /* If the initializer has been called already, the object has
252 not been loaded here and now. */
253 if (imap->l_init_called && imap->l_type == lt_loaded)
255 if (scope_has_map (imap, new))
256 /* Avoid duplicates. */
259 size_t cnt = scope_size (imap);
/* Need room for one more entry plus the terminating NULL.  */
260 if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
262 /* The l_scope array is too small. Allocate a new one
265 struct r_scope_elem **newp;
267 if (imap->l_scope != imap->l_scope_mem
268 && imap->l_scope_max < array_length (imap->l_scope_mem))
270 /* If the current l_scope memory is not pointing to
271 the static memory in the structure, but the
272 static memory in the structure is large enough to
273 use for cnt + 1 scope entries, then switch to
274 using the static memory. */
275 new_size = array_length (imap->l_scope_mem);
276 newp = imap->l_scope_mem;
280 new_size = imap->l_scope_max * 2;
281 newp = (struct r_scope_elem **)
282 malloc (new_size * sizeof (struct r_scope_elem *));
284 _dl_signal_error (ENOMEM, "dlopen", NULL,
285 N_("cannot create scope list"));
288 /* Copy the array and the terminating NULL. */
289 memcpy (newp, imap->l_scope,
290 (cnt + 1) * sizeof (imap->l_scope[0]));
291 struct r_scope_elem **old = imap->l_scope;
293 imap->l_scope = newp;
/* NOTE(review): _dl_scope_free presumably defers freeing the old
   array until no thread can still be reading it — confirm against
   its definition; the static l_scope_mem buffer is never freed.  */
295 if (old != imap->l_scope_mem)
296 _dl_scope_free (old);
298 imap->l_scope_max = new_size;
304 /* Second stage of resize_scopes: Add NEW to the scopes. Also print
305 debugging information about scopes if requested.
307 This function cannot raise an exception because all required memory
308 has been allocated by a previous call to resize_scopes. */
310 update_scopes (struct link_map *new)
312 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
314 struct link_map *imap = new->l_searchlist.r_list[i];
317 if (imap->l_init_called && imap->l_type == lt_loaded)
319 if (scope_has_map (imap, new))
320 /* Avoid duplicates. */
323 size_t cnt = scope_size (imap);
324 /* Assert that resize_scopes has sufficiently enlarged the
326 assert (cnt + 1 < imap->l_scope_max);
328 /* First terminate the extended list. Otherwise a thread
329 might use the new last element and then use the garbage
331 imap->l_scope[cnt + 1] = NULL;
/* Ordering matters: write the new NULL terminator, then barrier,
   then publish the new entry, so concurrent scope walkers never see
   an unterminated list.  */
332 atomic_write_barrier ();
333 imap->l_scope[cnt] = &new->l_searchlist;
338 /* Print scope information. */
339 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
340 _dl_show_scope (imap, from_scope);
344 /* Call _dl_add_to_slotinfo with DO_ADD set to false, to allocate
345 space in GL (dl_tls_dtv_slotinfo_list). This can raise an
346 exception. The return value is true if any of the new objects use
349 resize_tls_slotinfo (struct link_map *new)
351 bool any_tls = false;
352 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
354 struct link_map *imap = new->l_searchlist.r_list[i];
356 /* Only add TLS memory if this object is loaded now and
357 therefore is not yet initialized. */
/* l_tls_blocksize > 0 identifies objects that carry a TLS segment.  */
358 if (! imap->l_init_called && imap->l_tls_blocksize > 0)
360 _dl_add_to_slotinfo (imap, false);
367 /* Second stage of TLS update, after resize_tls_slotinfo. This
368 function does not raise any exception. It should only be called if
369 resize_tls_slotinfo returned true. */
371 update_tls_slotinfo (struct link_map *new)
/* first_static_tls starts one past the end; it is lowered to the index
   of the first object needing static TLS initialization, if any.  */
373 unsigned int first_static_tls = new->l_searchlist.r_nlist;
374 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
376 struct link_map *imap = new->l_searchlist.r_list[i];
378 /* Only add TLS memory if this object is loaded now and
379 therefore is not yet initialized. */
380 if (! imap->l_init_called && imap->l_tls_blocksize > 0)
382 _dl_add_to_slotinfo (imap, true);
384 if (imap->l_need_tls_init
385 && first_static_tls == new->l_searchlist.r_nlist)
386 first_static_tls = i;
/* The TLS generation counter must never wrap; a wrap would make
   stale DTV entries look current, so it is a fatal error.  */
390 if (__builtin_expect (++GL(dl_tls_generation) == 0, 0))
391 _dl_fatal_printf (N_("\
392 TLS generation counter wrapped! Please report this."));
394 /* We need a second pass for static tls data, because
395 _dl_update_slotinfo must not be run while calls to
396 _dl_add_to_slotinfo are still pending. */
397 for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
399 struct link_map *imap = new->l_searchlist.r_list[i];
401 if (imap->l_need_tls_init
402 && ! imap->l_init_called
403 && imap->l_tls_blocksize > 0)
405 /* For static TLS we have to allocate the memory here and
406 now, but we can delay updating the DTV. */
407 imap->l_need_tls_init = 0;
409 /* Update the slot information data for at least the
410 generation of the DSO we are allocating data for. */
412 /* FIXME: This can terminate the process on memory
413 allocation failure. It is not possible to raise
414 exceptions from this context; to fix this bug,
415 _dl_update_slotinfo would have to be split into two
416 operations, similar to resize_scopes and update_scopes
417 above. This is related to bug 16134. */
418 _dl_update_slotinfo (imap->l_tls_modid);
421 GL(dl_init_static_tls) (imap);
422 assert (imap->l_need_tls_init == 0);
427 /* Mark the objects as NODELETE if required. This is delayed until
428 after dlopen failure is not possible, so that _dl_close can clean
429 up objects if necessary. */
/* NEW is the directly opened object; its namespace determines which
   link-map list is traversed.  This function cannot fail.  */
431 activate_nodelete (struct link_map *new)
433 /* It is necessary to traverse the entire namespace. References to
434 objects in the global scope and unique symbol bindings can force
435 NODELETE status for objects outside the local scope. */
436 for (struct link_map *l = GL (dl_ns)[new->l_ns]._ns_loaded; l != NULL;
438 if (l->l_nodelete_pending)
440 if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
441 _dl_debug_printf ("activating NODELETE for %s [%lu]\n",
444 /* The flag can already be true at this point, e.g. a signal
445 handler may have triggered lazy binding and set NODELETE
446 status immediately. */
447 l->l_nodelete_active = true;
449 /* This is just a debugging aid, to indicate that
450 activate_nodelete has run for this map. */
451 l->l_nodelete_pending = false;
455 /* struct dl_init_args and call_dl_init are used to call _dl_init with
456 exception handling disabled. */
/* Member of struct dl_init_args: the newly opened map whose
   initializers are to run (the struct's other members — argc, argv,
   env — are on lines not visible in this excerpt).  */
459 struct link_map *new;
/* Trampoline matching the _dl_catch_exception callback signature;
   CLOSURE is a struct dl_init_args pointer.  */
466 call_dl_init (void *closure)
468 struct dl_init_args *args = closure;
469 _dl_init (args->new, args->argc, args->argv, args->env);
/* Worker run under _dl_catch_exception from _dl_open: maps the object,
   loads dependencies, relocates, updates scopes/TLS, and runs
   initializers.  A is a struct dl_open_args pointer.  Any error is
   signalled as an exception and unwound back to _dl_open.  */
473 dl_open_worker (void *a)
475 struct dl_open_args *args = a;
476 const char *file = args->file;
477 int mode = args->mode;
478 struct link_map *call_map = NULL;
480 /* Determine the caller's map if necessary. This is needed in case
481 we have a DST, when we don't know the namespace ID we have to put
482 the new object in, or when the file name has no path in which
483 case we need to look along the RUNPATH/RPATH of the caller. */
484 const char *dst = strchr (file, '$');
485 if (dst != NULL || args->nsid == __LM_ID_CALLER
486 || strchr (file, '/') == NULL)
488 const void *caller_dlopen = args->caller_dlopen;
490 /* We have to find out from which object the caller is calling.
491 By default we assume this is the main application. */
492 call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
494 struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);
499 if (args->nsid == __LM_ID_CALLER)
500 args->nsid = call_map->l_ns;
503 /* Retain the old value, so that it can be restored. */
504 args->original_global_scope_pending_adds
505 = GL (dl_ns)[args->nsid]._ns_global_scope_pending_adds;
507 /* One might be tempted to assert that we are RT_CONSISTENT at this point, but that
508 may not be true if this is a recursive call to dlopen. */
509 _dl_debug_initialize (0, args->nsid);
511 /* Load the named object. */
512 struct link_map *new;
513 args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
514 mode | __RTLD_CALLMAP, args->nsid);
516 /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
517 set and the object is not already loaded. */
520 assert (mode & RTLD_NOLOAD);
524 if (__glibc_unlikely (mode & __RTLD_SPROF))
525 /* This happens only if we load a DSO for 'sprof'. */
528 /* This object is directly loaded. */
529 ++new->l_direct_opencount;
531 /* It was already open. */
532 if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
534 /* Let the user know about the opencount. */
535 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
536 _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
537 new->l_name, new->l_ns, new->l_direct_opencount);
539 /* If the user requested the object to be in the global
540 namespace but it is not so far, prepare to add it now. This
541 can raise an exception due to a malloc failure. */
542 if ((mode & RTLD_GLOBAL) && new->l_global == 0)
543 add_to_global_resize (new);
545 /* Mark the object as not deletable if the RTLD_NODELETE flags
547 if (__glibc_unlikely (mode & RTLD_NODELETE))
549 if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
550 && !new->l_nodelete_active)
551 _dl_debug_printf ("marking %s [%lu] as NODELETE\n",
552 new->l_name, new->l_ns);
553 new->l_nodelete_active = true;
556 /* Finalize the addition to the global scope. */
557 if ((mode & RTLD_GLOBAL) && new->l_global == 0)
558 add_to_global_update (new);
560 assert (_dl_debug_initialize (0, args->nsid)->r_state == RT_CONSISTENT);
565 /* Schedule NODELETE marking for the directly loaded object if
567 if (__glibc_unlikely (mode & RTLD_NODELETE))
568 new->l_nodelete_pending = true;
570 /* Load that object's dependencies. */
571 _dl_map_object_deps (new, NULL, 0, 0,
572 mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));
574 /* So far, so good. Now check the versions. */
575 for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
576 if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
577 (void) _dl_check_map_versions (new->l_searchlist.r_list[i]->l_real,
581 /* Auditing checkpoint: we have added all objects. */
582 if (__glibc_unlikely (GLRO(dl_naudit) > 0))
584 struct link_map *head = GL(dl_ns)[new->l_ns]._ns_loaded;
585 /* Do not call the functions for any auditing object. */
586 if (head->l_auditing == 0)
588 struct audit_ifaces *afct = GLRO(dl_audit);
589 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
591 if (afct->activity != NULL)
593 struct auditstate *state = link_map_audit_state (head, cnt);
594 afct->activity (&state->cookie, LA_ACT_CONSISTENT);
603 /* Notify the debugger all new objects are now ready to go. */
604 struct r_debug *r = _dl_debug_initialize (0, args->nsid);
605 r->r_state = RT_CONSISTENT;
607 LIBC_PROBE (map_complete, 3, args->nsid, r, new);
609 _dl_open_check (new);
611 /* Print scope information. */
612 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
613 _dl_show_scope (new, 0);
615 /* Only do lazy relocation if `LD_BIND_NOW' is not set. */
616 int reloc_mode = mode & __RTLD_AUDIT;
618 reloc_mode |= mode & RTLD_LAZY;
620 /* Sort the objects by dependency for the relocation process. This
621 allows IFUNC relocations to work and it also means copy
622 relocations of dependencies are overwritten if necessary. */
/* First pass: count the not-yet-relocated maps so the VLA below can
   be sized; second pass fills it before _dl_sort_maps.  */
623 unsigned int nmaps = 0;
625 struct link_map *l = new->l_initfini[0];
628 if (! l->l_real->l_relocated)
630 l = new->l_initfini[++j];
633 /* Stack allocation is limited by the number of loaded objects. */
634 struct link_map *maps[nmaps];
637 l = new->l_initfini[0];
640 if (! l->l_real->l_relocated)
642 l = new->l_initfini[++j];
645 _dl_sort_maps (maps, nmaps, NULL, false);
647 int relocation_in_progress = 0;
649 /* Perform relocation. This can trigger lazy binding in IFUNC
650 resolvers. For NODELETE mappings, these dependencies are not
651 recorded because the flag has not been applied to the newly
652 loaded objects. This means that upon dlopen failure, these
653 NODELETE objects can be unloaded despite existing references to
654 them. However, such relocation dependencies in IFUNC resolvers
655 are undefined anyway, so this is not a problem. */
657 for (unsigned int i = nmaps; i-- > 0; )
661 if (! relocation_in_progress)
663 /* Notify the debugger that relocations are about to happen. */
664 LIBC_PROBE (reloc_start, 2, args->nsid, r);
665 relocation_in_progress = 1;
669 if (__glibc_unlikely (GLRO(dl_profile) != NULL))
671 /* If this here is the shared object which we want to profile
672 make sure the profile is started. We can find out whether
673 this is necessary or not by observing the `_dl_profile_map'
674 variable. If it was NULL but is not NULL afterwards we must
675 start the profiling. */
676 struct link_map *old_profile_map = GL(dl_profile_map);
678 _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);
680 if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
682 /* We must prepare the profiling. */
683 _dl_start_profile ();
685 /* Prevent unloading the object. */
686 GL(dl_profile_map)->l_nodelete_active = true;
691 _dl_relocate_object (l, l->l_scope, reloc_mode, 0);
694 /* This only performs the memory allocations. The actual update of
695 the scopes happens below, after failure is impossible. */
698 /* Increase the size of the GL (dl_tls_dtv_slotinfo_list) data
700 bool any_tls = resize_tls_slotinfo (new);
702 /* Perform the necessary allocations for adding new global objects
703 to the global scope below. */
704 if (mode & RTLD_GLOBAL)
705 add_to_global_resize (new);
707 /* Demarcation point: After this, no recoverable errors are allowed.
708 All memory allocations for new objects must have happened
711 /* Finalize the NODELETE status first. This comes before
712 update_scopes, so that lazy binding will not see pending NODELETE
713 state for newly loaded objects. There is a compiler barrier in
714 update_scopes which ensures that the changes from
715 activate_nodelete are visible before new objects show up in the
717 activate_nodelete (new);
719 /* Second stage after resize_scopes: Actually perform the scope
720 update. After this, dlsym and lazy binding can bind to new
724 /* FIXME: It is unclear whether the order here is correct.
725 Shouldn't new objects be made available for binding (and thus
726 execution) only after their TLS data has been set up fully?
727 Fixing bug 16134 will likely make this distinction less
730 /* Second stage after resize_tls_slotinfo: Update the slotinfo data
733 /* FIXME: This calls _dl_update_slotinfo, which aborts the process
734 on memory allocation failure. See bug 16134. */
735 update_tls_slotinfo (new);
737 /* Notify the debugger all new objects have been relocated. */
738 if (relocation_in_progress)
739 LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);
742 DL_STATIC_INIT (new);
745 /* Perform the necessary allocations for adding new global objects
746 to the global scope below, via add_to_global_update. */
747 if (mode & RTLD_GLOBAL)
748 add_to_global_resize (new);
750 /* Run the initializer functions of new objects. Temporarily
751 disable the exception handler, so that lazy binding failures are
754 struct dl_init_args init_args =
761 _dl_catch_exception (NULL, call_dl_init, &init_args);
764 /* Now we can make the new map available in the global scope. */
765 if (mode & RTLD_GLOBAL)
766 add_to_global_update (new);
769 /* We must be the static _dl_open in libc.a. A static program that
770 has loaded a dynamic object now has competition. */
771 __libc_multiple_libcs = 1;
774 /* Let the user know about the opencount. */
775 if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
776 _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
777 new->l_name, new->l_ns, new->l_direct_opencount);
/* Public entry point for dlopen/dlmopen: validates MODE and NSID,
   takes the loader lock, runs dl_open_worker under an exception
   handler, restores the pending-adds counter, and on failure closes
   the partially loaded object and re-raises the exception.  */
781 _dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
782 int argc, char *argv[], char *env[])
784 if ((mode & RTLD_BINDING_MASK) == 0)
785 /* One of the flags must be set. */
786 _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));
788 /* Make sure we are alone. */
789 __rtld_lock_lock_recursive (GL(dl_load_lock));
791 if (__glibc_unlikely (nsid == LM_ID_NEWLM))
793 /* Find a new namespace. */
794 for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
795 if (GL(dl_ns)[nsid]._ns_loaded == NULL)
798 if (__glibc_unlikely (nsid == DL_NNS))
800 /* No more namespace available. */
/* Must drop the lock before signalling; _dl_signal_error unwinds
   and would otherwise leave dl_load_lock held.  */
801 __rtld_lock_unlock_recursive (GL(dl_load_lock));
803 _dl_signal_error (EINVAL, file, NULL, N_("\
804 no more namespaces available for dlmopen()"));
806 else if (nsid == GL(dl_nns))
808 __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
812 _dl_debug_initialize (0, nsid)->r_state = RT_CONSISTENT;
814 /* Never allow loading a DSO in a namespace which is empty. Such
815 direct placement only causes problems. Also don't allow
816 loading into a namespace used for auditing. */
817 else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER)
818 && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns))
819 /* This prevents the [NSID] index expressions from being
820 evaluated, so the compiler won't think that we are
821 accessing an invalid index here in the !SHARED case where
822 DL_NNS is 1 and so any NSID != 0 is invalid. */
824 || GL(dl_ns)[nsid]._ns_nloaded == 0
825 || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
826 _dl_signal_error (EINVAL, file, NULL,
827 N_("invalid target namespace in dlmopen()"));
829 struct dl_open_args args;
832 args.caller_dlopen = caller_dlopen;
839 struct dl_exception exception;
840 int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);
842 #if defined USE_LDCONFIG && !defined MAP_COPY
843 /* We must unmap the cache file. */
847 /* Do this for both the error and success cases. The old value has
848 only been determined if the namespace ID was assigned (i.e., it
849 is not __LM_ID_CALLER). In the success case, we actually may
850 have consumed more pending adds than planned (because the local
851 scopes overlap in case of a recursive dlopen, the inner dlopen
852 doing some of the globalization work of the outer dlopen), so the
853 old pending adds value is larger than absolutely necessary.
854 Since it is just a conservative upper bound, this is harmless.
855 The top-level dlopen call will restore the field to zero. */
857 GL (dl_ns)[args.nsid]._ns_global_scope_pending_adds
858 = args.original_global_scope_pending_adds;
860 /* See if an error occurred during loading. */
861 if (__glibc_unlikely (exception.errstring != NULL))
863 /* Remove the object from memory. It may be in an inconsistent
864 state if relocation failed, for example. */
867 /* Maybe some of the modules which were loaded use TLS.
868 Since it will be removed in the following _dl_close call
869 we have to mark the dtv array as having gaps to fill the
870 holes. This is a pessimistic assumption which won't hurt
871 if not true. There is no need to do this when we are
872 loading the auditing DSOs since TLS has not yet been set
874 if ((mode & __RTLD_AUDIT) == 0)
875 GL(dl_tls_dtv_gaps) = true;
877 _dl_close_worker (args.map, true);
879 /* All l_nodelete_pending objects should have been deleted
880 at this point, which is why it is not necessary to reset
884 assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);
886 /* Release the lock. */
887 __rtld_lock_unlock_recursive (GL(dl_load_lock));
889 /* Reraise the error. */
890 _dl_signal_exception (errcode, &exception, NULL);
893 assert (_dl_debug_initialize (0, args.nsid)->r_state == RT_CONSISTENT);
895 /* Release the lock. */
896 __rtld_lock_unlock_recursive (GL(dl_load_lock));
/* Debugging helper: print the scope lists of link map L, starting at
   scope index FROM, using the loader's debug-printf channel.  */
903 _dl_show_scope (struct link_map *l, int from)
905 _dl_debug_printf ("object=%s [%lu]\n",
906 DSO_FILENAME (l->l_name), l->l_ns)
907 if (l->l_scope != NULL)
908 for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
910 _dl_debug_printf (" scope %u:", scope_cnt);
912 for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
913 if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
914 _dl_debug_printf_c (" %s",
915 l->l_scope[scope_cnt]->r_list[cnt]->l_name);
/* An empty l_name means the main program; print its display name.  */
917 _dl_debug_printf_c (" %s", RTLD_PROGNAME);
919 _dl_debug_printf_c ("\n");
922 _dl_debug_printf (" no scope\n");
923 _dl_debug_printf ("\n");