1 /* Map in a shared object's segments from the file.
2 Copyright (C) 1995-2013 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
28 #include <bits/wordsize.h>
30 #include <sys/param.h>
32 #include <sys/types.h>
33 #include "dynamic-link.h"
35 #include <stackinfo.h>
38 #include <stap-probe.h>
42 /* On some systems, no flag bits are given to specify file mapping. */
47 /* The right way to map in the shared library files is MAP_COPY, which
48 makes a virtual copy of the data at the time of the mmap call; this
49 guarantees the mapped pages will be consistent even if the file is
50 overwritten. Some losing VM systems like Linux's lack MAP_COPY. All we
51 get is MAP_PRIVATE, which copies each page when it is modified; this
52 means if the file is overwritten, we may at some point get some pages
53 from the new version after starting with pages from the old version.
55 To make up for the lack of MAP_COPY and avoid the overwriting
56 problem, Linux does provide MAP_DENYWRITE, which prevents anyone
57 from modifying the file while we have it mapped. */
60 # define MAP_COPY (MAP_PRIVATE | MAP_DENYWRITE)
62 # define MAP_COPY MAP_PRIVATE
66 /* Some systems link their relocatable objects at a base address other
67 than 0. We want to know that base address so that we can subtract
68 it from the segment addresses during mapping. This results in more
69 efficient address space usage. Defaults to zero for almost all
70 systems. */
72 # define MAP_BASE_ADDR(l) 0
77 #if BYTE_ORDER == BIG_ENDIAN
78 # define byteorder ELFDATA2MSB
79 #elif BYTE_ORDER == LITTLE_ENDIAN
80 # define byteorder ELFDATA2LSB
82 # error "Unknown BYTE_ORDER " BYTE_ORDER
83 # define byteorder ELFDATANONE
86 #define STRING(x) __STRING (x)
88 /* Handle situations where we have a preferred location in memory for
89 the shared objects. */
90 #ifdef ELF_PREFERRED_ADDRESS_DATA
91 ELF_PREFERRED_ADDRESS_DATA;
93 #ifndef ELF_PREFERRED_ADDRESS
94 # define ELF_PREFERRED_ADDRESS(loader, maplength, mapstartpref) (mapstartpref)
96 #ifndef ELF_FIXED_ADDRESS
97 # define ELF_FIXED_ADDRESS(loader, mapstart) ((void) 0)
101 int __stack_prot attribute_hidden attribute_relro
102 #if _STACK_GROWS_DOWN && defined PROT_GROWSDOWN
104 #elif _STACK_GROWS_UP && defined PROT_GROWSUP
111 /* Type for the buffer we put the ELF header and hopefully the program
112 header in. This buffer does not really have to be too large. In most
113 cases the program header follows the ELF header directly. If this
114 is not the case all bets are off: we could make the buffer
115 arbitrarily large and still not be guaranteed to read it. This means
116 the only question is how large the ELF and program headers are
117 combined. The ELF header in 32-bit files is 52 bytes long and in
118 64-bit files it is 64 bytes. Each program header entry is in turn 32
119 and 56 bytes long respectively. I.e., even with a file which has 10
120 program header entries we only have to read 372B/624B respectively.
121 Add to this a bit of margin for program notes, and reading 512B and
122 832B for 32-bit and 64-bit files respectively is enough. If this
123 heuristic should really fail for some file the code in
124 `_dl_map_object_from_fd' knows how to recover. */
129 # define FILEBUF_SIZE 512
131 # define FILEBUF_SIZE 832
133 char buf[FILEBUF_SIZE] __attribute__ ((aligned (__alignof (ElfW(Ehdr)))));
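/* Note: the buffer is aligned like an ElfW(Ehdr) so that `open_verify'
   can read the file header directly into it and the header can then be
   examined in place via a simple cast (see `header = (void *) fbp->buf'
   below).  */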
136 /* This is the decomposed LD_LIBRARY_PATH search path. */
137 static struct r_search_path_struct env_path_list attribute_relro;
139 /* List of the hardware capabilities we might end up using. */
140 static const struct r_strlenpair *capstr attribute_relro;
141 static size_t ncapstr attribute_relro;
142 static size_t max_capstrlen attribute_relro;
145 /* Get the generated information about the trusted directories. */
146 #include "trusted-dirs.h"
148 static const char system_dirs[] = SYSTEM_DIRS;
149 static const size_t system_dirs_len[] =
153 #define nsystem_dirs_len \
154 (sizeof (system_dirs_len) / sizeof (system_dirs_len[0]))
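/* The generated SYSTEM_DIRS is one string containing all trusted
   directory names, each terminated by a '\0'; system_dirs_len holds the
   corresponding lengths.  Walking the list therefore means advancing by
   system_dirs_len[idx] + 1 bytes per entry, as done below.  */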
157 /* Local version of `strdup' function. */
159 local_strdup (const char *s)
161 size_t len = strlen (s) + 1;
162 void *new = malloc (len);
167 return (char *) memcpy (new, s, len);
172 is_trusted_path (const char *path, size_t len)
174 const char *trun = system_dirs;
176 for (size_t idx = 0; idx < nsystem_dirs_len; ++idx)
178 if (len == system_dirs_len[idx] && memcmp (trun, path, len) == 0)
182 trun += system_dirs_len[idx] + 1;
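/* Variant of is_trusted_path that first normalizes PATH into a scratch
   copy: "." and ".." components are resolved and duplicate slashes are
   collapsed, so that e.g. a $ORIGIN expansion such as
   "/usr//lib/./foo/.." is compared as "/usr/lib/" against the trusted
   directories.  */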
190 is_trusted_path_normalize (const char *path, size_t len)
201 char *npath = (char *) alloca (len + 2);
203 while (*path != '\0')
209 if (path[2] == '.' && (path[3] == '/' || path[3] == '\0'))
211 while (wnp > npath && *--wnp != '/')
216 else if (path[2] == '/' || path[2] == '\0')
223 if (wnp > npath && wnp[-1] == '/')
233 if (wnp == npath || wnp[-1] != '/')
236 const char *trun = system_dirs;
238 for (size_t idx = 0; idx < nsystem_dirs_len; ++idx)
240 if (wnp - npath >= system_dirs_len[idx]
241 && memcmp (trun, npath, system_dirs_len[idx]) == 0)
245 trun += system_dirs_len[idx] + 1;
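/* Return the number of characters consumed if NAME (the text right
   after a '$') denotes the dynamic string token STR, written either
   bare ("$ORIGIN") or in curly braces ("${ORIGIN}"); return 0
   otherwise.  START is the beginning of the whole string and IS_PATH
   says whether ':' separates path elements.  With SECURE set the token
   is additionally required to start its path element and to be
   followed by nothing but '\0', '/' or a separator.  */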
253 is_dst (const char *start, const char *name, const char *str,
254 int is_path, int secure)
257 bool is_curly = false;
266 while (name[len] == str[len] && name[len] != '\0')
271 if (name[len] != '}')
274 /* Point again at the beginning of the name. */
276 /* Skip over closing curly brace and adjust for the --name. */
279 else if (name[len] != '\0' && name[len] != '/'
280 && (!is_path || name[len] != ':'))
283 if (__builtin_expect (secure, 0)
284 && ((name[len] != '\0' && name[len] != '/'
285 && (!is_path || name[len] != ':'))
286 || (name != start + 1 && (!is_path || name[-2] != ':'))))
294 _dl_dst_count (const char *name, int is_path)
296 const char *const start = name;
303 /* $ORIGIN is not expanded for SUID/SGID programs (except if it
304 is $ORIGIN alone) and it must always appear first in the path. */
306 if ((len = is_dst (start, name, "ORIGIN", is_path,
307 INTUSE(__libc_enable_secure))) != 0
308 || (len = is_dst (start, name, "PLATFORM", is_path, 0)) != 0
309 || (len = is_dst (start, name, "LIB", is_path, 0)) != 0)
312 name = strchr (name + len, '$');
314 while (name != NULL);
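/* For example (illustrative): counting the DSTs in
   "$ORIGIN/../lib:${PLATFORM}/tls" with IS_PATH set yields 2, one for
   $ORIGIN and one for ${PLATFORM}.  */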
321 _dl_dst_substitute (struct link_map *l, const char *name, char *result,
324 const char *const start = name;
326 /* Now fill the result path. While copying over the string we keep
327 track of the start of the last path element. When we come across
328 a DST we copy over the value or (if the value is not available)
329 leave the entire path element out. */
331 char *last_elem = result;
332 bool check_for_trusted = false;
336 if (__builtin_expect (*name == '$', 0))
338 const char *repl = NULL;
342 if ((len = is_dst (start, name, "ORIGIN", is_path,
343 INTUSE(__libc_enable_secure))) != 0)
346 check_for_trusted = (INTUSE(__libc_enable_secure)
347 && l->l_type == lt_executable);
349 else if ((len = is_dst (start, name, "PLATFORM", is_path, 0)) != 0)
350 repl = GLRO(dl_platform);
351 else if ((len = is_dst (start, name, "LIB", is_path, 0)) != 0)
354 if (repl != NULL && repl != (const char *) -1)
356 wp = __stpcpy (wp, repl);
361 /* We cannot use this path element, the value of the
362 replacement is unknown. */
365 while (*name != '\0' && (!is_path || *name != ':'))
367 /* Also skip following colon if this is the first rpath
368 element, but keep an empty element at the end. */
369 if (wp == result && is_path && *name == ':' && name[1] != '\0')
373 /* No DST we recognize. */
379 if (is_path && *name == ':')
381 /* In SUID/SGID programs, after $ORIGIN expansion the
382 normalized path must be rooted in one of the trusted
384 if (__builtin_expect (check_for_trusted, false)
385 && !is_trusted_path_normalize (last_elem, wp - last_elem))
390 check_for_trusted = false;
394 while (*name != '\0');
396 /* In SUID/SGID programs, after $ORIGIN expansion the normalized
397 path must be rooted in one of the trusted directories. */
398 if (__builtin_expect (check_for_trusted, false)
399 && !is_trusted_path_normalize (last_elem, wp - last_elem))
408 /* Return copy of argument with all recognized dynamic string tokens
409 ($ORIGIN and $PLATFORM for now) replaced. On some platforms it
410 might not be possible to determine the path from which the object
411 belonging to the map is loaded. In this case the path element
412 containing $ORIGIN is left out. */
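/* For example (illustrative values): for an object loaded from
   "/opt/app/bin", the string "$ORIGIN/../lib" expands to
   "/opt/app/bin/../lib"; $PLATFORM is replaced by GLRO(dl_platform).  */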
414 expand_dynamic_string_token (struct link_map *l, const char *s, int is_path)
416 /* We make two runs over the string. First we determine how large the
417 resulting string is and then we copy it over. Since this is not a
418 frequently executed operation we are looking here not for performance
419 but rather for code size. */
424 /* Determine the number of DST elements. */
425 cnt = DL_DST_COUNT (s, is_path);
427 /* If we do not have to replace anything simply copy the string. */
428 if (__builtin_expect (cnt, 0) == 0)
429 return local_strdup (s);
431 /* Determine the length of the substituted string. */
432 total = DL_DST_REQUIRED (l, s, strlen (s), cnt);
434 /* Allocate the necessary memory. */
435 result = (char *) malloc (total + 1);
439 return _dl_dst_substitute (l, s, result, is_path);
443 /* Add `name' to the list of names for a particular shared object.
444 `name' is expected to have been allocated with malloc and will
445 be freed if the shared object already has this name.
446 Nothing is added if the object already had this name. */
449 add_name_to_object (struct link_map *l, const char *name)
451 struct libname_list *lnp, *lastp;
452 struct libname_list *newname;
456 for (lnp = l->l_libname; lnp != NULL; lastp = lnp, lnp = lnp->next)
457 if (strcmp (name, lnp->name) == 0)
460 name_len = strlen (name) + 1;
461 newname = (struct libname_list *) malloc (sizeof *newname + name_len);
464 /* No more memory. */
465 _dl_signal_error (ENOMEM, name, NULL, N_("cannot allocate name record"));
468 /* The object should have a libname set from _dl_new_object. */
469 assert (lastp != NULL);
471 newname->name = memcpy (newname + 1, name, name_len);
472 newname->next = NULL;
473 newname->dont_free = 0;
474 lastp->next = newname;
477 /* Standard search directories. */
478 static struct r_search_path_struct rtld_search_dirs attribute_relro;
480 static size_t max_dirnamelen;
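/* Split RPATH at the separator characters in SEP, look every directory
   up in (or add it to) the global list GL(dl_all_dirs), and append each
   entry to RESULT unless it is already there; the array is terminated
   with NULL.  WHAT and WHERE only describe the origin of the path for
   diagnostics, and with CHECK_TRUSTED set only the trusted system
   directories are accepted.  */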
482 static struct r_search_path_elem **
483 fillin_rpath (char *rpath, struct r_search_path_elem **result, const char *sep,
484 int check_trusted, const char *what, const char *where)
489 while ((cp = __strsep (&rpath, sep)) != NULL)
491 struct r_search_path_elem *dirp;
492 size_t len = strlen (cp);
494 /* `strsep' can pass an empty string. This has to be
495 interpreted as `use the current directory'. */
498 static const char curwd[] = "./";
502 /* Remove trailing slashes (except for "/"). */
503 while (len > 1 && cp[len - 1] == '/')
506 /* Now add one if there is none so far. */
507 if (len > 0 && cp[len - 1] != '/')
510 /* Make sure we don't use untrusted directories if we run SUID. */
511 if (__builtin_expect (check_trusted, 0) && !is_trusted_path (cp, len))
514 /* See if this directory is already known. */
515 for (dirp = GL(dl_all_dirs); dirp != NULL; dirp = dirp->next)
516 if (dirp->dirnamelen == len && memcmp (cp, dirp->dirname, len) == 0)
521 /* It is available, see whether it's on our own list. */
523 for (cnt = 0; cnt < nelems; ++cnt)
524 if (result[cnt] == dirp)
528 result[nelems++] = dirp;
533 enum r_dir_status init_val;
534 size_t where_len = where ? strlen (where) + 1 : 0;
536 /* It's a new directory. Create an entry and add it. */
537 dirp = (struct r_search_path_elem *)
538 malloc (sizeof (*dirp) + ncapstr * sizeof (enum r_dir_status)
539 + where_len + len + 1);
541 _dl_signal_error (ENOMEM, NULL, NULL,
542 N_("cannot create cache for search path"));
544 dirp->dirname = ((char *) dirp + sizeof (*dirp)
545 + ncapstr * sizeof (enum r_dir_status));
546 *((char *) __mempcpy ((char *) dirp->dirname, cp, len)) = '\0';
547 dirp->dirnamelen = len;
549 if (len > max_dirnamelen)
550 max_dirnamelen = len;
552 /* We have to make sure all the relative directories are
553 never ignored. The current directory might change and
554 all our saved information would be void. */
555 init_val = cp[0] != '/' ? existing : unknown;
556 for (cnt = 0; cnt < ncapstr; ++cnt)
557 dirp->status[cnt] = init_val;
560 if (__builtin_expect (where != NULL, 1))
561 dirp->where = memcpy ((char *) dirp + sizeof (*dirp) + len + 1
562 + (ncapstr * sizeof (enum r_dir_status)),
567 dirp->next = GL(dl_all_dirs);
568 GL(dl_all_dirs) = dirp;
570 /* Put it in the result array. */
571 result[nelems++] = dirp;
575 /* Terminate the array. */
576 result[nelems] = NULL;
584 decompose_rpath (struct r_search_path_struct *sps,
585 const char *rpath, struct link_map *l, const char *what)
587 /* Make a copy we can work with. */
588 const char *where = l->l_name;
591 struct r_search_path_elem **result;
593 /* Initialize to please the compiler. */
594 const char *errstring = NULL;
596 /* First see whether we must forget the RUNPATH and RPATH from this
598 if (__builtin_expect (GLRO(dl_inhibit_rpath) != NULL, 0)
599 && !INTUSE(__libc_enable_secure))
601 const char *inhp = GLRO(dl_inhibit_rpath);
605 const char *wp = where;
607 while (*inhp == *wp && *wp != '\0')
613 if (*wp == '\0' && (*inhp == '\0' || *inhp == ':'))
615 /* This object is on the list of objects for which the
616 RUNPATH and RPATH must not be used. */
617 sps->dirs = (void *) -1;
621 while (*inhp != '\0')
625 while (*inhp != '\0');
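/* GLRO(dl_inhibit_rpath) comes from ld.so's --inhibit-rpath option: a
   colon-separated list of object names whose RUNPATH/RPATH must be
   ignored.  The loop above matches WHERE against each element of that
   list.  */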
628 /* Make a writable copy. At the same time expand possible dynamic
630 copy = expand_dynamic_string_token (l, rpath, 1);
633 errstring = N_("cannot create RUNPATH/RPATH copy");
637 /* Ignore empty rpaths. */
641 sps->dirs = (struct r_search_path_elem **) -1;
645 /* Count the number of necessary elements in the result array. */
647 for (cp = copy; *cp != '\0'; ++cp)
651 /* Allocate room for the result. NELEMS + 1 is an upper limit for the
652 number of necessary entries. */
653 result = (struct r_search_path_elem **) malloc ((nelems + 1 + 1)
658 errstring = N_("cannot create cache for search path");
660 _dl_signal_error (ENOMEM, NULL, NULL, errstring);
663 fillin_rpath (copy, result, ":", 0, what, where);
665 /* Free the copied RPATH string. `fillin_rpath' makes its own copies if
670 /* The caller will change this value if we haven't used a real malloc. */
675 /* Make sure cached path information is stored in *SP
676 and return true if there are any paths to search there. */
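/* Throughout this file SP->dirs == (void *) -1 serves as a sentinel
   meaning "there are no directories here, do not try to decompose the
   path again", as opposed to NULL which means "not processed yet".  */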
678 cache_rpath (struct link_map *l,
679 struct r_search_path_struct *sp,
683 if (sp->dirs == (void *) -1)
686 if (sp->dirs != NULL)
689 if (l->l_info[tag] == NULL)
691 /* There is no path. */
692 sp->dirs = (void *) -1;
696 /* Make sure the cache information is available. */
697 return decompose_rpath (sp, (const char *) (D_PTR (l, l_info[DT_STRTAB])
698 + l->l_info[tag]->d_un.d_val),
705 _dl_init_paths (const char *llp)
709 struct r_search_path_elem *pelem, **aelem;
714 /* Initialize to please the compiler. */
715 const char *errstring = NULL;
717 /* Fill in the information about the application's RPATH and the
718 directories addressed by the LD_LIBRARY_PATH environment variable. */
720 /* Get the capabilities. */
721 capstr = _dl_important_hwcaps (GLRO(dl_platform), GLRO(dl_platformlen),
722 &ncapstr, &max_capstrlen);
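/* capstr now holds the hardware-capability and platform subdirectory
   suffixes that are appended to every search directory when probing
   for a file (see open_path below); ncapstr is their number and
   max_capstrlen the length of the longest one.  */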
724 /* First set up the rest of the default search directory entries. */
725 aelem = rtld_search_dirs.dirs = (struct r_search_path_elem **)
726 malloc ((nsystem_dirs_len + 1) * sizeof (struct r_search_path_elem *));
727 if (rtld_search_dirs.dirs == NULL)
729 errstring = N_("cannot create search path array");
731 _dl_signal_error (ENOMEM, NULL, NULL, errstring);
734 round_size = ((2 * sizeof (struct r_search_path_elem) - 1
735 + ncapstr * sizeof (enum r_dir_status))
736 / sizeof (struct r_search_path_elem));
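/* round_size is the size of one r_search_path_elem plus its trailing
   status array, rounded up to a whole number of r_search_path_elem
   units; the single allocation below holds one such slot per system
   directory, and consecutive slots are addressed as pelem + round_size.  */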
738 rtld_search_dirs.dirs[0] = (struct r_search_path_elem *)
739 malloc ((sizeof (system_dirs) / sizeof (system_dirs[0]))
740 * round_size * sizeof (struct r_search_path_elem));
741 if (rtld_search_dirs.dirs[0] == NULL)
743 errstring = N_("cannot create cache for search path");
747 rtld_search_dirs.malloced = 0;
748 pelem = GL(dl_all_dirs) = rtld_search_dirs.dirs[0];
758 pelem->what = "system search path";
761 pelem->dirname = strp;
762 pelem->dirnamelen = system_dirs_len[idx];
763 strp += system_dirs_len[idx] + 1;
765 /* System paths must be absolute. */
766 assert (pelem->dirname[0] == '/');
767 for (cnt = 0; cnt < ncapstr; ++cnt)
768 pelem->status[cnt] = unknown;
770 pelem->next = (++idx == nsystem_dirs_len ? NULL : (pelem + round_size));
774 while (idx < nsystem_dirs_len);
776 max_dirnamelen = SYSTEM_DIRS_MAX_LEN;
780 /* This points to the map of the main object. */
781 l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
784 assert (l->l_type != lt_loaded);
786 if (l->l_info[DT_RUNPATH])
788 /* Allocate room for the search path and fill in information
790 decompose_rpath (&l->l_runpath_dirs,
791 (const void *) (D_PTR (l, l_info[DT_STRTAB])
792 + l->l_info[DT_RUNPATH]->d_un.d_val),
794 /* During rtld init the memory is allocated by the stub malloc;
795 prevent any attempt to free it by the normal malloc. */
796 l->l_runpath_dirs.malloced = 0;
798 /* The RPATH is ignored. */
799 l->l_rpath_dirs.dirs = (void *) -1;
803 l->l_runpath_dirs.dirs = (void *) -1;
805 if (l->l_info[DT_RPATH])
807 /* Allocate room for the search path and fill in information
809 decompose_rpath (&l->l_rpath_dirs,
810 (const void *) (D_PTR (l, l_info[DT_STRTAB])
811 + l->l_info[DT_RPATH]->d_un.d_val),
813 /* During rtld init the memory is allocated by the stub
814 malloc; prevent any attempt to free it by the normal
816 l->l_rpath_dirs.malloced = 0;
819 l->l_rpath_dirs.dirs = (void *) -1;
824 if (llp != NULL && *llp != '\0')
827 const char *cp = llp;
832 size_t cnt = DL_DST_COUNT (llp, 1);
833 if (__builtin_expect (cnt == 0, 1))
834 llp_tmp = strdupa (llp);
837 /* Determine the length of the substituted string. */
838 size_t total = DL_DST_REQUIRED (l, llp, strlen (llp), cnt);
840 /* Allocate the necessary memory. */
841 llp_tmp = (char *) alloca (total + 1);
842 llp_tmp = _dl_dst_substitute (l, llp, llp_tmp, 1);
845 llp_tmp = strdupa (llp);
848 /* Decompose the LD_LIBRARY_PATH contents. First determine how many
853 if (*cp == ':' || *cp == ';')
858 env_path_list.dirs = (struct r_search_path_elem **)
859 malloc ((nllp + 1) * sizeof (struct r_search_path_elem *));
860 if (env_path_list.dirs == NULL)
862 errstring = N_("cannot create cache for search path");
866 (void) fillin_rpath (llp_tmp, env_path_list.dirs, ":;",
867 INTUSE(__libc_enable_secure), "LD_LIBRARY_PATH",
870 if (env_path_list.dirs[0] == NULL)
872 free (env_path_list.dirs);
873 env_path_list.dirs = (void *) -1;
876 env_path_list.malloced = 0;
879 env_path_list.dirs = (void *) -1;
884 __attribute__ ((noreturn, noinline))
885 lose (int code, int fd, const char *name, char *realname, struct link_map *l,
886 const char *msg, struct r_debug *r, Lmid_t nsid)
888 /* The file might already be closed. */
891 if (l != NULL && l->l_origin != (char *) -1l)
892 free ((char *) l->l_origin);
898 r->r_state = RT_CONSISTENT;
900 LIBC_PROBE (map_failed, 2, nsid, r);
903 _dl_signal_error (code, name, NULL, msg);
907 /* Map in the shared object NAME, actually located in REALNAME, and already
910 #ifndef EXTERNAL_MAP_FROM_FD
914 _dl_map_object_from_fd (const char *name, int fd, struct filebuf *fbp,
915 char *realname, struct link_map *loader, int l_type,
916 int mode, void **stack_endp, Lmid_t nsid)
918 struct link_map *l = NULL;
919 const ElfW(Ehdr) *header;
920 const ElfW(Phdr) *phdr;
921 const ElfW(Phdr) *ph;
925 /* Initialize to keep the compiler happy. */
926 const char *errstring = NULL;
928 struct r_debug *r = _dl_debug_initialize (0, nsid);
929 bool make_consistent = false;
931 /* Get file information. */
932 if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &st) < 0, 0))
934 errstring = N_("cannot stat shared object");
938 lose (errval, fd, name, realname, l, errstring,
939 make_consistent ? r : NULL, nsid);
942 /* Look again to see if the real name matched another already loaded. */
943 for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
944 if (l->l_removed == 0 && l->l_ino == st.st_ino && l->l_dev == st.st_dev)
946 /* The object is already loaded.
947 Just bump its reference count and return it. */
950 /* If the name is not in the list of names for this object add
953 add_name_to_object (l, name);
959 /* When loading into a namespace other than the base one we must
960 avoid loading ld.so since there can only be one copy. Ever. */
961 if (__builtin_expect (nsid != LM_ID_BASE, 0)
962 && ((st.st_ino == GL(dl_rtld_map).l_ino
963 && st.st_dev == GL(dl_rtld_map).l_dev)
964 || _dl_name_match_p (name, &GL(dl_rtld_map))))
966 /* This is indeed ld.so. Create a new link_map which refers to
967 the real one for almost everything. */
968 l = _dl_new_object (realname, name, l_type, loader, mode, nsid);
972 /* Refer to the real descriptor. */
973 l->l_real = &GL(dl_rtld_map);
975 /* No need to bump the refcount of the real object, ld.so will
976 never be unloaded. */
979 /* Add the map for the mirrored object to the object list. */
980 _dl_add_to_namespace_list (l, nsid);
986 if (mode & RTLD_NOLOAD)
988 /* We are not supposed to load the object unless it is already
989 loaded. So return now. */
995 /* Print debugging message. */
996 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
997 _dl_debug_printf ("file=%s [%lu]; generating link map\n", name, nsid);
999 /* This is the ELF header. We read it in `open_verify'. */
1000 header = (void *) fbp->buf;
1004 if (_dl_zerofd == -1)
1006 _dl_zerofd = _dl_sysdep_open_zero_fill ();
1007 if (_dl_zerofd == -1)
1011 _dl_signal_error (errno, NULL, NULL,
1012 N_("cannot open zero fill device"));
1017 /* Signal that we are going to add new objects. */
1018 if (r->r_state == RT_CONSISTENT)
1021 /* Auditing checkpoint: we are going to add new objects. */
1022 if ((mode & __RTLD_AUDIT) == 0
1023 && __builtin_expect (GLRO(dl_naudit) > 0, 0))
1025 struct link_map *head = GL(dl_ns)[nsid]._ns_loaded;
1026 /* Do not call the functions for any auditing object. */
1027 if (head->l_auditing == 0)
1029 struct audit_ifaces *afct = GLRO(dl_audit);
1030 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
1032 if (afct->activity != NULL)
1033 afct->activity (&head->l_audit[cnt].cookie, LA_ACT_ADD);
1041 /* Notify the debugger we have added some objects. We need to
1042 call _dl_debug_initialize in a static program in case dynamic
1043 linking has not been used before. */
1044 r->r_state = RT_ADD;
1046 LIBC_PROBE (map_start, 2, nsid, r);
1047 make_consistent = true;
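/* Flipping r_state to RT_ADD here (and back to RT_CONSISTENT when the
   operation completes or `lose' is reached) follows the usual r_debug
   rendezvous protocol, so a debugger only examines the namespace's
   link map in consistent states; the map_start/map_failed probes mark
   the same transitions.  */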
1050 assert (r->r_state == RT_ADD);
1052 /* Enter the new object in the list of loaded objects. */
1053 l = _dl_new_object (realname, name, l_type, loader, mode, nsid);
1054 if (__builtin_expect (l == NULL, 0))
1059 errstring = N_("cannot create shared object descriptor");
1060 goto call_lose_errno;
1063 /* Extract the remaining details we need from the ELF header
1064 and then read in the program header table. */
1065 l->l_entry = header->e_entry;
1066 type = header->e_type;
1067 l->l_phnum = header->e_phnum;
1069 maplength = header->e_phnum * sizeof (ElfW(Phdr));
1070 if (header->e_phoff + maplength <= (size_t) fbp->len)
1071 phdr = (void *) (fbp->buf + header->e_phoff);
1074 phdr = alloca (maplength);
1075 __lseek (fd, header->e_phoff, SEEK_SET);
1076 if ((size_t) __libc_read (fd, (void *) phdr, maplength) != maplength)
1078 errstring = N_("cannot read file data");
1079 goto call_lose_errno;
1083 /* On most platforms presume that PT_GNU_STACK is absent and the stack is
1084 executable. Other platforms default to a nonexecutable stack and don't
1085 need PT_GNU_STACK to do so. */
1086 uint_fast16_t stack_flags = DEFAULT_STACK_PERMS;
1089 /* Scan the program header table, collecting its load commands. */
1092 ElfW(Addr) mapstart, mapend, dataend, allocend;
1095 } loadcmds[l->l_phnum], *c;
1096 size_t nloadcmds = 0;
1097 bool has_holes = false;
1099 /* The struct is initialized to zero so this is not necessary:
1103 for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
1106 /* These entries tell us where to find things once the file's
1107 segments are mapped in. We record the addresses it says
1108 verbatim, and later correct for the run-time load address. */
1110 l->l_ld = (void *) ph->p_vaddr;
1111 l->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
1115 l->l_phdr = (void *) ph->p_vaddr;
1119 /* A load command tells us to map in part of the file.
1120 We record the load commands and process them all later. */
1121 if (__builtin_expect ((ph->p_align & (GLRO(dl_pagesize) - 1)) != 0,
1124 errstring = N_("ELF load command alignment not page-aligned");
1127 if (__builtin_expect (((ph->p_vaddr - ph->p_offset)
1128 & (ph->p_align - 1)) != 0, 0))
1131 = N_("ELF load command address/offset not properly aligned");
1135 c = &loadcmds[nloadcmds++];
1136 c->mapstart = ph->p_vaddr & ~(GLRO(dl_pagesize) - 1);
1137 c->mapend = ((ph->p_vaddr + ph->p_filesz + GLRO(dl_pagesize) - 1)
1138 & ~(GLRO(dl_pagesize) - 1));
1139 c->dataend = ph->p_vaddr + ph->p_filesz;
1140 c->allocend = ph->p_vaddr + ph->p_memsz;
1141 c->mapoff = ph->p_offset & ~(GLRO(dl_pagesize) - 1);
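/* Illustrative example (values not from the original): with a 4096-byte
   page size, p_vaddr == 0x1234 and p_filesz == 0x100 give
   mapstart == 0x1000, dataend == 0x1334 and mapend == 0x2000, while
   allocend additionally covers the p_memsz part beyond the file data.  */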
1143 /* Determine whether there is a gap between the last segment
1145 if (nloadcmds > 1 && c[-1].mapend != c->mapstart)
1148 /* Optimize a common case. */
1149 #if (PF_R | PF_W | PF_X) == 7 && (PROT_READ | PROT_WRITE | PROT_EXEC) == 7
1150 c->prot = (PF_TO_PROT
1151 >> ((ph->p_flags & (PF_R | PF_W | PF_X)) * 4)) & 0xf;
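/* PF_TO_PROT packs the PROT_* combination for each of the eight
   possible PF_R|PF_W|PF_X values into one word, four bits per value,
   so the shift-and-mask above amounts to a table lookup without any
   memory access.  */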
1154 if (ph->p_flags & PF_R)
1155 c->prot |= PROT_READ;
1156 if (ph->p_flags & PF_W)
1157 c->prot |= PROT_WRITE;
1158 if (ph->p_flags & PF_X)
1159 c->prot |= PROT_EXEC;
1164 if (ph->p_memsz == 0)
1165 /* Nothing to do for an empty segment. */
1168 l->l_tls_blocksize = ph->p_memsz;
1169 l->l_tls_align = ph->p_align;
1170 if (ph->p_align == 0)
1171 l->l_tls_firstbyte_offset = 0;
1173 l->l_tls_firstbyte_offset = ph->p_vaddr & (ph->p_align - 1);
1174 l->l_tls_initimage_size = ph->p_filesz;
1175 /* Since we don't know the load address yet only store the
1176 offset. We will adjust it later. */
1177 l->l_tls_initimage = (void *) ph->p_vaddr;
1179 /* If not loading the initial set of shared libraries,
1180 check whether we should permit loading a TLS segment. */
1181 if (__builtin_expect (l->l_type == lt_library, 1)
1182 /* If GL(dl_tls_dtv_slotinfo_list) == NULL, then rtld.c did
1183 not set up TLS data structures, so don't use them now. */
1184 || __builtin_expect (GL(dl_tls_dtv_slotinfo_list) != NULL, 1))
1186 /* Assign the next available module ID. */
1187 l->l_tls_modid = _dl_next_tls_modid ();
1192 if (l->l_prev == NULL || (mode & __RTLD_AUDIT) != 0)
1193 /* We are loading the executable itself when the dynamic linker
1194 was executed directly. The setup will happen later. */
1197 # ifdef _LIBC_REENTRANT
1198 /* In a static binary there is no way to tell if we dynamically
1199 loaded libpthread. */
1200 if (GL(dl_error_catch_tsd) == &_dl_initial_error_catch_tsd)
1204 /* We have not yet loaded libpthread.
1205 We can do the TLS setup right now! */
1209 /* The first call allocates TLS bookkeeping data structures.
1210 Then we allocate the TCB for the initial thread. */
1211 if (__builtin_expect (_dl_tls_setup (), 0)
1212 || __builtin_expect ((tcb = _dl_allocate_tls (NULL)) == NULL,
1217 cannot allocate TLS data structures for initial thread");
1221 /* Now we install the TCB in the thread register. */
1222 errstring = TLS_INIT_TP (tcb, 0);
1223 if (__builtin_expect (errstring == NULL, 1))
1225 /* Now we are all good. */
1226 l->l_tls_modid = ++GL(dl_tls_max_dtv_idx);
1230 /* The kernel is too old or somesuch. */
1232 _dl_deallocate_tls (tcb, 1);
1236 /* Uh-oh, the binary expects TLS support but we cannot
1239 errstring = N_("cannot handle TLS data");
1244 stack_flags = ph->p_flags;
1248 l->l_relro_addr = ph->p_vaddr;
1249 l->l_relro_size = ph->p_memsz;
1253 if (__builtin_expect (nloadcmds == 0, 0))
1255 /* This only happens for a bogus object that will be caught with
1256 another error below. But we don't want to go through the
1257 calculations below using NLOADCMDS - 1. */
1258 errstring = N_("object file has no loadable segments");
1262 /* Now process the load commands and map segments into memory. */
1265 /* Length of the segments to be loaded. */
1266 maplength = loadcmds[nloadcmds - 1].allocend - c->mapstart;
1268 if (__builtin_expect (type, ET_DYN) == ET_DYN)
1270 /* This is a position-independent shared object. We can let the
1271 kernel map it anywhere it likes, but we must have space for all
1272 the segments in their specified positions relative to the first.
1273 So we map the first segment without MAP_FIXED, but with its
1274 extent increased to cover all the segments. Then we remove
1275 access from the excess portion, and there is known to be sufficient
1276 space there to remap the later segments.
1278 As a refinement, sometimes we have an address that we would
1279 prefer to map such objects at; but this is only a preference,
1280 the OS can do whatever it likes. */
1282 mappref = (ELF_PREFERRED_ADDRESS (loader, maplength,
1283 c->mapstart & GLRO(dl_use_load_bias))
1284 - MAP_BASE_ADDR (l));
1286 /* Remember which part of the address space this object uses. */
1287 l->l_map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplength,
1291 if (__builtin_expect ((void *) l->l_map_start == MAP_FAILED, 0))
1294 errstring = N_("failed to map segment from shared object");
1295 goto call_lose_errno;
1298 l->l_map_end = l->l_map_start + maplength;
1299 l->l_addr = l->l_map_start - c->mapstart;
1302 /* Change protection on the excess portion to disallow all access;
1303 the portions we do not remap later will be inaccessible as if
1304 unallocated. Then jump into the normal segment-mapping loop to
1305 handle the portion of the segment past the end of the file
1307 __mprotect ((caddr_t) (l->l_addr + c->mapend),
1308 loadcmds[nloadcmds - 1].mapstart - c->mapend,
1311 l->l_contiguous = 1;
1316 /* This object is loaded at a fixed address. This must never
1317 happen for objects loaded with dlopen(). */
1318 if (__builtin_expect ((mode & __RTLD_OPENEXEC) == 0, 0))
1320 errstring = N_("cannot dynamically load executable");
1324 /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
1326 ELF_FIXED_ADDRESS (loader, c->mapstart);
1329 /* Remember which part of the address space this object uses. */
1330 l->l_map_start = c->mapstart + l->l_addr;
1331 l->l_map_end = l->l_map_start + maplength;
1332 l->l_contiguous = !has_holes;
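/* l_contiguous records whether [l_map_start, l_map_end) is backed by
   this object without holes.  In the ET_DYN case above it is always
   true because the initial oversized mapping reserves the whole range
   (excess parts are at most PROT_NONE); for a fixed-address object it
   depends on whether gaps were seen between the load segments.  */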
1334 while (c < &loadcmds[nloadcmds])
1336 if (c->mapend > c->mapstart
1337 /* Map the segment contents from the file. */
1338 && (__mmap ((void *) (l->l_addr + c->mapstart),
1339 c->mapend - c->mapstart, c->prot,
1340 MAP_FIXED|MAP_COPY|MAP_FILE,
1346 if (c->prot & PROT_EXEC)
1347 l->l_text_end = l->l_addr + c->mapend;
1350 && c->mapoff <= header->e_phoff
1351 && ((size_t) (c->mapend - c->mapstart + c->mapoff)
1352 >= header->e_phoff + header->e_phnum * sizeof (ElfW(Phdr))))
1353 /* Found the program header in this segment. */
1354 l->l_phdr = (void *) (uintptr_t) (c->mapstart + header->e_phoff
1357 if (c->allocend > c->dataend)
1359 /* Extra zero pages should appear at the end of this segment,
1360 after the data mapped from the file. */
1361 ElfW(Addr) zero, zeroend, zeropage;
1363 zero = l->l_addr + c->dataend;
1364 zeroend = l->l_addr + c->allocend;
1365 zeropage = ((zero + GLRO(dl_pagesize) - 1)
1366 & ~(GLRO(dl_pagesize) - 1));
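/* Illustrative example (values not from the original): with 4096-byte
   pages, dataend at offset 0x1340 and allocend at 0x5000, zero is
   l_addr + 0x1340, zeropage is l_addr + 0x2000 and zeroend is
   l_addr + 0x5000; the code below then clears [zero, zeropage) by hand
   and maps [zeropage, zeroend) as anonymous zero-fill pages.  */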
1368 if (zeroend < zeropage)
1369 /* All the extra data is in the last page of the segment.
1370 We can just zero it. */
1373 if (zeropage > zero)
1375 /* Zero the final part of the last page of the segment. */
1376 if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
1379 if (__mprotect ((caddr_t) (zero
1380 & ~(GLRO(dl_pagesize) - 1)),
1381 GLRO(dl_pagesize), c->prot|PROT_WRITE) < 0)
1383 errstring = N_("cannot change memory protections");
1384 goto call_lose_errno;
1387 memset ((void *) zero, '\0', zeropage - zero);
1388 if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0))
1389 __mprotect ((caddr_t) (zero & ~(GLRO(dl_pagesize) - 1)),
1390 GLRO(dl_pagesize), c->prot);
1393 if (zeroend > zeropage)
1395 /* Map the remaining zero pages in from the zero fill FD. */
1397 mapat = __mmap ((caddr_t) zeropage, zeroend - zeropage,
1398 c->prot, MAP_ANON|MAP_PRIVATE|MAP_FIXED,
1400 if (__builtin_expect (mapat == MAP_FAILED, 0))
1402 errstring = N_("cannot map zero-fill pages");
1403 goto call_lose_errno;
1414 if (__builtin_expect (type == ET_DYN, 0))
1416 errstring = N_("object file has no dynamic section");
1421 l->l_ld = (ElfW(Dyn) *) ((ElfW(Addr)) l->l_ld + l->l_addr);
1423 elf_get_dynamic_info (l, NULL);
1425 /* Make sure we are not dlopen'ing an object that has the
1426 DF_1_NOOPEN flag set. */
1427 if (__builtin_expect (l->l_flags_1 & DF_1_NOOPEN, 0)
1428 && (mode & __RTLD_DLOPEN))
1430 /* We are not supposed to load this object. Free all resources. */
1431 __munmap ((void *) l->l_map_start, l->l_map_end - l->l_map_start);
1433 if (!l->l_libname->dont_free)
1434 free (l->l_libname);
1436 if (l->l_phdr_allocated)
1437 free ((void *) l->l_phdr);
1439 errstring = N_("shared object cannot be dlopen()ed");
1443 if (l->l_phdr == NULL)
1445 /* The program header is not contained in any of the segments.
1446 We have to allocate memory ourselves and copy it over from our
1448 ElfW(Phdr) *newp = (ElfW(Phdr) *) malloc (header->e_phnum
1449 * sizeof (ElfW(Phdr)));
1452 errstring = N_("cannot allocate memory for program header");
1453 goto call_lose_errno;
1456 l->l_phdr = memcpy (newp, phdr,
1457 (header->e_phnum * sizeof (ElfW(Phdr))));
1458 l->l_phdr_allocated = 1;
1461 /* Adjust the PT_PHDR value by the runtime load address. */
1462 l->l_phdr = (ElfW(Phdr) *) ((ElfW(Addr)) l->l_phdr + l->l_addr);
1464 if (__builtin_expect ((stack_flags &~ GL(dl_stack_flags)) & PF_X, 0))
1466 if (__builtin_expect (__check_caller (RETURN_ADDRESS (0), allow_ldso),
1469 errstring = N_("invalid caller");
1473 /* The stack is presently not executable, but this module
1474 requires that it be executable. We must change the
1475 protection of the variable which contains the flags used in
1476 the mprotect calls. */
1478 if ((mode & (__RTLD_DLOPEN | __RTLD_AUDIT)) == __RTLD_DLOPEN)
1480 const uintptr_t p = (uintptr_t) &__stack_prot & -GLRO(dl_pagesize);
1481 const size_t s = (uintptr_t) (&__stack_prot + 1) - p;
1483 struct link_map *const m = &GL(dl_rtld_map);
1484 const uintptr_t relro_end = ((m->l_addr + m->l_relro_addr
1486 & -GLRO(dl_pagesize));
1487 if (__builtin_expect (p + s <= relro_end, 1))
1489 /* The variable lies in the region protected by RELRO. */
1490 __mprotect ((void *) p, s, PROT_READ|PROT_WRITE);
1491 __stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
1492 __mprotect ((void *) p, s, PROT_READ);
1495 __stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
1499 __stack_prot |= PROT_READ|PROT_WRITE|PROT_EXEC;
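/* Once RELRO has been applied, __stack_prot sits in a read-only page,
   which is why the dlopen case above briefly remaps that page
   read-write (when the variable indeed lies in the RELRO region)
   before OR-ing in PROT_EXEC and then restores PROT_READ; the
   dl_make_stack_executable_hook call below applies the new flags to
   the stack itself.  */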
1501 #ifdef check_consistency
1502 check_consistency ();
1505 errval = (*GL(dl_make_stack_executable_hook)) (stack_endp);
1509 cannot enable executable stack as shared object requires");
1514 /* Adjust the address of the TLS initialization image. */
1515 if (l->l_tls_initimage != NULL)
1516 l->l_tls_initimage = (char *) l->l_tls_initimage + l->l_addr;
1518 /* We are done mapping in the file. We no longer need the descriptor. */
1519 if (__builtin_expect (__close (fd) != 0, 0))
1521 errstring = N_("cannot close file descriptor");
1522 goto call_lose_errno;
1524 /* Signal that we closed the file. */
1527 if (l->l_type == lt_library && type == ET_EXEC)
1528 l->l_type = lt_executable;
1530 l->l_entry += l->l_addr;
1532 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0))
1533 _dl_debug_printf ("\
1534 dynamic: 0x%0*lx base: 0x%0*lx size: 0x%0*Zx\n\
1535 entry: 0x%0*lx phdr: 0x%0*lx phnum: %*u\n\n",
1536 (int) sizeof (void *) * 2,
1537 (unsigned long int) l->l_ld,
1538 (int) sizeof (void *) * 2,
1539 (unsigned long int) l->l_addr,
1540 (int) sizeof (void *) * 2, maplength,
1541 (int) sizeof (void *) * 2,
1542 (unsigned long int) l->l_entry,
1543 (int) sizeof (void *) * 2,
1544 (unsigned long int) l->l_phdr,
1545 (int) sizeof (void *) * 2, l->l_phnum);
1547 /* Set up the symbol hash table. */
1550 /* If this object has DT_SYMBOLIC set, modify its scope now. We don't
1551 have to do this for the main map. */
1552 if ((mode & RTLD_DEEPBIND) == 0
1553 && __builtin_expect (l->l_info[DT_SYMBOLIC] != NULL, 0)
1554 && &l->l_searchlist != l->l_scope[0])
1556 /* Create an appropriate searchlist. It contains only this map.
1557 This is the definition of DT_SYMBOLIC in SysVr4. */
1558 l->l_symbolic_searchlist.r_list[0] = l;
1559 l->l_symbolic_searchlist.r_nlist = 1;
1561 /* Now move the existing entries one back. */
1562 memmove (&l->l_scope[1], &l->l_scope[0],
1563 (l->l_scope_max - 1) * sizeof (l->l_scope[0]));
1565 /* Now add the new entry. */
1566 l->l_scope[0] = &l->l_symbolic_searchlist;
1569 /* Remember whether this object must be initialized first. */
1570 if (l->l_flags_1 & DF_1_INITFIRST)
1571 GL(dl_initfirst) = l;
1573 /* Finally the file information. */
1574 l->l_dev = st.st_dev;
1575 l->l_ino = st.st_ino;
1577 /* When we profile, the SONAME might be needed for something other
1578 than loading. Add it right away. */
1579 if (__builtin_expect (GLRO(dl_profile) != NULL, 0)
1580 && l->l_info[DT_SONAME] != NULL)
1581 add_name_to_object (l, ((const char *) D_PTR (l, l_info[DT_STRTAB])
1582 + l->l_info[DT_SONAME]->d_un.d_val));
1584 #ifdef DL_AFTER_LOAD
1588 /* Now that the object is fully initialized add it to the object list. */
1589 _dl_add_to_namespace_list (l, nsid);
1592 /* Auditing checkpoint: we have a new object. */
1593 if (__builtin_expect (GLRO(dl_naudit) > 0, 0)
1594 && !GL(dl_ns)[l->l_ns]._ns_loaded->l_auditing)
1596 struct audit_ifaces *afct = GLRO(dl_audit);
1597 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
1599 if (afct->objopen != NULL)
1601 l->l_audit[cnt].bindflags
1602 = afct->objopen (l, nsid, &l->l_audit[cnt].cookie);
1604 l->l_audit_any_plt |= l->l_audit[cnt].bindflags != 0;
1615 /* Print search path. */
1617 print_search_path (struct r_search_path_elem **list,
1618 const char *what, const char *name)
1620 char buf[max_dirnamelen + max_capstrlen];
1623 _dl_debug_printf (" search path=");
1625 while (*list != NULL && (*list)->what == what) /* Yes, ==. */
1627 char *endp = __mempcpy (buf, (*list)->dirname, (*list)->dirnamelen);
1630 for (cnt = 0; cnt < ncapstr; ++cnt)
1631 if ((*list)->status[cnt] != nonexisting)
1633 char *cp = __mempcpy (endp, capstr[cnt].str, capstr[cnt].len);
1634 if (cp == buf || (cp == buf + 1 && buf[0] == '/'))
1639 _dl_debug_printf_c (first ? "%s" : ":%s", buf);
1647 _dl_debug_printf_c ("\t\t(%s from file %s)\n", what,
1648 DSO_FILENAME (name));
1650 _dl_debug_printf_c ("\t\t(%s)\n", what);
1653 /* Open a file and verify it is an ELF file for this architecture. Only
1654 ELF files for other architectures are silently ignored. Non-ELF files and
1655 ELF files with different header information cause fatal errors since
1656 this could mean there is something wrong in the installation and the
1657 user might want to know about this. */
1659 open_verify (const char *name, struct filebuf *fbp, struct link_map *loader,
1660 int whatcode, bool *found_other_class, bool free_name)
1662 /* This is the expected ELF header. */
1663 #define ELF32_CLASS ELFCLASS32
1664 #define ELF64_CLASS ELFCLASS64
1665 #ifndef VALID_ELF_HEADER
1666 # define VALID_ELF_HEADER(hdr,exp,size) (memcmp (hdr, exp, size) == 0)
1667 # define VALID_ELF_OSABI(osabi) (osabi == ELFOSABI_SYSV)
1668 # define VALID_ELF_ABIVERSION(osabi,ver) (ver == 0)
1669 #elif defined MORE_ELF_HEADER_DATA
1670 MORE_ELF_HEADER_DATA;
1672 static const unsigned char expected[EI_NIDENT] =
1674 [EI_MAG0] = ELFMAG0,
1675 [EI_MAG1] = ELFMAG1,
1676 [EI_MAG2] = ELFMAG2,
1677 [EI_MAG3] = ELFMAG3,
1678 [EI_CLASS] = ELFW(CLASS),
1679 [EI_DATA] = byteorder,
1680 [EI_VERSION] = EV_CURRENT,
1681 [EI_OSABI] = ELFOSABI_SYSV,
1686 ElfW(Word) vendorlen;
1690 } expected_note = { 4, 16, 1, "GNU" };
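/* The expected note is a .note.ABI-tag note: vendor name length 4
   ("GNU" plus the terminating NUL), descriptor length 16, type 1
   (NT_GNU_ABI_TAG) and vendor "GNU"; the 16-byte descriptor holds the
   OS tag and the minimum kernel version words checked further down.  */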
1691 /* Initialize it to make the compiler happy. */
1692 const char *errstring = NULL;
1696 /* Give the auditing libraries a chance. */
1697 if (__builtin_expect (GLRO(dl_naudit) > 0, 0) && whatcode != 0
1698 && loader->l_auditing == 0)
1700 struct audit_ifaces *afct = GLRO(dl_audit);
1701 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
1703 if (afct->objsearch != NULL)
1705 name = afct->objsearch (name, &loader->l_audit[cnt].cookie,
1708 /* Ignore the path. */
1717 /* Open the file. We always open files read-only. */
1718 int fd = __open (name, O_RDONLY | O_CLOEXEC);
1722 ElfW(Phdr) *phdr, *ph;
1723 ElfW(Word) *abi_note;
1724 unsigned int osversion;
1727 /* We successfully opened the file. Now verify it is a file
1731 assert (sizeof (fbp->buf) > sizeof (ElfW(Ehdr)));
1732 /* Read in the header. */
1735 ssize_t retlen = __libc_read (fd, fbp->buf + fbp->len,
1736 sizeof (fbp->buf) - fbp->len);
1741 while (__builtin_expect (fbp->len < sizeof (ElfW(Ehdr)), 0));
1743 /* This is where the ELF header is loaded. */
1744 ehdr = (ElfW(Ehdr) *) fbp->buf;
1746 /* Now run the tests. */
1747 if (__builtin_expect (fbp->len < (ssize_t) sizeof (ElfW(Ehdr)), 0))
1750 errstring = (errval == 0
1751 ? N_("file too short") : N_("cannot read file data"));
1755 char *realname = (char *) name;
1756 name = strdupa (realname);
1759 lose (errval, fd, name, NULL, NULL, errstring, NULL, 0);
1762 /* See whether the ELF header is what we expect. */
1763 if (__builtin_expect (! VALID_ELF_HEADER (ehdr->e_ident, expected,
1765 || !VALID_ELF_ABIVERSION (ehdr->e_ident[EI_OSABI],
1766 ehdr->e_ident[EI_ABIVERSION])
1767 || memcmp (&ehdr->e_ident[EI_PAD],
1769 EI_NIDENT - EI_PAD) != 0,
1772 /* Something is wrong. */
1773 const Elf32_Word *magp = (const void *) ehdr->e_ident;
1775 #if BYTE_ORDER == LITTLE_ENDIAN
1776 ((ELFMAG0 << (EI_MAG0 * 8)) |
1777 (ELFMAG1 << (EI_MAG1 * 8)) |
1778 (ELFMAG2 << (EI_MAG2 * 8)) |
1779 (ELFMAG3 << (EI_MAG3 * 8)))
1781 ((ELFMAG0 << (EI_MAG3 * 8)) |
1782 (ELFMAG1 << (EI_MAG2 * 8)) |
1783 (ELFMAG2 << (EI_MAG1 * 8)) |
1784 (ELFMAG3 << (EI_MAG0 * 8)))
1787 errstring = N_("invalid ELF header");
1788 else if (ehdr->e_ident[EI_CLASS] != ELFW(CLASS))
1790 /* This is not a fatal error. On architectures where
1791 32-bit and 64-bit binaries can be run this might
1793 *found_other_class = true;
1796 else if (ehdr->e_ident[EI_DATA] != byteorder)
1798 if (BYTE_ORDER == BIG_ENDIAN)
1799 errstring = N_("ELF file data encoding not big-endian");
1801 errstring = N_("ELF file data encoding not little-endian");
1803 else if (ehdr->e_ident[EI_VERSION] != EV_CURRENT)
1805 = N_("ELF file version ident does not match current one");
1806 /* XXX We should be able to set system specific versions which are
1808 else if (!VALID_ELF_OSABI (ehdr->e_ident[EI_OSABI]))
1809 errstring = N_("ELF file OS ABI invalid");
1810 else if (!VALID_ELF_ABIVERSION (ehdr->e_ident[EI_OSABI],
1811 ehdr->e_ident[EI_ABIVERSION]))
1812 errstring = N_("ELF file ABI version invalid");
1813 else if (memcmp (&ehdr->e_ident[EI_PAD], &expected[EI_PAD],
1814 EI_NIDENT - EI_PAD) != 0)
1815 errstring = N_("nonzero padding in e_ident");
1817 /* Otherwise we don't know what went wrong. */
1818 errstring = N_("internal error");
1823 if (__builtin_expect (ehdr->e_version, EV_CURRENT) != EV_CURRENT)
1825 errstring = N_("ELF file version does not match current one");
1828 if (! __builtin_expect (elf_machine_matches_host (ehdr), 1))
1830 else if (__builtin_expect (ehdr->e_type, ET_DYN) != ET_DYN
1831 && __builtin_expect (ehdr->e_type, ET_EXEC) != ET_EXEC)
1833 errstring = N_("only ET_DYN and ET_EXEC can be loaded");
1836 else if (__builtin_expect (ehdr->e_phentsize, sizeof (ElfW(Phdr)))
1837 != sizeof (ElfW(Phdr)))
1839 errstring = N_("ELF file's phentsize not the expected size");
1843 maplength = ehdr->e_phnum * sizeof (ElfW(Phdr));
1844 if (ehdr->e_phoff + maplength <= (size_t) fbp->len)
1845 phdr = (void *) (fbp->buf + ehdr->e_phoff);
1848 phdr = alloca (maplength);
1849 __lseek (fd, ehdr->e_phoff, SEEK_SET);
1850 if ((size_t) __libc_read (fd, (void *) phdr, maplength) != maplength)
1854 errstring = N_("cannot read file data");
1859 /* Check .note.ABI-tag if present. */
1860 for (ph = phdr; ph < &phdr[ehdr->e_phnum]; ++ph)
1861 if (ph->p_type == PT_NOTE && ph->p_filesz >= 32 && ph->p_align >= 4)
1863 ElfW(Addr) size = ph->p_filesz;
1865 if (ph->p_offset + size <= (size_t) fbp->len)
1866 abi_note = (void *) (fbp->buf + ph->p_offset);
1869 abi_note = alloca (size);
1870 __lseek (fd, ph->p_offset, SEEK_SET);
1871 if (__libc_read (fd, (void *) abi_note, size) != size)
1875 while (memcmp (abi_note, &expected_note, sizeof (expected_note)))
1877 #define ROUND(len) (((len) + sizeof (ElfW(Word)) - 1) & -sizeof (ElfW(Word)))
1878 ElfW(Addr) note_size = 3 * sizeof (ElfW(Word))
1879 + ROUND (abi_note[0])
1880 + ROUND (abi_note[1]);
1882 if (size - 32 < note_size)
1888 abi_note = (void *) abi_note + note_size;
1894 osversion = (abi_note[5] & 0xff) * 65536
1895 + (abi_note[6] & 0xff) * 256
1896 + (abi_note[7] & 0xff);
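/* The three version words of the ABI-tag descriptor are combined into
   a single integer (major * 65536 + minor * 256 + patch) so it can be
   compared against GLRO(dl_osversion) with one test.  */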
1897 if (abi_note[4] != __ABI_TAG_OS
1898 || (GLRO(dl_osversion) && GLRO(dl_osversion) < osversion))
1902 __set_errno (ENOENT);
1913 /* Try to open NAME in one of the directories in *DIRSP.
1914 Return the fd, or -1. If successful, fill in *REALNAME
1915 with the malloc'd full directory name. If it turns out
1916 that none of the directories in *DIRSP exists, *DIRSP is
1917 replaced with (void *) -1, and the old value is free()d
1918 if MAY_FREE_DIRS is true. */
1921 open_path (const char *name, size_t namelen, int secure,
1922 struct r_search_path_struct *sps, char **realname,
1923 struct filebuf *fbp, struct link_map *loader, int whatcode,
1924 bool *found_other_class)
1926 struct r_search_path_elem **dirs = sps->dirs;
1929 const char *current_what = NULL;
1932 if (__builtin_expect (dirs == NULL, 0))
1933 /* We're called before _dl_init_paths when loading the main executable
1934 given on the command line when rtld is run directly. */
1937 buf = alloca (max_dirnamelen + max_capstrlen + namelen);
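/* The scratch buffer is sized for the longest directory name plus the
   longest hwcap subdirectory suffix plus the file name, which is
   exactly how it is filled below: dirname, then capstr[cnt].str, then
   NAME.  */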
1940 struct r_search_path_elem *this_dir = *dirs;
1947 /* If we are debugging the search for libraries print the path
1948 now if it hasn't happened yet. */
1949 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0)
1950 && current_what != this_dir->what)
1952 current_what = this_dir->what;
1953 print_search_path (dirs, current_what, this_dir->where);
1956 edp = (char *) __mempcpy (buf, this_dir->dirname, this_dir->dirnamelen);
1957 for (cnt = 0; fd == -1 && cnt < ncapstr; ++cnt)
1959 /* Skip this directory if we know it does not exist. */
1960 if (this_dir->status[cnt] == nonexisting)
1964 ((char *) __mempcpy (__mempcpy (edp, capstr[cnt].str,
1969 /* Print the name we try if this is wanted. */
1970 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
1971 _dl_debug_printf (" trying file=%s\n", buf);
1973 fd = open_verify (buf, fbp, loader, whatcode, found_other_class,
1975 if (this_dir->status[cnt] == unknown)
1978 this_dir->status[cnt] = existing;
1979 /* Do not update the directory information when loading
1980 auditing code. We must try to disturb the program as
1981 little as possible. */
1982 else if (loader == NULL
1983 || GL(dl_ns)[loader->l_ns]._ns_loaded->l_auditing == 0)
1985 /* We failed to open a machine-dependent library. Let's
1986 test whether there is any directory at all. */
1989 buf[buflen - namelen - 1] = '\0';
1991 if (__xstat64 (_STAT_VER, buf, &st) != 0
1992 || ! S_ISDIR (st.st_mode))
1993 /* The directory does not exist or it is not a directory. */
1994 this_dir->status[cnt] = nonexisting;
1996 this_dir->status[cnt] = existing;
2000 /* Remember whether we found any existing directory. */
2001 here_any |= this_dir->status[cnt] != nonexisting;
2003 if (fd != -1 && __builtin_expect (secure, 0)
2004 && INTUSE(__libc_enable_secure))
2006 /* This is an extra security effort to make sure nobody can
2007 preload broken shared objects which are in the trusted
2008 directories and so exploit the bugs. */
2011 if (__fxstat64 (_STAT_VER, fd, &st) != 0
2012 || (st.st_mode & S_ISUID) == 0)
2014 /* The shared object cannot be tested for being SUID
2015 or this bit is not set. In this case we must not
2019 /* We simply ignore the file; signal this by setting
2020 the error value which would have been set by `open'. */
2028 *realname = (char *) malloc (buflen);
2029 if (*realname != NULL)
2031 memcpy (*realname, buf, buflen);
2036 /* No memory for the name; we certainly won't be able
2037 to load and link it. */
2042 if (here_any && (err = errno) != ENOENT && err != EACCES)
2043 /* The file exists and is readable, but something went wrong. */
2046 /* Remember whether we found anything. */
2049 while (*++dirs != NULL);
2051 /* Remove the whole path if none of the directories exists. */
2052 if (__builtin_expect (! any, 0))
2054 /* Paths which were allocated using the minimal malloc() in ld.so
2055 must not be freed using the general free() in libc. */
2059 /* rtld_search_dirs is attribute_relro, therefore avoid writing
2061 if (sps != &rtld_search_dirs)
2062 sps->dirs = (void *) -1;
2068 /* Map in the shared object file NAME. */
2072 _dl_map_object (struct link_map *loader, const char *name,
2073 int type, int trace_mode, int mode, Lmid_t nsid)
2082 assert (nsid < GL(dl_nns));
2084 /* Look for this name among those already loaded. */
2085 for (l = GL(dl_ns)[nsid]._ns_loaded; l; l = l->l_next)
2087 /* If the requested name matches the soname of a loaded object,
2088 use that object. Elide this check for names that have not
2090 if (__builtin_expect (l->l_faked, 0) != 0
2091 || __builtin_expect (l->l_removed, 0) != 0)
2093 if (!_dl_name_match_p (name, l))
2097 if (__builtin_expect (l->l_soname_added, 1)
2098 || l->l_info[DT_SONAME] == NULL)
2101 soname = ((const char *) D_PTR (l, l_info[DT_STRTAB])
2102 + l->l_info[DT_SONAME]->d_un.d_val);
2103 if (strcmp (name, soname) != 0)
2106 /* We have a match on a new name -- cache it. */
2107 add_name_to_object (l, soname);
2108 l->l_soname_added = 1;
2111 /* We have a match. */
2115 /* Display information if we are debugging. */
2116 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_FILES, 0)
2118 _dl_debug_printf ((mode & __RTLD_CALLMAP) == 0
2119 ? "\nfile=%s [%lu]; needed by %s [%lu]\n"
2120 : "\nfile=%s [%lu]; dynamically loaded by %s [%lu]\n",
2121 name, nsid, DSO_FILENAME (loader->l_name), loader->l_ns);
2124 /* Give the auditing libraries a chance to change the name before we
2126 if (__builtin_expect (GLRO(dl_naudit) > 0, 0)
2127 && (loader == NULL || loader->l_auditing == 0))
2129 struct audit_ifaces *afct = GLRO(dl_audit);
2130 for (unsigned int cnt = 0; cnt < GLRO(dl_naudit); ++cnt)
2132 if (afct->objsearch != NULL)
2134 name = afct->objsearch (name, &loader->l_audit[cnt].cookie,
2138 /* Do not try anything further. */
2149 /* Will be true if we found a DSO which is of the other ELF class. */
2150 bool found_other_class = false;
2152 if (strchr (name, '/') == NULL)
2154 /* Search for NAME in several places. */
2156 size_t namelen = strlen (name) + 1;
2158 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
2159 _dl_debug_printf ("find library=%s [%lu]; searching\n", name, nsid);
2163 /* When the object has the RUNPATH information we don't use any
2165 if (loader == NULL || loader->l_info[DT_RUNPATH] == NULL)
2167 /* This is the executable's map (if there is one). Make sure that
2168 we do not look at it twice. */
2169 struct link_map *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
2170 bool did_main_map = false;
2172 /* First try the DT_RPATH of the dependent object that caused NAME
2173 to be loaded. Then that object's dependent, and on up. */
2174 for (l = loader; l; l = l->l_loader)
2175 if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
2177 fd = open_path (name, namelen, mode & __RTLD_SECURE,
2179 &realname, &fb, loader, LA_SER_RUNPATH,
2180 &found_other_class);
2184 did_main_map |= l == main_map;
2187 /* If dynamically linked, try the DT_RPATH of the executable
2188 itself. NB: we do this for lookups in any namespace. */
2189 if (fd == -1 && !did_main_map
2190 && main_map != NULL && main_map->l_type != lt_loaded
2191 && cache_rpath (main_map, &main_map->l_rpath_dirs, DT_RPATH,
2193 fd = open_path (name, namelen, mode & __RTLD_SECURE,
2194 &main_map->l_rpath_dirs,
2195 &realname, &fb, loader ?: main_map, LA_SER_RUNPATH,
2196 &found_other_class);
2199 /* Try the LD_LIBRARY_PATH environment variable. */
2200 if (fd == -1 && env_path_list.dirs != (void *) -1)
2201 fd = open_path (name, namelen, mode & __RTLD_SECURE, &env_path_list,
2203 loader ?: GL(dl_ns)[LM_ID_BASE]._ns_loaded,
2204 LA_SER_LIBPATH, &found_other_class);
2206 /* Look at the RUNPATH information for this binary. */
2207 if (fd == -1 && loader != NULL
2208 && cache_rpath (loader, &loader->l_runpath_dirs,
2209 DT_RUNPATH, "RUNPATH"))
2210 fd = open_path (name, namelen, mode & __RTLD_SECURE,
2211 &loader->l_runpath_dirs, &realname, &fb, loader,
2212 LA_SER_RUNPATH, &found_other_class);
2216 && (__builtin_expect (! (mode & __RTLD_SECURE), 1)
2217 || ! INTUSE(__libc_enable_secure))
2218 && __builtin_expect (GLRO(dl_inhibit_cache) == 0, 1))
2220 /* Check the list of libraries in the file /etc/ld.so.cache,
2221 for compatibility with Linux's ldconfig program. */
2222 const char *cached = _dl_load_cache_lookup (name);
2227 // XXX Correct to unconditionally default to namespace 0?
2229 ?: GL(dl_ns)[LM_ID_BASE]._ns_loaded
2230 ?: &GL(dl_rtld_map));
2235 /* If the loader has the DF_1_NODEFLIB flag set we must not
2236 use a cache entry from any of these directories. */
2239 /* 'l' is always != NULL for dynamically linked objects. */
2242 __builtin_expect (l->l_flags_1 & DF_1_NODEFLIB, 0))
2244 const char *dirp = system_dirs;
2245 unsigned int cnt = 0;
2249 if (memcmp (cached, dirp, system_dirs_len[cnt]) == 0)
2251 /* The prefix matches. Don't use the entry. */
2256 dirp += system_dirs_len[cnt] + 1;
2259 while (cnt < nsystem_dirs_len);
2264 fd = open_verify (cached,
2265 &fb, loader ?: GL(dl_ns)[nsid]._ns_loaded,
2266 LA_SER_CONFIG, &found_other_class, false);
2267 if (__builtin_expect (fd != -1, 1))
2269 realname = local_strdup (cached);
2270 if (realname == NULL)
2281 /* Finally, try the default path. */
2283 && ((l = loader ?: GL(dl_ns)[nsid]._ns_loaded) == NULL
2284 || __builtin_expect (!(l->l_flags_1 & DF_1_NODEFLIB), 1))
2285 && rtld_search_dirs.dirs != (void *) -1)
2286 fd = open_path (name, namelen, mode & __RTLD_SECURE, &rtld_search_dirs,
2287 &realname, &fb, l, LA_SER_DEFAULT, &found_other_class);
2289 /* Add another newline when we are tracing the library loading. */
2290 if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_LIBS, 0))
2291 _dl_debug_printf ("\n");
2295 /* The path may contain dynamic string tokens. */
2297 ? expand_dynamic_string_token (loader, name, 0)
2298 : local_strdup (name));
2299 if (realname == NULL)
2303 fd = open_verify (realname, &fb,
2304 loader ?: GL(dl_ns)[nsid]._ns_loaded, 0,
2305 &found_other_class, true);
2306 if (__builtin_expect (fd, 0) == -1)
2314 /* In case the LOADER information has only been provided to get to
2315 the appropriate RUNPATH/RPATH information we do not need it
2317 if (mode & __RTLD_CALLMAP)
2320 if (__builtin_expect (fd, 0) == -1)
2323 && __builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_PRELINK, 0) == 0)
2325 /* We haven't found an appropriate library. But since we
2326 are only interested in the list of libraries this isn't
2327 so severe. Fake an entry with all the information we
2329 static const Elf_Symndx dummy_bucket = STN_UNDEF;
2331 /* Allocate a new object map. */
2332 if ((name_copy = local_strdup (name)) == NULL
2333 || (l = _dl_new_object (name_copy, name, type, loader,
2334 mode, nsid)) == NULL)
2337 _dl_signal_error (ENOMEM, name, NULL,
2338 N_("cannot create shared object descriptor"));
2340 /* Signal that this is a faked entry. */
2342 /* Since the descriptor is initialized with zero we do not
2344 l->l_reserved = 0; */
2345 l->l_buckets = &dummy_bucket;
2349 /* Enter the object in the object list. */
2350 _dl_add_to_namespace_list (l, nsid);
2354 else if (found_other_class)
2355 _dl_signal_error (0, name, NULL,
2356 ELFW(CLASS) == ELFCLASS32
2357 ? N_("wrong ELF class: ELFCLASS64")
2358 : N_("wrong ELF class: ELFCLASS32"));
2360 _dl_signal_error (errno, name, NULL,
2361 N_("cannot open shared object file"));
2364 void *stack_end = __libc_stack_end;
2365 return _dl_map_object_from_fd (name, fd, &fb, realname, loader, type, mode,
2372 _dl_rtld_di_serinfo (struct link_map *loader, Dl_serinfo *si, bool counting)
2380 unsigned int idx = 0;
2381 char *allocptr = (char *) &si->dls_serpath[si->dls_cnt];
2382 void add_path (const struct r_search_path_struct *sps, unsigned int flags)
2383 # define add_path(sps, flags) add_path(sps, 0) /* XXX */
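/* The macro makes every call pass 0 for FLAGS because the dls_flags
   values are not really assigned yet (hence the XXX); depending on
   COUNTING the nested function either only accumulates dls_cnt and
   dls_size or actually fills in the Dl_serpath entries.  */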
2385 if (sps->dirs != (void *) -1)
2387 struct r_search_path_elem **dirs = sps->dirs;
2390 const struct r_search_path_elem *const r = *dirs++;
2394 si->dls_size += MAX (2, r->dirnamelen);
2398 Dl_serpath *const sp = &si->dls_serpath[idx++];
2399 sp->dls_name = allocptr;
2400 if (r->dirnamelen < 2)
2401 *allocptr++ = r->dirnamelen ? '/' : '.';
2403 allocptr = __mempcpy (allocptr,
2404 r->dirname, r->dirnamelen - 1);
2406 sp->dls_flags = flags;
2409 while (*dirs != NULL);
2413 /* When the object has the RUNPATH information we don't use any RPATHs. */
2414 if (loader->l_info[DT_RUNPATH] == NULL)
2416 /* First try the DT_RPATH of the dependent object that caused NAME
2417 to be loaded. Then that object's dependent, and on up. */
2419 struct link_map *l = loader;
2422 if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
2423 add_path (&l->l_rpath_dirs, XXX_RPATH);
2428 /* If dynamically linked, try the DT_RPATH of the executable itself. */
2429 if (loader->l_ns == LM_ID_BASE)
2431 l = GL(dl_ns)[LM_ID_BASE]._ns_loaded;
2432 if (l != NULL && l->l_type != lt_loaded && l != loader)
2433 if (cache_rpath (l, &l->l_rpath_dirs, DT_RPATH, "RPATH"))
2434 add_path (&l->l_rpath_dirs, XXX_RPATH);
2438 /* Try the LD_LIBRARY_PATH environment variable. */
2439 add_path (&env_path_list, XXX_ENV);
2441 /* Look at the RUNPATH information for this binary. */
2442 if (cache_rpath (loader, &loader->l_runpath_dirs, DT_RUNPATH, "RUNPATH"))
2443 add_path (&loader->l_runpath_dirs, XXX_RUNPATH);
2446 Here is where ld.so.cache gets checked, but we don't have
2447 a way to indicate that in the results for Dl_serinfo. */
2449 /* Finally, try the default path. */
2450 if (!(loader->l_flags_1 & DF_1_NODEFLIB))
2451 add_path (&rtld_search_dirs, XXX_default);
2454 /* Count the struct size before the string area, which we didn't
2455 know before we completed dls_cnt. */
2456 si->dls_size += (char *) &si->dls_serpath[si->dls_cnt] - (char *) si;