/* Locating objects in the process image.  ld.so implementation.
   Copyright (C) 2021-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <atomic.h>
#include <atomic_wide_counter.h>
#include <dl-find_object.h>
#include <dlfcn.h>
#include <ldsodefs.h>
#include <libc-pointer-arith.h>
#include <link.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Fallback implementation of _dl_find_object.  It uses a linear
   search, needs locking, and is not async-signal-safe.  It is used in
   _dl_find_object prior to initialization, when called from audit
   modules.  It also serves as the reference implementation for
   _dl_find_object.  */
static int
_dl_find_object_slow (void *pc, struct dl_find_object *result)
{
  ElfW(Addr) addr = (ElfW(Addr)) pc;
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (struct link_map *l = GL(dl_ns)[ns]._ns_loaded; l != NULL;
         l = l->l_next)
      if (addr >= l->l_map_start && addr < l->l_map_end
          && (l->l_contiguous || _dl_addr_inside_object (l, addr)))
        {
          assert (ns == l->l_ns);
          struct dl_find_object_internal internal;
          _dl_find_object_from_map (l, &internal);
          _dl_find_object_to_external (&internal, result);
          return 0;
        }

  /* Object not found.  */
  return -1;
}

/* Data for the main executable.  There is usually a large gap between
   the main executable and initially loaded shared objects.  Record
   the main executable separately, to increase the chance that the
   range for the non-closeable mappings below covers only the shared
   objects (and not also the gap between main executable and shared
   objects).  */
static struct dl_find_object_internal _dlfo_main attribute_relro;

/* Data for initially loaded shared objects that cannot be unloaded.
   (This may also contain non-contiguous mappings from the main
   executable.)  The mappings are stored in address order in the
   _dlfo_nodelete_mappings array (containing
   _dlfo_nodelete_mappings_size elements).  It is not modified after
   initialization.  */
static uintptr_t _dlfo_nodelete_mappings_end attribute_relro;
static size_t _dlfo_nodelete_mappings_size attribute_relro;
static struct dl_find_object_internal *_dlfo_nodelete_mappings
  attribute_relro;

/* Mappings created by dlopen can go away with dlclose, so a dynamic
   data structure with some synchronization is needed.  Individual
   segments are similar to the _dlfo_nodelete_mappings array above.
   The previous segment contains lower addresses and is at most half
   as long.  Checking the address of the base address of the first
   element during a lookup can therefore approximate a binary search
   over all segments, even though the data is not stored in one
   contiguous array.

   During updates, the segments are overwritten in place, and a
   software transactional memory construct (involving the
   _dlfo_loaded_mappings_version variable) is used to detect
   concurrent modification, and retry as necessary.  The memory
   allocations are never deallocated, but slots used for objects that
   have been dlclose'd can be reused by dlopen.  The memory can live
   in the regular C malloc heap.

   The segments are populated from the start of the list, with the
   mappings with the highest address.  Only if this segment is full,
   previous segments are used for mappings at lower addresses.  The
   remaining segments are populated as needed, but after allocating
   further segments, some of the initial segments (at the end of the
   linked list) can be empty (with size 0).

   Adding new elements to this data structure is another source of
   quadratic behavior for dlopen.  If the other causes of quadratic
   behavior are eliminated, a more complicated data structure will be
   needed.  */
struct dlfo_mappings_segment
{
  /* The previous segment has lower base addresses.  */
  struct dlfo_mappings_segment *previous;

  /* Used by __libc_freeres to deallocate malloc'ed memory.  */
  void *to_free;

  /* Count of array elements in use and allocated.  */
  size_t size;
  size_t allocated;

  struct dl_find_object_internal objects[];
};
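
/* For example, after a few dlopen calls a segment chain might consist
   of segments with capacities 252 -> 126 -> 63, following the
   previous pointers.  The head segment holds the mappings with the
   highest base addresses; each previous segment is at most half as
   large and holds lower addresses, so comparing PC against
   objects[0].map_start of each segment approximates one level of a
   binary search.  */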

/* To achieve async-signal-safety, two copies of the data structure
   are used, so that a signal handler can still use this data even if
   dlopen or dlclose modify the other copy.  The lowest bit in
   _dlfo_loaded_mappings_version determines which array element is the
   currently active region.  */
static struct dlfo_mappings_segment *_dlfo_loaded_mappings[2];

/* Returns the number of actually used elements in all segments
   starting at SEG.  */
static inline size_t
_dlfo_mappings_segment_count_used (struct dlfo_mappings_segment *seg)
{
  size_t count = 0;
  for (; seg != NULL && seg->size > 0; seg = seg->previous)
    for (size_t i = 0; i < seg->size; ++i)
      /* Exclude elements which have been dlclose'd.  */
      count += seg->objects[i].map != NULL;
  return count;
}

/* Compute the total number of available allocated segments linked
   from SEG.  */
static inline size_t
_dlfo_mappings_segment_count_allocated (struct dlfo_mappings_segment *seg)
{
  size_t count = 0;
  for (; seg != NULL; seg = seg->previous)
    count += seg->allocated;
  return count;
}

/* This is essentially an arbitrary value.  dlopen allocates plenty of
   memory anyway, so over-allocating a bit does not hurt.  Not having
   many small-ish segments helps to avoid many small binary searches.
   Not using a power of 2 means that we do not waste an extra page
   just for the malloc header if a mapped allocation is used in the
   glibc allocator.  */
enum { dlfo_mappings_initial_segment_size = 63 };

/* Allocate an empty segment.  This is used for the first ever
   allocation.  */
static struct dlfo_mappings_segment *
_dlfo_mappings_segment_allocate_unpadded (size_t size)
{
  if (size < dlfo_mappings_initial_segment_size)
    size = dlfo_mappings_initial_segment_size;
  /* No overflow checks here because the size is a mapping count, and
     struct link_map is larger than what we allocate here.  */
  enum
    {
      element_size = sizeof ((struct dlfo_mappings_segment) {}.objects[0])
    };
  size_t to_allocate = (sizeof (struct dlfo_mappings_segment)
                        + size * element_size);
  struct dlfo_mappings_segment *result = malloc (to_allocate);
  if (result != NULL)
    {
      result->previous = NULL;
      result->to_free = NULL; /* Minimal malloc memory cannot be freed.  */
      result->size = 0;
      result->allocated = size;
    }
  return result;
}

/* Allocate an empty segment that is at least SIZE large.  PREVIOUS
   points to the chain of previously allocated segments and can be
   NULL.  */
static struct dlfo_mappings_segment *
_dlfo_mappings_segment_allocate (size_t size,
                                 struct dlfo_mappings_segment *previous)
{
  /* Exponential sizing policy, so that lookup approximates a binary
     search.  */
  {
    size_t minimum_growth;
    if (previous == NULL)
      minimum_growth = dlfo_mappings_initial_segment_size;
    else
      minimum_growth = 2 * previous->allocated;
    if (size < minimum_growth)
      size = minimum_growth;
  }
  enum { cache_line_size_estimate = 128 };
  /* No overflow checks here because the size is a mapping count, and
     struct link_map is larger than what we allocate here.  */
  enum
    {
      element_size = sizeof ((struct dlfo_mappings_segment) {}.objects[0])
    };
  size_t to_allocate = (sizeof (struct dlfo_mappings_segment)
                        + size * element_size
                        + 2 * cache_line_size_estimate);
  char *ptr = malloc (to_allocate);
  if (ptr == NULL)
    return NULL;
  char *original_ptr = ptr;
  /* Start and end at a (conservative) 128-byte cache line boundary.
     Do not use memalign for compatibility with partially interposing
     malloc implementations.  */
  char *end = PTR_ALIGN_DOWN (ptr + to_allocate, cache_line_size_estimate);
  ptr = PTR_ALIGN_UP (ptr, cache_line_size_estimate);
  struct dlfo_mappings_segment *result
    = (struct dlfo_mappings_segment *) ptr;
  result->previous = previous;
  result->to_free = original_ptr;
  result->size = 0;
  /* We may have obtained slightly more space if malloc happened
     to provide an over-aligned pointer.  */
  result->allocated = (((uintptr_t) (end - ptr)
                        - sizeof (struct dlfo_mappings_segment))
                       / element_size);
  assert (result->allocated >= size);
  return result;
}

/* Monotonic counter for software transactional memory.  The lowest
   bit indicates which element of the _dlfo_loaded_mappings contains
   the currently active version.  */
static __atomic_wide_counter _dlfo_loaded_mappings_version;

/* TM version at the start of the read operation.  */
static inline uint64_t
_dlfo_read_start_version (void)
{
  /* Acquire MO load synchronizes with the fences at the beginning and
     end of the TM update region.  */
  return __atomic_wide_counter_load_acquire (&_dlfo_loaded_mappings_version);
}

/* Optimized variant of _dlfo_read_start_version which can be called
   when the loader is write-locked.  */
static inline uint64_t
_dlfo_read_version_locked (void)
{
  return __atomic_wide_counter_load_relaxed (&_dlfo_loaded_mappings_version);
}

/* Update the version to reflect that an update is happening.  This
   does not change the bit that controls the active segment chain.
   Returns the index of the currently active segment chain.  */
static inline unsigned int
_dlfo_mappings_begin_update (void)
{
  unsigned int v
    = __atomic_wide_counter_fetch_add_relaxed (&_dlfo_loaded_mappings_version,
                                               2);
  /* Subsequent stores to the TM data must not be reordered before the
     store above with the version update.  */
  atomic_thread_fence_release ();
  return v & 1;
}

/* Installs the just-updated version as the active version.  */
static inline void
_dlfo_mappings_end_update (void)
{
  /* The previous writes to the TM data must not be reordered after
     the version update below.  */
  atomic_thread_fence_release ();
  __atomic_wide_counter_fetch_add_relaxed (&_dlfo_loaded_mappings_version,
                                           1);
}

/* Completes an in-place update without switching versions.  */
static inline void
_dlfo_mappings_end_update_no_switch (void)
{
  /* The previous writes to the TM data must not be reordered after
     the version update below.  */
  atomic_thread_fence_release ();
  __atomic_wide_counter_fetch_add_relaxed (&_dlfo_loaded_mappings_version,
                                           2);
}
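
/* The helpers above form what is essentially a seqlock:
   _dlfo_mappings_begin_update adds 2, so the version changes while
   the active-chain bit stays the same; _dlfo_mappings_end_update adds
   1, flipping the lowest bit and thereby publishing the other chain;
   _dlfo_mappings_end_update_no_switch adds 2, keeping the current
   chain active.  For example, a version sequence 4 -> 6 -> 7 means
   that readers keep using chain 0 while chain 1 is written, and
   switch to chain 1 once the version becomes odd; a reader that
   started at version 4 sees a different version and retries.  */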

/* Return true if the read was successful, given the start
   version.  */
static inline bool
_dlfo_read_success (uint64_t start_version)
{
  return _dlfo_read_start_version () == start_version;
}

/* Returns the active segment identified by the specified start
   version.  */
static struct dlfo_mappings_segment *
_dlfo_mappings_active_segment (uint64_t start_version)
{
  return _dlfo_loaded_mappings[start_version & 1];
}

/* Searches PC among the address-sorted array [FIRST1, FIRST1 +
   SIZE).  Assumes PC >= FIRST1->map_start.  Returns a pointer to the
   element that contains PC, or NULL if there is no such element.  */
static inline struct dl_find_object_internal *
_dlfo_lookup (uintptr_t pc, struct dl_find_object_internal *first1, size_t size)
{
  struct dl_find_object_internal *end = first1 + size;

  /* Search for a lower bound in first.  */
  struct dl_find_object_internal *first = first1;
  while (size > 0)
    {
      size_t half = size >> 1;
      struct dl_find_object_internal *middle = first + half;
      if (middle->map_start < pc)
        {
          first = middle + 1;
          size -= half + 1;
        }
      else
        size = half;
    }

  if (first != end && pc == first->map_start)
    {
      if (pc < first->map_end)
        return first;
      else
        /* Zero-length mapping after dlclose.  */
        return NULL;
    }
  else
    {
      /* Check to see if PC is in the previous mapping.  */
      --first;
      if (pc < first->map_end)
        /* pc >= first->map_start implied by the search above.  */
        return first;
      else
        return NULL;
    }
}
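
/* For instance, with mappings starting at 0x1000, 0x5000 and 0x9000
   and PC == 0x5123, the lower-bound search above stops at the 0x9000
   element (the first map_start that is not below PC); since its start
   does not equal PC, the code steps back to the 0x5000 element and
   returns it provided PC is below that mapping's map_end.  */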

int
_dl_find_object (void *pc1, struct dl_find_object *result)
{
  uintptr_t pc = (uintptr_t) pc1;

  if (__glibc_unlikely (_dlfo_main.map_end == 0))
    {
      /* Not initialized.  No locking is needed here because this can
         only be called from audit modules, which cannot create
         threads.  */
      return _dl_find_object_slow (pc1, result);
    }

  /* Main executable.  */
  if (pc >= _dlfo_main.map_start && pc < _dlfo_main.map_end)
    {
      _dl_find_object_to_external (&_dlfo_main, result);
      return 0;
    }

  /* Other initially loaded objects.  */
  if (pc >= _dlfo_nodelete_mappings->map_start
      && pc < _dlfo_nodelete_mappings_end)
    {
      struct dl_find_object_internal *obj
        = _dlfo_lookup (pc, _dlfo_nodelete_mappings,
                        _dlfo_nodelete_mappings_size);
      if (obj != NULL)
        {
          _dl_find_object_to_external (obj, result);
          return 0;
        }
      /* Fall through to the full search.  The kernel may have mapped
         the initial mappings with gaps that are later filled by
         dlopen with other mappings.  */
    }

  /* Handle audit modules, dlmopen, and dlopen objects.  This uses
     software transactional memory, with a retry loop in case the
     version changes during execution.  */
  while (true)
    {
      uint64_t start_version = _dlfo_read_start_version ();

      /* The read through seg->previous assumes that the CPU
         recognizes the load dependency, so that no invalid size
         value is read.  Furthermore, the code assumes that no
         out-of-thin-air value for seg->size is observed.  Together,
         this ensures that the observed seg->size value is always less
         than seg->allocated, so that _dlfo_lookup does not read
         out-of-bounds.  (This avoids intermediate TM version
         verification.  A concurrent version update will lead to
         invalid lookup results, but not to out-of-bounds access.)

         Either seg == NULL or seg->size == 0 terminates the segment
         list.  _dl_find_object_update does not bother to clear the
         size on earlier unused segments.  */
      for (struct dlfo_mappings_segment *seg
             = _dlfo_mappings_active_segment (start_version);
           seg != NULL && seg->size > 0; seg = seg->previous)
        if (pc >= seg->objects[0].map_start)
          {
            /* PC may lie within this segment.  If it is less than the
               segment start address, it can only lie in a previous
               segment, due to the base address sorting.  */
            struct dl_find_object_internal *obj
              = _dlfo_lookup (pc, seg->objects, seg->size);

            if (obj != NULL)
              {
                /* Found the right mapping.  Copy out the data prior to
                   checking if the read transaction was successful.  */
                struct dl_find_object_internal copy = *obj;
                if (_dlfo_read_success (start_version))
                  {
                    _dl_find_object_to_external (&copy, result);
                    return 0;
                  }
                else
                  /* Read transaction failure.  Retry.  */
                  break;
              }
            else
              {
                /* PC is not covered by this mapping.  */
                if (_dlfo_read_success (start_version))
                  return -1;
                else
                  /* Read transaction failure.  Retry.  */
                  break;
              }
          } /* if: PC might lie within the current seg.  */

      /* PC is not covered by any segment.  The check below also fails
         (and triggers a retry) if the loop above was left via break
         after a read transaction failure.  */
      if (_dlfo_read_success (start_version))
        return -1;
    } /* Transaction retry loop.  */
}
rtld_hidden_def (_dl_find_object)
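
/* A minimal usage sketch (assuming _GNU_SOURCE and <dlfcn.h>, which
   declare _dl_find_object and struct dl_find_object): an unwinder
   typically calls

     struct dl_find_object dlfo;
     if (_dl_find_object ((void *) pc, &dlfo) == 0)
       // Success: dlfo.dlfo_map_start/dlfo.dlfo_map_end bound the
       // mapping, dlfo.dlfo_link_map identifies the object, and
       // dlfo.dlfo_eh_frame points to the exception handling data
       // (PT_GNU_EH_FRAME).
       ...;
     else
       // No object contains PC.
       ...;

   The lookup is async-signal-safe and does not take the loader
   lock.  */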

/* _dlfo_process_initial is called twice.  First to compute the array
   sizes from the initial loaded mappings.  Second to fill in the
   bases and infos arrays with the (still unsorted) data.  Returns the
   number of loaded (non-nodelete) mappings.  */
static size_t
_dlfo_process_initial (void)
{
  struct link_map *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

  size_t nodelete = 0;
  if (!main_map->l_contiguous)
    {
      struct dl_find_object_internal dlfo;
      _dl_find_object_from_map (main_map, &dlfo);

      /* PT_LOAD segments for a non-contiguous main map are added to
         the non-closeable mappings.  */
      for (const ElfW(Phdr) *ph = main_map->l_phdr,
             *ph_end = main_map->l_phdr + main_map->l_phnum;
           ph < ph_end; ++ph)
        if (ph->p_type == PT_LOAD)
          {
            if (_dlfo_nodelete_mappings != NULL)
              {
                /* Second pass only.  */
                _dlfo_nodelete_mappings[nodelete] = dlfo;
                _dlfo_nodelete_mappings[nodelete].map_start
                  = ph->p_vaddr + main_map->l_addr;
                _dlfo_nodelete_mappings[nodelete].map_end
                  = _dlfo_nodelete_mappings[nodelete].map_start + ph->p_memsz;
              }
            ++nodelete;
          }
    }

  size_t loaded = 0;
  for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
    for (struct link_map *l = GL(dl_ns)[ns]._ns_loaded; l != NULL;
         l = l->l_next)
      /* Skip the main map processed above, and proxy maps.  */
      if (l != main_map && l == l->l_real)
        {
          /* lt_library link maps are implicitly NODELETE.  */
          if (l->l_type == lt_library || l->l_nodelete_active)
            {
              if (_dlfo_nodelete_mappings != NULL)
                /* Second pass only.  */
                _dl_find_object_from_map
                  (l, _dlfo_nodelete_mappings + nodelete);
              ++nodelete;
            }
          else if (l->l_type == lt_loaded)
            {
              if (_dlfo_loaded_mappings[0] != NULL)
                /* Second pass only.  */
                _dl_find_object_from_map
                  (l, &_dlfo_loaded_mappings[0]->objects[loaded]);
              ++loaded;
            }
        }

  _dlfo_nodelete_mappings_size = nodelete;
  return loaded;
}

/* Selection sort based on mapping start address.  */
static void
_dlfo_sort_mappings (struct dl_find_object_internal *objects, size_t size)
{
  if (size < 2)
    return;

  for (size_t i = 0; i < size - 1; ++i)
    {
      /* Find minimum.  */
      size_t min_idx = i;
      uintptr_t min_val = objects[i].map_start;
      for (size_t j = i + 1; j < size; ++j)
        if (objects[j].map_start < min_val)
          {
            min_idx = j;
            min_val = objects[j].map_start;
          }

      /* Swap into place.  */
      struct dl_find_object_internal tmp = objects[min_idx];
      objects[min_idx] = objects[i];
      objects[i] = tmp;
    }
}

void
_dl_find_object_init (void)
{
  /* Cover the main mapping.  */
  {
    struct link_map *main_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

    if (main_map->l_contiguous)
      _dl_find_object_from_map (main_map, &_dlfo_main);
    else
      {
        /* Non-contiguous main maps are handled in
           _dlfo_process_initial.  Mark as initialized, but not
           covering any valid PC.  */
        _dlfo_main.map_start = -1;
        _dlfo_main.map_end = -1;
      }
  }

  /* Allocate the data structures.  */
  size_t loaded_size = _dlfo_process_initial ();
  _dlfo_nodelete_mappings = malloc (_dlfo_nodelete_mappings_size
                                    * sizeof (*_dlfo_nodelete_mappings));
  if (loaded_size > 0)
    _dlfo_loaded_mappings[0]
      = _dlfo_mappings_segment_allocate_unpadded (loaded_size);
  if (_dlfo_nodelete_mappings == NULL
      || (loaded_size > 0 && _dlfo_loaded_mappings[0] == NULL))
    _dl_fatal_printf ("\
Fatal glibc error: cannot allocate memory for find-object data\n");

  /* Fill in the data with the second call.  */
  _dlfo_nodelete_mappings_size = 0;
  _dlfo_process_initial ();

  /* Sort both arrays.  */
  if (_dlfo_nodelete_mappings_size > 0)
    {
      _dlfo_sort_mappings (_dlfo_nodelete_mappings,
                           _dlfo_nodelete_mappings_size);
      size_t last_idx = _dlfo_nodelete_mappings_size - 1;
      _dlfo_nodelete_mappings_end = _dlfo_nodelete_mappings[last_idx].map_end;
    }
  if (loaded_size > 0)
    {
      _dlfo_loaded_mappings[0]->size = loaded_size;
      _dlfo_sort_mappings (_dlfo_loaded_mappings[0]->objects,
                           _dlfo_loaded_mappings[0]->size);
    }
}

static void
_dl_find_object_link_map_sort (struct link_map **loaded, size_t size)
{
  /* Selection sort based on map_start.  */
  if (size < 2)
    return;
  for (size_t i = 0; i < size - 1; ++i)
    {
      /* Find minimum.  */
      size_t min_idx = i;
      ElfW(Addr) min_val = loaded[i]->l_map_start;
      for (size_t j = i + 1; j < size; ++j)
        if (loaded[j]->l_map_start < min_val)
          {
            min_idx = j;
            min_val = loaded[j]->l_map_start;
          }

      /* Swap into place.  */
      struct link_map *tmp = loaded[min_idx];
      loaded[min_idx] = loaded[i];
      loaded[i] = tmp;
    }
}

/* Initializes the segment for writing.  Returns the target write
   index (plus 1) in this segment.  The index is chosen so that a
   partially filled segment still has data at index 0.  */
static inline size_t
_dlfo_update_init_seg (struct dlfo_mappings_segment *seg,
                       size_t remaining_to_add)
{
  if (remaining_to_add < seg->allocated)
    /* Partially filled segment.  */
    seg->size = remaining_to_add;
  else
    seg->size = seg->allocated;
  return seg->size;
}
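
/* For example, with 70 mappings left to add and a target segment that
   has room for 126, the call above sets the segment size to 70 and
   returns 70, so writing proceeds backwards from index 69 down to 0
   and the data ends up packed at the start of the segment.  */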

/* Invoked from _dl_find_object_update after sorting.  */
static bool
_dl_find_object_update_1 (struct link_map **loaded, size_t count)
{
  int active_idx = _dlfo_read_version_locked () & 1;

  struct dlfo_mappings_segment *current_seg
    = _dlfo_loaded_mappings[active_idx];
  size_t current_used = _dlfo_mappings_segment_count_used (current_seg);

  struct dlfo_mappings_segment *target_seg
    = _dlfo_loaded_mappings[!active_idx];
  size_t remaining_to_add = current_used + count;

  /* Ensure that the new segment chain has enough space.  */
  {
    size_t new_allocated
      = _dlfo_mappings_segment_count_allocated (target_seg);
    if (new_allocated < remaining_to_add)
      {
        size_t more = remaining_to_add - new_allocated;
        target_seg = _dlfo_mappings_segment_allocate (more, target_seg);
        if (target_seg == NULL)
          /* Out of memory.  Do not end the update and keep the
             current version unchanged.  */
          return false;

        /* Start update cycle.  */
        _dlfo_mappings_begin_update ();

        /* The barrier ensures that a concurrent TM read or fork does
           not see a partially initialized segment.  */
        atomic_store_release (&_dlfo_loaded_mappings[!active_idx], target_seg);
      }
    else
      /* Start update cycle without allocation.  */
      _dlfo_mappings_begin_update ();
  }

  size_t target_seg_index1 = _dlfo_update_init_seg (target_seg,
                                                    remaining_to_add);

  /* Merge the current_seg segment list with the loaded array into the
     target_seg.  Merging occurs backwards, in decreasing l_map_start
     order.  */
  size_t loaded_index1 = count;
  size_t current_seg_index1;
  if (current_seg == NULL)
    current_seg_index1 = 0;
  else
    current_seg_index1 = current_seg->size;
  while (true)
    {
      if (current_seg_index1 == 0)
        {
          /* Switch to the previous segment.  */
          if (current_seg != NULL)
            current_seg = current_seg->previous;
          if (current_seg != NULL)
            {
              current_seg_index1 = current_seg->size;
              if (current_seg_index1 == 0)
                /* No more data in previous segments.  */
                current_seg = NULL;
            }
        }

      if (current_seg != NULL
          && (current_seg->objects[current_seg_index1 - 1].map == NULL))
        {
          /* This mapping has been dlclose'd.  Do not copy it.  */
          --current_seg_index1;
          continue;
        }

      if (loaded_index1 == 0 && current_seg == NULL)
        /* No more data in either source.  */
        break;

      /* Make room for another mapping.  */
      assert (remaining_to_add > 0);
      if (target_seg_index1 == 0)
        {
          /* Switch segments and set the size of the segment.  */
          target_seg = target_seg->previous;
          target_seg_index1 = _dlfo_update_init_seg (target_seg,
                                                     remaining_to_add);
        }

      /* Determine where to store the data.  */
      struct dl_find_object_internal *dlfo
        = &target_seg->objects[target_seg_index1 - 1];

      if (loaded_index1 == 0
          || (current_seg != NULL
              && (loaded[loaded_index1 - 1]->l_map_start
                  < current_seg->objects[current_seg_index1 - 1].map_start)))
        {
          /* Prefer mapping in current_seg.  */
          assert (current_seg_index1 > 0);
          *dlfo = current_seg->objects[current_seg_index1 - 1];
          --current_seg_index1;
        }
      else
        {
          /* Prefer newly loaded link map.  */
          assert (loaded_index1 > 0);
          _dl_find_object_from_map (loaded[loaded_index1 - 1], dlfo);
          loaded[loaded_index1 - 1]->l_find_object_processed = 1;
          --loaded_index1;
        }

      /* Consume space in target segment.  */
      --target_seg_index1;

      --remaining_to_add;
    }

  /* Everything has been added.  */
  assert (remaining_to_add == 0);

  /* The segment must have been filled up to the beginning.  */
  assert (target_seg_index1 == 0);

  /* Prevent searching further into unused segments.  */
  if (target_seg->previous != NULL)
    target_seg->previous->size = 0;

  _dlfo_mappings_end_update ();
  return true;
}
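
/* For example, merging existing mappings at 0x1000 and 0x3000 with
   newly loaded maps at 0x2000 and 0x4000 copies 0x4000, 0x3000,
   0x2000, 0x1000 in that order, at decreasing target indices, so the
   target chain ends up sorted by address; any dlclose'd slots in the
   old chain are skipped and thus reclaimed.  */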

/* Called by dlopen to add newly loaded objects.  Returns false on
   memory allocation failure.  */
bool
_dl_find_object_update (struct link_map *new_map)
{
  /* Copy the newly-loaded link maps into an array for sorting.  */
  size_t count = 0;
  for (struct link_map *l = new_map; l != NULL; l = l->l_next)
    /* Skip proxy maps and already-processed maps.  */
    count += l == l->l_real && !l->l_find_object_processed;
  if (count == 0)
    return true;
  struct link_map **map_array = malloc (count * sizeof (*map_array));
  if (map_array == NULL)
    return false;
  {
    size_t i = 0;
    for (struct link_map *l = new_map; l != NULL; l = l->l_next)
      if (l == l->l_real && !l->l_find_object_processed)
        map_array[i++] = l;
  }

  _dl_find_object_link_map_sort (map_array, count);
  bool ok = _dl_find_object_update_1 (map_array, count);
  free (map_array);
  return ok;
}

void
_dl_find_object_dlclose (struct link_map *map)
{
  uint64_t start_version = _dlfo_read_version_locked ();
  uintptr_t map_start = map->l_map_start;

  /* Directly patch the size information in the mapping to mark it as
     unused.  See the parallel lookup logic in _dl_find_object.  Do
     not check for previous dlclose at the same mapping address
     because that cannot happen (there would have to be an
     intermediate dlopen, which drops size-zero mappings).  */
  for (struct dlfo_mappings_segment *seg
         = _dlfo_mappings_active_segment (start_version);
       seg != NULL && seg->size > 0; seg = seg->previous)
    if (map_start >= seg->objects[0].map_start)
      {
        struct dl_find_object_internal *obj
          = _dlfo_lookup (map_start, seg->objects, seg->size);
        if (obj == NULL)
          /* Ignore missing link maps because of potential shutdown
             issues around __libc_freeres.  */
          return;

        /* The update happens in-place, but given that we do not use
           atomic accesses on the read side, update the version around
           the update to trigger re-validation in concurrent
           readers.  */
        _dlfo_mappings_begin_update ();

        /* Mark as closed.  */
        obj->map_end = obj->map_start;
        obj->map = NULL;

        _dlfo_mappings_end_update_no_switch ();
        return;
      }
}

void
_dl_find_object_freeres (void)
{
  for (int idx = 0; idx < 2; ++idx)
    {
      for (struct dlfo_mappings_segment *seg = _dlfo_loaded_mappings[idx];
           seg != NULL; )
        {
          struct dlfo_mappings_segment *previous = seg->previous;
          free (seg->to_free);
          seg = previous;
        }
      /* Stop searching in shared objects.  */
      _dlfo_loaded_mappings[idx] = 0;
    }
}