2 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
3 * Copyright (c) 1997 by Silicon Graphics. All rights reserved.
5 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
6 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
8 * Permission is hereby granted to use or copy this program
9 * for any purpose, provided the above notices are retained on all copies.
10 * Permission to modify the code and to distribute modified code is granted,
11 * provided the above notices are retained, and a notice that the code was
12 * modified is included with the above copyright notice.
14 * Original author: Bill Janssen
15 * Heavily modified by Hans Boehm and others
19 * This is incredibly OS specific code for tracking down data sections in
20 * dynamic libraries. There appears to be no way of doing this quickly
21 * without groveling through undocumented data structures. We would argue
22 * that this is a bug in the design of the dlopen interface. THIS CODE
23 * MAY BREAK IN FUTURE OS RELEASES. If this matters to you, don't hesitate
24 * to let your vendor know ...
26 * None of this is safe with dlclose and incremental collection.
27 * But then not much of anything is safe in the presence of dlclose.
29 #if !defined(MACOS) && !defined(_WIN32_WCE)
30 # include <sys/types.h>
32 #include "private/gc_priv.h"
34 /* BTL: avoid circular redefinition of dlopen if SOLARIS_THREADS defined */
35 # if (defined(LINUX_THREADS) || defined(SOLARIS_THREADS) \
36 || defined(HPUX_THREADS) || defined(IRIX_THREADS)) && defined(dlopen) \
37 && !defined(GC_USE_LD_WRAP)
38 /* To support threads in Solaris, gc.h interposes on dlopen by */
39 /* defining "dlopen" to be "GC_dlopen", which is implemented below. */
40 /* However, both GC_FirstDLOpenedLinkMap() and GC_dlopen() use the */
41 /* real system dlopen() in their implementation. We first remove */
42 /* gc.h's dlopen definition and restore it later, after GC_dlopen(). */
44 # define GC_must_restore_redefined_dlopen
46 # undef GC_must_restore_redefined_dlopen
49 #if (defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE)) \
51 #if !defined(SUNOS4) && !defined(SUNOS5DL) && !defined(IRIX5) && \
52 !defined(MSWIN32) && !defined(MSWINCE) && \
53 !(defined(ALPHA) && defined(OSF1)) && \
54 !defined(HPUX) && !(defined(LINUX) && defined(__ELF__)) && \
55 !defined(RS6000) && !defined(SCO_ELF) && \
56 !(defined(NETBSD) && defined(__ELF__)) && !defined(HURD)
57 --> We only know how to find data segments of dynamic libraries for the
58 --> above. Additional SVR4 variants might not be too
72 /* struct link_map field overrides */
73 # define l_next lm_next
74 # define l_addr lm_addr
75 # define l_name lm_name
79 #if defined(SUNOS5DL) && !defined(USE_PROC_FOR_LIBRARIES)
/* Return the link_map for the first dlopen()ed object, i.e. the entry   */
/* after the main program in the dynamic linker's list.  Works by        */
/* scanning _DYNAMIC for the DT_DEBUG tag and following                  */
/* r_debug.r_map.  Result is cached after the first successful lookup.   */
/* NOTE(review): this chunk is truncated -- several original lines       */
/* (braces, local declarations such as dp/tag/lm) fall in the gaps       */
/* between the embedded line numbers below; do not edit blind.           */
85 static struct link_map *
86 GC_FirstDLOpenedLinkMap()
88 extern Elf32_Dyn _DYNAMIC;
91 static struct link_map * cachedResult = 0;
92 static Elf32_Dyn *dynStructureAddr = 0;
93 /* BTL: added to avoid Solaris 5.3 ld.so _DYNAMIC bug */
95 # ifdef SUNOS53_SHARED_LIB
96 /* BTL: Avoid the Solaris 5.3 bug that _DYNAMIC isn't being set */
97 /* up properly in dynamically linked .so's. This means we have */
98 /* to use its value in the set of original object files loaded */
99 /* at program startup. */
100 if( dynStructureAddr == 0 ) {
/* dlopen(0, ...) returns a handle for the main program's own symbols. */
101 void* startupSyms = dlopen(0, RTLD_LAZY);
102 dynStructureAddr = (Elf32_Dyn*)dlsym(startupSyms, "_DYNAMIC");
105 dynStructureAddr = &_DYNAMIC;
/* If _DYNAMIC could not be located, there is no dynamic linking info. */
108 if( dynStructureAddr == 0) {
111 if( cachedResult == 0 ) {
/* Walk the dynamic section until the terminating DT_NULL (tag == 0). */
113 for( dp = ((Elf32_Dyn *)(&_DYNAMIC)); (tag = dp->d_tag) != 0; dp++ ) {
114 if( tag == DT_DEBUG ) {
116 = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
/* Skip the head entry (the main program); keep the first library. */
117 if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
125 #endif /* SUNOS5DL ... */
127 /* BTL: added to fix circular dlopen definition if SOLARIS_THREADS defined */
128 # if defined(GC_must_restore_redefined_dlopen)
129 # define dlopen GC_dlopen
132 #if defined(SUNOS4) && !defined(USE_PROC_FOR_LIBRARIES)
/* SunOS 4 (a.out) variant: the linker exposes the loaded-library list  */
/* directly through struct link_dynamic _DYNAMIC.                        */
/* NOTE(review): truncated chunk -- function braces and some statements  */
/* are missing between the numbered lines.                               */
135 struct link_dynamic _DYNAMIC;
138 static struct link_map *
139 GC_FirstDLOpenedLinkMap()
141 extern struct link_dynamic _DYNAMIC;
/* &_DYNAMIC is 0 for statically linked executables (weak symbol).      */
143 if( &_DYNAMIC == 0) {
146 return(_DYNAMIC.ld_un.ld_1->ld_loaded);
149 /* Return the address of the ld.so allocated common symbol */
150 /* with the least address, or 0 if none. */
151 static ptr_t GC_first_common()
154 extern struct link_dynamic _DYNAMIC;
155 struct rtc_symb * curr_symbol;
157 if( &_DYNAMIC == 0) {
/* Walk the runtime common-symbol list kept by ld.so, tracking the      */
/* minimum n_value (load address) seen.                                 */
160 curr_symbol = _DYNAMIC.ldd -> ldd_cp;
161 for (; curr_symbol != 0; curr_symbol = curr_symbol -> rtc_next) {
163 || (ptr_t)(curr_symbol -> rtc_sp -> n_value) < result) {
164 result = (ptr_t)(curr_symbol -> rtc_sp -> n_value);
170 #endif /* SUNOS4 ... */
172 # if defined(SUNOS4) || defined(SUNOS5DL)
173 /* Add dynamic library data sections to the root set. */
174 # if !defined(PCR) && !defined(SOLARIS_THREADS) && defined(THREADS)
176 --> fix mutual exclusion with dlopen
177 # endif /* We assume M3 programs don't call dlopen for now */
180 # ifndef USE_PROC_FOR_LIBRARIES
/* Walk the dlopen()ed link_map chain and register each library's       */
/* writable data as GC roots.  Two per-object formats are handled:      */
/* a.out (struct exec, SUNOS4) and ELF program headers (SUNOS5DL).      */
/* NOTE(review): truncated chunk -- the #ifdef split between the a.out  */
/* and ELF branches, plus several declarations, are in the gaps.        */
181 void GC_register_dynamic_libraries()
183 struct link_map *lm = GC_FirstDLOpenedLinkMap();
186 for (lm = GC_FirstDLOpenedLinkMap();
187 lm != (struct link_map *) 0; lm = lm->l_next)
/* a.out case: data segment runs from N_DATOFF to BSS end. */
192 e = (struct exec *) lm->lm_addr;
194 ((char *) (N_DATOFF(*e) + lm->lm_addr)),
195 ((char *) (N_BSSADDR(*e) + e->a_bss + lm->lm_addr)),
201 unsigned long offset;
/* ELF case: scan program headers for writable PT_LOAD segments. */
205 e = (Elf32_Ehdr *) lm->l_addr;
206 p = ((Elf32_Phdr *)(((char *)(e)) + e->e_phoff));
207 offset = ((unsigned long)(lm->l_addr));
208 for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
209 switch( p->p_type ) {
/* Read-only segments cannot contain pointers that change; skip them. */
212 if( !(p->p_flags & PF_W) ) break;
213 start = ((char *)(p->p_vaddr)) + offset;
/* SUNOS4 also registers the ld.so-allocated common area, found once   */
/* via GC_first_common() and bounded with GC_find_limit().             */
229 static ptr_t common_start = 0;
231 extern ptr_t GC_find_limit();
233 if (common_start == 0) common_start = GC_first_common();
234 if (common_start != 0) {
235 common_end = GC_find_limit(common_start, TRUE);
236 GC_add_roots_inner((char *)common_start, (char *)common_end, TRUE);
242 # endif /* !USE_PROC ... */
245 #if defined(LINUX) && defined(__ELF__) || defined(SCO_ELF) || \
246 (defined(NETBSD) && defined(__ELF__)) || defined(HURD)
249 #ifdef USE_PROC_FOR_LIBRARIES
253 #include <sys/stat.h>
257 #define MAPS_BUF_SIZE (32*1024)
259 extern ssize_t GC_repeat_read(int fd, char *buf, size_t count);
260 /* Repeatedly read until buffer is filled, or EOF is encountered */
261 /* Defined in os_dep.c. */
263 static char *parse_map_entry(char *buf_ptr, word *start, word *end,
264 char *prot_buf, unsigned int *maj_dev);
/* Linux USE_PROC_FOR_LIBRARIES variant: parse /proc/self/maps and add  */
/* every writable mapping to the root set, except mappings that are     */
/* (a) the main stack, (b) the main data segment, (c) a thread stack,   */
/* or (d) already inside the GC heap.  We must not allocate from the    */
/* GC heap here, hence raw open()/read() and alloca() instead of stdio. */
/* NOTE(review): truncated chunk -- several declarations (f, result,    */
/* maps_size, maps_buf, buf_ptr, i, start, end, prot_buf) and loop      */
/* braces fall in the gaps between the numbered lines.                  */
266 void GC_register_dynamic_libraries()
272 char maps_temp[32768];
277 unsigned int maj_dev, min_dev;
278 word least_ha, greatest_ha;
280 word datastart = (word)(DATASTART);
282 /* Read /proc/self/maps */
283 /* Note that we may not allocate, and thus can't use stdio. */
284 f = open("/proc/self/maps", O_RDONLY);
285 if (-1 == f) ABORT("Couldn't open /proc/self/maps");
286 /* stat() doesn't work for /proc/self/maps, so we have to
287 read it to find out how large it is... */
/* First pass: read into the fixed buffer just to measure the size.    */
290 result = GC_repeat_read(f, maps_temp, sizeof(maps_temp));
291 if (result <= 0) ABORT("Couldn't read /proc/self/maps");
293 } while (result == sizeof(maps_temp));
295 if (maps_size > sizeof(maps_temp)) {
296 /* If larger than our buffer, close and re-read it. */
298 f = open("/proc/self/maps", O_RDONLY);
299 if (-1 == f) ABORT("Couldn't open /proc/self/maps");
/* alloca keeps us off the GC heap; the buffer dies with this frame.   */
300 maps_buf = alloca(maps_size);
301 if (NULL == maps_buf) ABORT("/proc/self/maps alloca failed");
302 result = GC_repeat_read(f, maps_buf, maps_size);
303 if (result <= 0) ABORT("Couldn't read /proc/self/maps");
305 /* Otherwise use the fixed size buffer */
306 maps_buf = maps_temp;
310 maps_buf[result] = '\0';
312 /* Compute heap bounds. Should be done by add_to_heap? */
313 least_ha = (word)(-1);
315 for (i = 0; i < GC_n_heap_sects; ++i) {
316 word sect_start = (word)GC_heap_sects[i].hs_start;
317 word sect_end = sect_start + GC_heap_sects[i].hs_bytes;
318 if (sect_start < least_ha) least_ha = sect_start;
319 if (sect_end > greatest_ha) greatest_ha = sect_end;
/* Scratch allocations can extend past the last heap section. */
321 if (greatest_ha < (word)GC_scratch_last_end_ptr)
322 greatest_ha = (word)GC_scratch_last_end_ptr;
/* Main loop: one /proc/self/maps entry per iteration. */
325 buf_ptr = parse_map_entry(buf_ptr, &start, &end, prot_buf, &maj_dev);
326 if (buf_ptr == NULL) return;
328 if (prot_buf[1] == 'w') {
329 /* This is a writable mapping. Add it to */
330 /* the root set unless it is already otherwise */
332 if (start <= (word)GC_stackbottom && end >= (word)GC_stackbottom) {
333 /* Stack mapping; discard */
/* maj_dev != 0 means file-backed, i.e. the executable's own data. */
336 if (start <= datastart && end > datastart && maj_dev != 0) {
337 /* Main data segment; discard */
341 if (GC_segment_is_thread_stack(start, end)) continue;
343 /* The rest of this assumes that there is no mapping */
344 /* spanning the beginning of the data segment, or extending */
345 /* beyond the entire heap at both ends. */
346 /* Empirically these assumptions hold. */
348 if (start < (word)DATAEND && end > (word)DATAEND) {
349 /* Rld may use space at the end of the main data */
350 /* segment. Thus we add that in. */
351 start = (word)DATAEND;
/* Clip mappings that partially overlap the GC heap; drop ones fully  */
/* inside it.                                                          */
353 if (start < least_ha && end > least_ha) {
356 if (start < greatest_ha && end > greatest_ha) {
359 if (start >= least_ha && end <= greatest_ha) continue;
360 GC_add_roots_inner((char *)start, (char *)end, TRUE);
366 // parse_map_entry parses an entry from /proc/self/maps so we can
367 // locate all writable data segments that belong to shared libraries.
368 // The format of one of these entries and the fields we care about
370 // XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537 name of mapping...\n
371 // ^^^^^^^^ ^^^^^^^^ ^^^^ ^^
372 // start end prot maj_dev
375 // The parser is called with a pointer to the entry and the return value
376 // is either NULL or is advanced to the next entry (the byte after the
// Fixed column offsets into a maps line; valid for 32-bit (8-hex-digit)
// addresses as drawn in the format sketch above.
379 #define OFFSET_MAP_START 0
380 #define OFFSET_MAP_END 9
381 #define OFFSET_MAP_PROT 18
382 #define OFFSET_MAP_MAJDEV 32
// NOTE(review): truncated chunk -- the declaration of `tok`, some braces
// and the return statements fall in the gaps between the numbered lines.
// Note this parser mutates buf_ptr in place (writes '\0' terminators).
384 static char *parse_map_entry(char *buf_ptr, word *start, word *end,
385 char *prot_buf, unsigned int *maj_dev)
// NULL or empty input means no more entries.
391 if (buf_ptr == NULL || *buf_ptr == '\0') {
395 memcpy(prot_buf, buf_ptr+OFFSET_MAP_PROT, 4); // do the protections first
398 if (prot_buf[1] == 'w') { // we can skip all of this if it's not writable
// Terminate and parse the 8-hex-digit start address in place.
401 buf_ptr[OFFSET_MAP_START+8] = '\0';
402 *start = strtoul(tok, NULL, 16);
404 tok = buf_ptr+OFFSET_MAP_END;
405 buf_ptr[OFFSET_MAP_END+8] = '\0';
406 *end = strtoul(tok, NULL, 16);
// Major device number: hex digits up to the ':' separator.
408 buf_ptr += OFFSET_MAP_MAJDEV;
410 while (*buf_ptr != ':') buf_ptr++;
412 *maj_dev = strtoul(tok, NULL, 16);
// Advance past the rest of the line to the start of the next entry.
415 while (*buf_ptr && *buf_ptr++ != '\n');
420 #else /* !USE_PROC_FOR_LIBRARIES */
422 /* Dynamic loading code for Linux running ELF. Somewhat tested on
423 * Linux/x86, untested but hopefully should work on Linux/Alpha.
424 * This code was derived from the Solaris/ELF support. Thanks to
425 * whatever kind soul wrote that. - Patrick Bridges */
428 # include <sys/exec_elf.h>
434 /* Newer versions of Linux/Alpha and Linux/x86 define this macro. We
435 * define it for those older versions that don't. */
437 # if !defined(ELF_CLASS) || ELF_CLASS == ELFCLASS32
438 # define ElfW(type) Elf32_##type
440 # define ElfW(type) Elf64_##type
/* Linux/ELF variant of GC_FirstDLOpenedLinkMap: locate the dynamic     */
/* linker's link_map list via the DT_DEBUG entry in _DYNAMIC (declared  */
/* weak so statically linked binaries still link) and cache the first   */
/* dlopen()ed entry.                                                    */
/* NOTE(review): truncated chunk -- braces and the dp/tag/lm            */
/* declarations fall in the gaps between the numbered lines.            */
444 static struct link_map *
445 GC_FirstDLOpenedLinkMap()
448 # pragma weak _DYNAMIC
450 extern ElfW(Dyn) _DYNAMIC[];
453 static struct link_map *cachedResult = 0;
458 if( cachedResult == 0 ) {
/* Scan the dynamic section until the DT_NULL terminator (tag == 0). */
460 for( dp = _DYNAMIC; (tag = dp->d_tag) != 0; dp++ ) {
461 if( tag == DT_DEBUG ) {
463 = ((struct r_debug *)(dp->d_un.d_ptr))->r_map;
/* Skip the main program's own entry at the head of the list. */
464 if( lm != 0 ) cachedResult = lm->l_next; /* might be NIL */
/* Linux/ELF variant: for every dlopen()ed object, walk its ELF program */
/* headers and register each writable segment as a GC root.  ElfW()     */
/* selects Elf32_/Elf64_ types to match the platform's ELF class.       */
/* NOTE(review): truncated chunk -- declarations of e, p, i, start and  */
/* several braces fall in the gaps between the numbered lines.          */
473 void GC_register_dynamic_libraries()
475 struct link_map *lm = GC_FirstDLOpenedLinkMap();
478 for (lm = GC_FirstDLOpenedLinkMap();
479 lm != (struct link_map *) 0; lm = lm->l_next)
483 unsigned long offset;
/* l_addr is the object's load base; program headers live at e_phoff. */
487 e = (ElfW(Ehdr) *) lm->l_addr;
488 p = ((ElfW(Phdr) *)(((char *)(e)) + e->e_phoff));
489 offset = ((unsigned long)(lm->l_addr));
490 for( i = 0; i < (int)(e->e_phnum); ((i++),(p++)) ) {
491 switch( p->p_type ) {
/* Non-writable segments (text, rodata) cannot hold live pointers. */
494 if( !(p->p_flags & PF_W) ) break;
/* p_vaddr is link-time relative; add the load base to get runtime. */
495 start = ((char *)(p->p_vaddr)) + offset;
496 GC_add_roots_inner(start, start + p->p_memsz, TRUE);
506 #endif /* !USE_PROC_FOR_LIBRARIES */
510 #if defined(IRIX5) || (defined(USE_PROC_FOR_LIBRARIES) && !defined(LINUX))
512 #include <sys/procfs.h>
513 #include <sys/stat.h>
517 #include <signal.h> /* Only for the following test. */
522 extern void * GC_roots_present();
523 /* The type is a lie, since the real type doesn't make sense here, */
524 /* and we only test for NULL. */
526 /* We use /proc to track down all parts of the address space that are */
527 /* mapped by the process, and throw out regions we know we shouldn't */
528 /* worry about. This may also work under other SVR4 variants. */
/* IRIX5 / generic SVR4 /proc variant: enumerate all mappings of the    */
/* current process via PIOCNMAP/PIOCMAP ioctls on /proc/<pid>, then     */
/* register every readable+writable mapping that is not the break       */
/* region, a stack, physical memory, already-known roots, the GC heap,  */
/* a thread stack, or an apparent text segment.                         */
/* NOTE(review): truncated chunk -- declarations of fd, i, flags, buf,  */
/* obj, arg, the `irrelevant:` label and several braces fall in the     */
/* gaps between the numbered lines.                                     */
529 void GC_register_dynamic_libraries()
/* addr_map is grown lazily with GC_scratch_alloc and never freed.     */
533 static prmap_t * addr_map = 0;
534 static int current_sz = 0; /* Number of records currently in addr_map */
535 static int needed_sz; /* Required size of addr_map */
538 register ptr_t start;
539 register ptr_t limit;
540 ptr_t heap_start = (ptr_t)HEAP_START;
541 ptr_t heap_end = heap_start;
545 # endif /* SUNOS5DL */
548 sprintf(buf, "/proc/%d", getpid());
549 /* The above generates a lint complaint, since pid_t varies. */
550 /* It's unclear how to improve this. */
551 fd = open(buf, O_RDONLY);
553 ABORT("/proc open failed");
/* PIOCNMAP reports how many prmap_t records the kernel will return. */
556 if (ioctl(fd, PIOCNMAP, &needed_sz) < 0) {
557 GC_err_printf2("fd = %d, errno = %d\n", fd, errno);
558 ABORT("/proc PIOCNMAP ioctl failed");
560 if (needed_sz >= current_sz) {
/* Double-plus-one growth leaves headroom and room for a 0 record. */
561 current_sz = needed_sz * 2 + 1;
562 /* Expansion, plus room for 0 record */
563 addr_map = (prmap_t *)GC_scratch_alloc((word)
564 (current_sz * sizeof(prmap_t)));
566 if (ioctl(fd, PIOCMAP, addr_map) < 0) {
567 GC_err_printf4("fd = %d, errno = %d, needed_sz = %d, addr_map = 0x%X\n",
568 fd, errno, needed_sz, addr_map);
569 ABORT("/proc PIOCMAP ioctl failed");
571 if (GC_n_heap_sects > 0) {
572 heap_end = GC_heap_sects[GC_n_heap_sects-1].hs_start
573 + GC_heap_sects[GC_n_heap_sects-1].hs_bytes;
574 if (heap_end < GC_scratch_last_end_ptr) heap_end = GC_scratch_last_end_ptr;
/* Examine each mapping; `goto irrelevant` skips the registration. */
576 for (i = 0; i < needed_sz; i++) {
577 flags = addr_map[i].pr_mflags;
578 if ((flags & (MA_BREAK | MA_STACK | MA_PHYS)) != 0) goto irrelevant;
579 if ((flags & (MA_READ | MA_WRITE)) != (MA_READ | MA_WRITE))
581 /* The latter test is empirically useless in very old Irix */
582 /* versions. Other than the */
583 /* main data and stack segments, everything appears to be */
584 /* mapped readable, writable, executable, and shared(!!). */
585 /* This makes no sense to me. - HB */
586 start = (ptr_t)(addr_map[i].pr_vaddr);
587 if (GC_roots_present(start)) goto irrelevant;
588 if (start < heap_end && start >= heap_start)
591 if (GC_is_thread_stack(start)) goto irrelevant;
592 # endif /* MMAP_STACKS */
594 limit = start + addr_map[i].pr_size;
595 /* The following seemed to be necessary for very old versions */
596 /* of Irix, but it has been reported to discard relevant */
597 /* segments under Irix 6.5. */
/* Heuristic: an offset-0 mapping starting with the ELF magic is a   */
/* text segment (see the caveat in the comment above).               */
599 if (addr_map[i].pr_off == 0 && strncmp(start, ELFMAG, 4) == 0) {
600 /* Discard text segments, i.e. 0-offset mappings against */
601 /* executable files which appear to have ELF headers. */
604 # define MAP_IRR_SZ 10
605 static ptr_t map_irr[MAP_IRR_SZ];
606 /* Known irrelevant map entries */
607 static int n_irr = 0;
/* Cache of start addresses already classified as irrelevant, so we  */
/* avoid repeating the PIOCOPENM/fstat probe on every collection.    */
611 for (i = 0; i < n_irr; i++) {
612 if (map_irr[i] == start) goto irrelevant;
/* PIOCOPENM yields an fd for the mapped object; an executable file  */
/* (any x permission bit set) marks the mapping as irrelevant.       */
614 arg = (caddr_t)start;
615 obj = ioctl(fd, PIOCOPENM, &arg);
619 if ((buf.st_mode & 0111) != 0) {
620 if (n_irr < MAP_IRR_SZ) {
621 map_irr[n_irr++] = start;
628 GC_add_roots_inner(start, limit, TRUE);
631 /* Don't keep cached descriptor, for now. Some kernels don't like us */
632 /* to keep a /proc file descriptor around during kill -9. */
633 if (close(fd) < 0) ABORT("Couldnt close /proc file");
637 # endif /* USE_PROC || IRIX5 */
639 # if defined(MSWIN32) || defined(MSWINCE)
641 # define WIN32_LEAN_AND_MEAN
643 # include <windows.h>
646 /* We traverse the entire address space and register all segments */
647 /* that could possibly have been written to. */
649 extern GC_bool GC_is_heap_base (ptr_t p);
651 # ifdef WIN32_THREADS
652 extern void GC_get_next_stack(char *start, char **lo, char **hi);
/* Register [base, limit) as roots, but carve out any thread stacks     */
/* that fall inside the range: GC_get_next_stack reports the next       */
/* known stack at or above curr_base, and we register only the gaps     */
/* between stacks.                                                      */
/* NOTE(review): truncated chunk -- the enclosing loop construct and    */
/* braces fall in the gaps between the numbered lines.                  */
653 void GC_cond_add_roots(char *base, char * limit)
655 char * curr_base = base;
656 char * next_stack_lo;
657 char * next_stack_hi;
659 if (base == limit) return;
661 GC_get_next_stack(curr_base, &next_stack_lo, &next_stack_hi);
/* No more stacks below limit: the remainder is handled after the loop. */
662 if (next_stack_lo >= limit) break;
663 GC_add_roots_inner(curr_base, next_stack_lo, TRUE);
664 curr_base = next_stack_hi;
666 if (curr_base < limit) GC_add_roots_inner(curr_base, limit, TRUE);
/* Single-threaded variant: register [base, limit) as roots unless the  */
/* range overlaps the main stack (between the current stack top,        */
/* rounded down to allocation granularity, and GC_stackbottom).         */
/* NOTE(review): truncated chunk -- the `stack_top` declaration and     */
/* braces fall in the gaps between the numbered lines.                  */
669 void GC_cond_add_roots(char *base, char * limit)
/* Approximate the stack top from a local's address, aligned down.     */
673 = (char *) ((word)(&dummy) & ~(GC_sysinfo.dwAllocationGranularity-1));
674 if (base == limit) return;
675 if (limit > stack_top && base < GC_stackbottom) {
676 /* Part of the stack; ignore it. */
679 GC_add_roots_inner(base, limit, TRUE);
684 extern GC_bool GC_win32s;
/* Win32/WinCE variant: VirtualQuery the entire application address     */
/* range, coalescing adjacent committed read-write regions that are     */
/* not part of the GC heap, and hand each run to GC_cond_add_roots.     */
/* Does nothing under win32s, where this scheme is unusable.            */
/* NOTE(review): truncated chunk -- declarations of p, base, protect,   */
/* result, plus several braces, fall in the gaps between the numbered   */
/* lines.                                                               */
687 void GC_register_dynamic_libraries()
689 MEMORY_BASIC_INFORMATION buf;
694 char * limit, * new_limit;
697 if (GC_win32s) return;
699 base = limit = p = GC_sysinfo.lpMinimumApplicationAddress;
700 # if defined(MSWINCE) && !defined(_WIN32_WCE_EMULATION)
701 /* Only the first 32 MB of address space belongs to the current process */
702 while (p < (LPVOID)0x02000000) {
703 result = VirtualQuery(p, &buf, sizeof(buf));
705 /* Page is free; advance to the next possible allocation base */
707 (((DWORD) p + GC_sysinfo.dwAllocationGranularity)
708 & ~(GC_sysinfo.dwAllocationGranularity-1));
711 while (p < GC_sysinfo.lpMaximumApplicationAddress) {
712 result = VirtualQuery(p, &buf, sizeof(buf));
715 if (result != sizeof(buf)) {
716 ABORT("Weird VirtualQuery result");
718 new_limit = (char *)p + buf.RegionSize;
719 protect = buf.Protect;
/* Candidate root region: committed, plain or executable read-write,   */
/* and not one of our own heap allocations.                            */
720 if (buf.State == MEM_COMMIT
721 && (protect == PAGE_EXECUTE_READWRITE
722 || protect == PAGE_READWRITE)
723 && !GC_is_heap_base(buf.AllocationBase)) {
/* Discontiguous with the run being accumulated: flush it first.      */
724 if ((char *)p != limit) {
725 GC_cond_add_roots(base, limit);
731 if (p > (LPVOID)new_limit /* overflow */) break;
732 p = (LPVOID)new_limit;
/* Flush the final accumulated run. */
734 GC_cond_add_roots(base, limit);
737 #endif /* MSWIN32 || MSWINCE */
739 #if defined(ALPHA) && defined(OSF1)
/* Tru64 (OSF/1) variant: iterate loaded modules with the ldr_*()       */
/* loader API, skip the main executable, and register every writable    */
/* region of every shared module as GC roots.  The GC_printf* calls     */
/* are presumably under a verbose/debug conditional whose #ifdef lines  */
/* fall in the truncation gaps -- TODO confirm against the full file.   */
/* NOTE(review): truncated chunk -- declarations of mypid, status,      */
/* region, and several braces are missing between the numbered lines.   */
743 void GC_register_dynamic_libraries()
749 ldr_module_t moduleid = LDR_NULL_MODULE;
750 ldr_module_info_t moduleinfo;
751 size_t moduleinfosize = sizeof(moduleinfo);
752 size_t modulereturnsize;
756 ldr_region_info_t regioninfo;
757 size_t regioninfosize = sizeof(regioninfo);
758 size_t regionreturnsize;
760 /* Obtain id of this process */
761 mypid = ldr_my_process();
763 /* For each module */
766 /* Get the next (first) module */
767 status = ldr_next_module(mypid, &moduleid);
769 /* Any more modules? */
770 if (moduleid == LDR_NULL_MODULE)
771 break; /* No more modules */
773 /* Check status AFTER checking moduleid because */
774 /* of a bug in the non-shared ldr_next_module stub */
776 GC_printf1("dynamic_load: status = %ld\n", (long)status);
778 extern char *sys_errlist[];
781 if (errno <= sys_nerr) {
782 GC_printf1("dynamic_load: %s\n", (long)sys_errlist[errno]);
784 GC_printf1("dynamic_load: %d\n", (long)errno);
787 ABORT("ldr_next_module failed");
790 /* Get the module information */
791 status = ldr_inq_module(mypid, moduleid, &moduleinfo,
792 moduleinfosize, &modulereturnsize);
794 ABORT("ldr_inq_module failed");
796 /* is module for the main program (i.e. nonshared portion)? */
797 if (moduleinfo.lmi_flags & LDR_MAIN)
798 continue; /* skip the main module */
801 GC_printf("---Module---\n");
802 GC_printf("Module ID = %16ld\n", moduleinfo.lmi_modid);
803 GC_printf("Count of regions = %16d\n", moduleinfo.lmi_nregion);
804 GC_printf("flags for module = %16lx\n", moduleinfo.lmi_flags);
805 GC_printf("pathname of module = \"%s\"\n", moduleinfo.lmi_name);
808 /* For each region in this module */
809 for (region = 0; region < moduleinfo.lmi_nregion; region++) {
811 /* Get the region information */
/* NOTE(review): "®ioninfo"/"®ionreturnsize" below are mojibake for */
/* "&regioninfo"/"&regionreturnsize" (an HTML "&reg;" entity was      */
/* substituted during extraction) -- restore the '&' when repairing   */
/* this file; left byte-identical here.                               */
812 status = ldr_inq_region(mypid, moduleid, region, ®ioninfo,
813 regioninfosize, ®ionreturnsize);
815 ABORT("ldr_inq_region failed");
817 /* only process writable (data) regions */
818 if (! (regioninfo.lri_prot & LDR_W))
822 GC_printf("--- Region ---\n");
823 GC_printf("Region number = %16ld\n",
824 regioninfo.lri_region_no);
825 GC_printf("Protection flags = %016x\n", regioninfo.lri_prot);
826 GC_printf("Virtual address = %16p\n", regioninfo.lri_vaddr);
827 GC_printf("Mapped address = %16p\n", regioninfo.lri_mapaddr);
828 GC_printf("Region size = %16ld\n", regioninfo.lri_size);
829 GC_printf("Region name = \"%s\"\n", regioninfo.lri_name);
832 /* register region as a garbage collection root */
834 (char *)regioninfo.lri_mapaddr,
835 (char *)regioninfo.lri_mapaddr + regioninfo.lri_size,
849 extern char *sys_errlist[];
/* HP-UX variant: enumerate shared libraries via shl_get(), by          */
/* increasing ordinal index, and register each library's data segment   */
/* [dstart, dend) as GC roots.  EINVAL from shl_get marks the end of    */
/* the list.  The GC_printf* dumps are presumably debug-only -- the     */
/* surrounding #ifdef lines fall in the truncation gaps; confirm        */
/* against the full file.                                               */
/* NOTE(review): truncated chunk -- the `status` declaration, loop      */
/* construct and braces are missing between the numbered lines.         */
852 void GC_register_dynamic_libraries()
855 int index = 1; /* Ordinal position in shared library search list */
856 struct shl_descriptor *shl_desc; /* Shared library info, see dl.h */
858 /* For each dynamic library loaded */
861 /* Get info about next shared library */
862 status = shl_get(index, &shl_desc);
864 /* Check if this is the end of the list or if some error occurred */
867 /* I've seen errno values of 0. The man page is not clear */
868 /* as to whether errno should get set on a -1 return. */
871 if (errno == EINVAL) {
872 break; /* Moved past end of shared library list --> finished */
874 if (errno <= sys_nerr) {
875 GC_printf1("dynamic_load: %s\n", (long) sys_errlist[errno]);
877 GC_printf1("dynamic_load: %d\n", (long) errno);
879 ABORT("shl_get failed");
885 GC_printf0("---Shared library---\n");
886 GC_printf1("\tfilename = \"%s\"\n", shl_desc->filename);
887 GC_printf1("\tindex = %d\n", index);
888 GC_printf1("\thandle = %08x\n",
889 (unsigned long) shl_desc->handle);
890 GC_printf1("\ttext seg. start = %08x\n", shl_desc->tstart);
891 GC_printf1("\ttext seg. end = %08x\n", shl_desc->tend);
892 GC_printf1("\tdata seg. start = %08x\n", shl_desc->dstart);
893 GC_printf1("\tdata seg. end = %08x\n", shl_desc->dend);
894 GC_printf1("\tref. count = %lu\n", shl_desc->ref_count);
897 /* register shared library's data segment as a garbage collection root */
898 GC_add_roots_inner((char *) shl_desc->dstart,
899 (char *) shl_desc->dend, TRUE);
909 #include <sys/errno.h>
/* AIX (RS6000) variant: loadquery(L_GETINFO) fills a buffer with a     */
/* chain of ld_info records; register each loaded object's data region  */
/* [ldinfo_dataorg, ldinfo_dataorg + ldinfo_datasize) as GC roots.      */
/* The buffer is grown by doubling until loadquery stops failing with   */
/* ENOMEM.                                                              */
/* NOTE(review): truncated chunk -- declarations of len, ldibuf,        */
/* ldibuflen, ldi, the GC_add_roots_inner call head and loop braces     */
/* are missing between the numbered lines.                              */
910 void GC_register_dynamic_libraries()
917 ldibuf = alloca(ldibuflen = 8192);
919 while ( (len = loadquery(L_GETINFO,ldibuf,ldibuflen)) < 0) {
920 if (errno != ENOMEM) {
921 ABORT("loadquery failed");
/* Buffer too small: double it and retry. */
923 ldibuf = alloca(ldibuflen *= 2);
926 ldi = (struct ld_info *)ldibuf;
/* ldinfo_next is the byte offset to the next record; 0 ends the list. */
928 len = ldi->ldinfo_next;
931 (unsigned long)ldi->ldinfo_dataorg
932 + ldi->ldinfo_datasize,
934 ldi = len ? (struct ld_info *)((char *)ldi + len) : 0;
941 #else /* !DYNAMIC_LOADING */
945 # include "il/PCR_IL.h"
946 # include "th/PCR_ThCtl.h"
947 # include "mm/PCR_MM.h"
/* PCR variant: walk the loaded-file list from the most recent entry    */
/* backwards, skipping files whose load has not been committed, and     */
/* register every segment flagged Traced_on as GC roots.                */
/* NOTE(review): truncated chunk -- the GC_add_roots_inner call head    */
/* and several braces are missing between the numbered lines.           */
949 void GC_register_dynamic_libraries()
951 /* Add new static data areas of dynamically loaded modules. */
953 PCR_IL_LoadedFile * p = PCR_IL_GetLastLoadedFile();
954 PCR_IL_LoadedSegment * q;
956 /* Skip uncommitted files */
957 while (p != NIL && !(p -> lf_commitPoint)) {
958 /* The loading of this file has not yet been committed */
959 /* Hence its description could be inconsistent. */
960 /* Furthermore, it hasn't yet been run. Hence its data */
961 /* segments can't possibly reference heap allocated */
/* lf_prev links toward earlier (already-processed) files. */
965 for (; p != NIL; p = p -> lf_prev) {
966 for (q = p -> lf_ls; q != NIL; q = q -> ls_next) {
967 if ((q -> ls_flags & PCR_IL_SegFlags_Traced_MASK)
968 == PCR_IL_SegFlags_Traced_on) {
970 ((char *)(q -> ls_addr),
971 (char *)(q -> ls_addr) + q -> ls_bytes,
/* Fallback for platforms with no dynamic-loading support: registering  */
/* dynamic library roots is a no-op.                                    */
982 void GC_register_dynamic_libraries(){}
/* Marker symbol indicating this build has no dynamic-loading support. */
984 int GC_no_dynamic_loading;
987 #endif /* !DYNAMIC_LOADING */