+Tue Jan 9 16:10:26 1996 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * elf/dl-load.c (_dl_map_object_from_fd): After mapping first
+ segment to find location, mprotect excess region to no access,
+ don't munmap it.
+
+ * sysdeps/mach/hurd/dl-sysdep.c (mmap): If vm_map returns
+ KERN_NO_SPACE for fixed location, deallocate it and try again.
+
Mon Jan 8 17:43:23 1996 Roland McGrath <roland@churchy.gnu.ai.mit.edu>

	* locale/Makefile (lib-modules): Renamed hash to locfile-hash.
kernel map it anywhere it likes, but we must have space for all
the segments in their specified positions relative to the first.
So we map the first segment without MAP_FIXED, but with its
- extent increased to cover all the segments. Then we unmap the
- excess portion, and there is known sufficient space there to map
- the later segments. */
+ extent increased to cover all the segments. Then we remove
+ access from the excess portion, and there is known to be
+ sufficient space there to map the later segments. */
caddr_t mapat;
mapat = map_segment (c->mapstart,
loadcmds[nloadcmds - 1].allocend - c->mapstart,
c->prot, 0, c->mapoff);
l->l_addr = (Elf32_Addr) mapat - c->mapstart;
- /* Unmap the excess portion, and then jump into the normal
- segment-mapping loop to handle the portion of the segment past
- the end of the file mapping. */
- munmap (mapat + c->mapend,
- loadcmds[nloadcmds - 1].allocend - c->mapend);
+ /* Change protection on the excess portion to disallow all access;
+ the portions we do not remap later will be inaccessible as if
+ unallocated. Then jump into the normal segment-mapping loop to
+ handle the portion of the segment past the end of the file
+ mapping. */
+ mprotect (mapat + c->mapend,
+ loadcmds[nloadcmds - 1].allocend - c->mapend,
+ 0);
goto postmap;
}
/* Operating system support for run-time dynamic linker. Hurd version.
-Copyright (C) 1995 Free Software Foundation, Inc.
+Copyright (C) 1995, 1996 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
flags & (MAP_COPY|MAP_PRIVATE),
vmprot, VM_PROT_ALL,
(flags & MAP_SHARED) ? VM_INHERIT_SHARE : VM_INHERIT_COPY);
+ if (err == KERN_NO_SPACE && (flags & MAP_FIXED))
+ {
+ /* XXX This is not atomic, as it is on Unix: the region may be
+ grabbed by another thread between the deallocate and the map. */
+ /* The region is already allocated; deallocate it first. */
+ err = __vm_deallocate (__mach_task_self (), mapaddr, len);
+ if (! err)
+ err = __vm_map (__mach_task_self (),
+ &mapaddr, (vm_size_t) len, 0 /*ELF_MACHINE_USER_ADDRESS_MASK*/,
+ !(flags & MAP_FIXED),
+ (mach_port_t) fd, (vm_offset_t) offset,
+ flags & (MAP_COPY|MAP_PRIVATE),
+ vmprot, VM_PROT_ALL,
+ (flags & MAP_SHARED)
+ ? VM_INHERIT_SHARE : VM_INHERIT_COPY);
+ }
+
return err ? (caddr_t) __hurd_fail (err) : (caddr_t) mapaddr;
}