1 /* Determine the virtual memory area of a given address.
2 Copyright (C) 2002-2021 Bruno Haible <bruno@clisp.org>
3 Copyright (C) 2003-2006 Paolo Bonzini <bonzini@gnu.org>
5 This program is free software: you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 3 of the License, or
8 (at your option) any later version.
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
15 You should have received a copy of the GNU General Public License
16 along with this program. If not, see <https://www.gnu.org/licenses/>. */
26 /* =========================== stackvma-simple.c =========================== */
28 #if defined __linux__ || defined __ANDROID__ \
29 || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
30 || defined __NetBSD__ \
31 || (defined __APPLE__ && defined __MACH__) \
32 || defined __sgi || defined __sun \
33 || defined __CYGWIN__ || defined __HAIKU__
35 /* This file contains the proximity test function for the simple cases, where
36 the OS has an API for enumerating the mapped ranges of virtual memory. */
38 # if STACK_DIRECTION < 0
40 /* Info about the gap between this VMA and the previous one.
41 addr must be < vma->start. */
43 simple_is_near_this (uintptr_t addr, struct vma_struct *vma)
45 return (vma->start - addr <= (vma->start - vma->prev_end) / 2);
49 # if STACK_DIRECTION > 0
51 /* Info about the gap between this VMA and the next one.
52 addr must be > vma->end - 1. */
54 simple_is_near_this (uintptr_t addr, struct vma_struct *vma)
56 return (addr - vma->end < (vma->next_start - vma->end) / 2);
63 /* =========================== stackvma-rofile.c =========================== */
64 /* Buffered read-only streams. */
66 #if defined __linux__ || defined __ANDROID__ \
67 || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
68 || defined __NetBSD__ \
71 # include <errno.h> /* errno, EINTR */
72 # include <fcntl.h> /* open, O_RDONLY */
73 # include <stddef.h> /* size_t */
74 # include <unistd.h> /* getpagesize, lseek, read, close */
75 # include <sys/types.h>
76 # include <sys/mman.h> /* mmap, munmap */
78 # if defined __linux__ || defined __ANDROID__
79 # include <limits.h> /* PATH_MAX */
82 /* Buffered read-only streams.
83 We cannot use <stdio.h> here, because fopen() calls malloc(), and a malloc()
84 call may have been interrupted.
85 Also, we cannot use multiple read() calls, because if the buffer size is
86 smaller than the file's contents:
87 - On NetBSD, the second read() call would return 0, thus making the file
89 - On DragonFly BSD, the first read() call would fail with errno = EFBIG.
90 - On all platforms, if some other thread is doing memory allocations or
91 deallocations between two read() calls, there is a high risk that the
92 result of these two read() calls don't fit together, and as a
93 consequence we will parse garbage and either omit some VMAs or return
94 VMAs with nonsensical addresses.
95 So use mmap(), and ignore the resulting VMA.
96 The stack-allocated buffer cannot be too large, because this can be called
97 when we are in the context of an alternate stack of just SIGSTKSZ bytes. */
99 # if defined __linux__ || defined __ANDROID__
100 /* On Linux, if the file does not entirely fit into the buffer, the read()
101 function stops before the line that would come out truncated. The
102 maximum size of such a line is 73 + PATH_MAX bytes. To be sure that we
103 have read everything, we must verify that at least that many bytes are
104 left when read() returned. */
105 # define MIN_LEFTOVER (73 + PATH_MAX)
107 # define MIN_LEFTOVER 1
110 # if MIN_LEFTOVER < 1024
111 # define STACK_ALLOCATED_BUFFER_SIZE 1024
113 /* There is no point in using a stack-allocated buffer if it is too small
115 # define STACK_ALLOCATED_BUFFER_SIZE 1
123 /* These fields deal with allocation of the buffer. */
126 size_t auxmap_length;
127 uintptr_t auxmap_start;
128 uintptr_t auxmap_end;
129 char stack_allocated_buffer[STACK_ALLOCATED_BUFFER_SIZE];
132 /* Open a read-only file stream. */
134 rof_open (struct rofile *rof, const char *filename)
140 fd = open (filename, O_RDONLY);
145 /* Try the static buffer first. */
147 rof->buffer = rof->stack_allocated_buffer;
148 size = sizeof (rof->stack_allocated_buffer);
150 rof->auxmap_start = 0;
154 /* Attempt to read the contents in a single system call. */
155 if (size > MIN_LEFTOVER)
157 int n = read (fd, rof->buffer, size);
158 if (n < 0 && errno == EINTR)
160 # if defined __DragonFly__
161 if (!(n < 0 && errno == EFBIG))
167 if (n + MIN_LEFTOVER <= size)
169 /* The buffer was sufficiently large. */
171 # if defined __linux__ || defined __ANDROID__
172 /* On Linux, the read() call may stop even if the buffer was
173 large enough. We need the equivalent of full_read(). */
176 n = read (fd, rof->buffer + rof->filled, size - rof->filled);
177 if (n < 0 && errno == EINTR)
182 if (n + MIN_LEFTOVER > size - rof->filled)
183 /* Allocate a larger buffer. */
187 /* Reached the end of file. */
200 /* Allocate a larger buffer. */
203 pagesize = getpagesize ();
205 while (size <= MIN_LEFTOVER)
214 if (rof->auxmap != NULL)
215 munmap (rof->auxmap, rof->auxmap_length);
217 rof->auxmap = (void *) mmap ((void *) 0, size, PROT_READ | PROT_WRITE,
218 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
219 if (rof->auxmap == (void *) -1)
224 rof->auxmap_length = size;
225 rof->auxmap_start = (uintptr_t) rof->auxmap;
226 rof->auxmap_end = rof->auxmap_start + size;
227 rof->buffer = (char *) rof->auxmap;
230 if (lseek (fd, 0, SEEK_SET) < 0)
233 fd = open (filename, O_RDONLY);
241 if (rof->auxmap != NULL)
242 munmap (rof->auxmap, rof->auxmap_length);
246 /* Return the next byte from a read-only file stream without consuming it,
249 rof_peekchar (struct rofile *rof)
251 if (rof->position == rof->filled)
256 return (unsigned char) rof->buffer[rof->position];
259 /* Return the next byte from a read-only file stream, or -1 at EOF. */
261 rof_getchar (struct rofile *rof)
263 int c = rof_peekchar (rof);
269 /* Parse an unsigned hexadecimal number from a read-only file stream. */
271 rof_scanf_lx (struct rofile *rof, uintptr_t *valuep)
274 unsigned int numdigits = 0;
277 int c = rof_peekchar (rof);
278 if (c >= '0' && c <= '9')
279 value = (value << 4) + (c - '0');
280 else if (c >= 'A' && c <= 'F')
281 value = (value << 4) + (c - 'A' + 10);
282 else if (c >= 'a' && c <= 'f')
283 value = (value << 4) + (c - 'a' + 10);
295 /* Close a read-only file stream. */
297 rof_close (struct rofile *rof)
299 if (rof->auxmap != NULL)
300 munmap (rof->auxmap, rof->auxmap_length);
305 /* ========================== stackvma-vma-iter.c ========================== */
306 /* Iterate through the virtual memory areas of the current process,
307 by reading from the /proc file system. */
309 /* This code is a simplified copy (no handling of protection flags) of the
310 code in gnulib's lib/vma-iter.c. */
312 #if defined __linux__ || defined __ANDROID__ \
313 || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
314 || defined __NetBSD__ \
315 || defined __CYGWIN__
317 /* Forward declarations. */
318 struct callback_locals;
319 static int callback (struct callback_locals *locals, uintptr_t start, uintptr_t end);
321 # if defined __linux__ || defined __ANDROID__ || (defined __FreeBSD_kernel__ && !defined __FreeBSD__) || defined __CYGWIN__
322 /* GNU/kFreeBSD mounts /proc as linprocfs, which looks like a Linux /proc
326 vma_iterate_proc (struct callback_locals *locals)
330 /* Open the current process' maps file. It describes one VMA per line. */
331 if (rof_open (&rof, "/proc/self/maps") >= 0)
333 uintptr_t auxmap_start = rof.auxmap_start;
334 uintptr_t auxmap_end = rof.auxmap_end;
338 uintptr_t start, end;
341 /* Parse one line. First start and end. */
342 if (!(rof_scanf_lx (&rof, &start) >= 0
343 && rof_getchar (&rof) == '-'
344 && rof_scanf_lx (&rof, &end) >= 0))
346 while (c = rof_getchar (&rof), c != -1 && c != '\n')
349 if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
351 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
352 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
353 if (start < auxmap_start)
354 if (callback (locals, start, auxmap_start))
356 if (auxmap_end - 1 < end - 1)
357 if (callback (locals, auxmap_end, end))
362 if (callback (locals, start, end))
373 # elif defined __FreeBSD__ || defined __DragonFly__ || defined __NetBSD__
376 vma_iterate_proc (struct callback_locals *locals)
380 /* Open the current process' maps file. It describes one VMA per line.
382 Cf. <https://www.freebsd.org/cgi/cvsweb.cgi/src/sys/fs/procfs/procfs_map.c?annotate=HEAD>
383 On NetBSD, there are two such files:
384 - /proc/curproc/map in near-FreeBSD syntax,
385 - /proc/curproc/maps in Linux syntax.
386 Cf. <http://cvsweb.netbsd.org/bsdweb.cgi/src/sys/miscfs/procfs/procfs_map.c?rev=HEAD> */
387 if (rof_open (&rof, "/proc/curproc/map") >= 0)
389 uintptr_t auxmap_start = rof.auxmap_start;
390 uintptr_t auxmap_end = rof.auxmap_end;
394 uintptr_t start, end;
397 /* Parse one line. First start. */
398 if (!(rof_getchar (&rof) == '0'
399 && rof_getchar (&rof) == 'x'
400 && rof_scanf_lx (&rof, &start) >= 0))
402 while (c = rof_peekchar (&rof), c == ' ' || c == '\t')
405 if (!(rof_getchar (&rof) == '0'
406 && rof_getchar (&rof) == 'x'
407 && rof_scanf_lx (&rof, &end) >= 0))
409 while (c = rof_getchar (&rof), c != -1 && c != '\n')
412 if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
414 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
415 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
416 if (start < auxmap_start)
417 if (callback (locals, start, auxmap_start))
419 if (auxmap_end - 1 < end - 1)
420 if (callback (locals, auxmap_end, end))
425 if (callback (locals, start, end))
438 # if (defined __FreeBSD_kernel__ || defined __FreeBSD__) && defined KERN_PROC_VMMAP /* FreeBSD >= 7.1 */
440 # include <sys/user.h> /* struct kinfo_vmentry */
441 # include <sys/sysctl.h> /* sysctl */
444 vma_iterate_bsd (struct callback_locals *locals)
446 /* Documentation: https://www.freebsd.org/cgi/man.cgi?sysctl(3) */
447 int info_path[] = { CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid () };
452 unsigned long auxmap_start;
453 unsigned long auxmap_end;
459 if (sysctl (info_path, 4, NULL, &len, NULL, 0) < 0)
461 /* Allow for small variations over time. In a multithreaded program
462 new VMAs can be allocated at any moment. */
464 /* Allocate memneed bytes of memory.
465 We cannot use alloca here, because not much stack space is guaranteed.
466 We also cannot use malloc here, because a malloc() call may call mmap()
467 and thus pre-allocate available memory.
468 So use mmap(), and ignore the resulting VMA. */
469 pagesize = getpagesize ();
471 memneed = ((memneed - 1) / pagesize + 1) * pagesize;
472 auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
473 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
474 if (auxmap == (void *) -1)
476 auxmap_start = (unsigned long) auxmap;
477 auxmap_end = auxmap_start + memneed;
478 mem = (char *) auxmap;
479 if (sysctl (info_path, 4, mem, &len, NULL, 0) < 0)
481 munmap (auxmap, memneed);
488 struct kinfo_vmentry *kve = (struct kinfo_vmentry *) p;
489 unsigned long start = kve->kve_start;
490 unsigned long end = kve->kve_end;
491 if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
493 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
494 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
495 if (start < auxmap_start)
496 if (callback (locals, start, auxmap_start))
498 if (auxmap_end - 1 < end - 1)
499 if (callback (locals, auxmap_end, end))
504 if (callback (locals, start, end))
507 p += kve->kve_structsize;
509 munmap (auxmap, memneed);
515 # define vma_iterate_bsd(locals) (-1)
520 /* Iterate over the virtual memory areas of the current process.
521 If such iteration is supported, the callback is called once for every
522 virtual memory area, in ascending order, with the following arguments:
523 - LOCALS is the same argument as passed to vma_iterate.
524 - START is the address of the first byte in the area, page-aligned.
525 - END is the address of the last byte in the area plus 1, page-aligned.
526 Note that it may be 0 for the last area in the address space.
527 If the callback returns 0, the iteration continues. If it returns 1,
528 the iteration terminates prematurely.
529 This function may open file descriptors, but does not call malloc().
530 Return 0 if all went well, or -1 in case of error. */
532 vma_iterate (struct callback_locals *locals)
534 # if defined __FreeBSD__
535 /* On FreeBSD with procfs (but not GNU/kFreeBSD, which uses linprocfs), the
536 function vma_iterate_proc does not return the virtual memory areas that
537 were created by anonymous mmap. See
538 <https://svnweb.freebsd.org/base/head/sys/fs/procfs/procfs_map.c?view=markup>
539 So use vma_iterate_proc only as a fallback. */
540 int retval = vma_iterate_bsd (locals);
544 return vma_iterate_proc (locals);
546 /* On the other platforms, try the /proc approach first, and the sysctl()
548 int retval = vma_iterate_proc (locals);
552 return vma_iterate_bsd (locals);
558 /* =========================== stackvma-mincore.c =========================== */
560 /* mincore() is a system call that allows one to inquire the status of a
561 range of pages of virtual memory. In particular, it allows one to inquire
562 whether a page is mapped at all (except on Mac OS X, where mincore
563 returns 0 even for unmapped addresses).
564 As of 2006, mincore() is supported by: possible bits:
565 - Linux, since Linux 2.4 and glibc 2.2, 1
566 - Solaris, since Solaris 9, 1
567 - MacOS X, since MacOS X 10.3 (at least), 1
568 - FreeBSD, since FreeBSD 6.0, MINCORE_{INCORE,REFERENCED,MODIFIED}
569 - NetBSD, since NetBSD 3.0 (at least), 1
570 - OpenBSD, since OpenBSD 2.6 (at least), 1
571 - AIX, since AIX 5.3, 1
574 However, while the API makes it easy to determine the bounds of mapped
575 virtual memory, it does not make it easy to find the bounds of _unmapped_
576 virtual memory ranges. We try to work around this, but it may still be
579 #if defined __linux__ || defined __ANDROID__ \
580 || defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ \
581 || defined __NetBSD__ /* || defined __OpenBSD__ */ \
582 /* || (defined __APPLE__ && defined __MACH__) */ \
583 || defined _AIX || defined __sun
585 # include <unistd.h> /* getpagesize, mincore */
586 # include <sys/types.h>
587 # include <sys/mman.h> /* mincore */
589 /* The AIX declaration of mincore() uses 'caddr_t', whereas the other platforms
592 typedef caddr_t MINCORE_ADDR_T;
594 typedef void* MINCORE_ADDR_T;
597 /* The glibc and musl declaration of mincore() uses 'unsigned char *', whereas
598 the BSD declaration uses 'char *'. */
599 # if __GLIBC__ >= 2 || defined __linux__ || defined __ANDROID__
600 typedef unsigned char pageinfo_t;
602 typedef char pageinfo_t;
605 /* Cache for getpagesize(). */
606 static uintptr_t pagesize;
608 /* Initialize pagesize. */
612 pagesize = getpagesize ();
615 /* Test whether the page starting at ADDR is among the address range.
616 ADDR must be a multiple of pagesize. */
618 is_mapped (uintptr_t addr)
621 return mincore ((MINCORE_ADDR_T) addr, pagesize, vec) >= 0;
624 /* Assuming that the page starting at ADDR is among the address range,
625 return the start of its virtual memory range.
626 ADDR must be a multiple of pagesize. */
628 mapped_range_start (uintptr_t addr)
630 /* Use a moderately sized VEC here, small enough that it fits on the stack
631 (without requiring malloc). */
632 pageinfo_t vec[1024];
633 uintptr_t stepsize = sizeof (vec);
637 uintptr_t max_remaining;
642 max_remaining = addr / pagesize;
643 if (stepsize > max_remaining)
644 stepsize = max_remaining;
645 if (mincore ((MINCORE_ADDR_T) (addr - stepsize * pagesize),
646 stepsize * pagesize, vec) < 0)
647 /* Time to search in smaller steps. */
649 /* The entire range exists. Continue searching in large steps. */
650 addr -= stepsize * pagesize;
654 uintptr_t halfstepsize1;
655 uintptr_t halfstepsize2;
660 /* Here we know that less than stepsize pages exist starting at addr. */
661 halfstepsize1 = (stepsize + 1) / 2;
662 halfstepsize2 = stepsize / 2;
663 /* halfstepsize1 + halfstepsize2 = stepsize. */
665 if (mincore ((MINCORE_ADDR_T) (addr - halfstepsize1 * pagesize),
666 halfstepsize1 * pagesize, vec) < 0)
667 stepsize = halfstepsize1;
670 addr -= halfstepsize1 * pagesize;
671 stepsize = halfstepsize2;
676 /* Assuming that the page starting at ADDR is among the address range,
677 return the end of its virtual memory range + 1.
678 ADDR must be a multiple of pagesize. */
680 mapped_range_end (uintptr_t addr)
682 /* Use a moderately sized VEC here, small enough that it fits on the stack
683 (without requiring malloc). */
684 pageinfo_t vec[1024];
685 uintptr_t stepsize = sizeof (vec);
690 uintptr_t max_remaining;
692 if (addr == 0) /* wrapped around? */
695 max_remaining = (- addr) / pagesize;
696 if (stepsize > max_remaining)
697 stepsize = max_remaining;
698 if (mincore ((MINCORE_ADDR_T) addr, stepsize * pagesize, vec) < 0)
699 /* Time to search in smaller steps. */
701 /* The entire range exists. Continue searching in large steps. */
702 addr += stepsize * pagesize;
706 uintptr_t halfstepsize1;
707 uintptr_t halfstepsize2;
712 /* Here we know that less than stepsize pages exist starting at addr. */
713 halfstepsize1 = (stepsize + 1) / 2;
714 halfstepsize2 = stepsize / 2;
715 /* halfstepsize1 + halfstepsize2 = stepsize. */
717 if (mincore ((MINCORE_ADDR_T) addr, halfstepsize1 * pagesize, vec) < 0)
718 stepsize = halfstepsize1;
721 addr += halfstepsize1 * pagesize;
722 stepsize = halfstepsize2;
727 /* Determine whether an address range [ADDR1..ADDR2] is completely unmapped.
728 ADDR1 must be <= ADDR2. */
730 is_unmapped (uintptr_t addr1, uintptr_t addr2)
735 /* Round addr1 down. */
736 addr1 = (addr1 / pagesize) * pagesize;
737 /* Round addr2 up and turn it into an exclusive bound. */
738 addr2 = ((addr2 / pagesize) + 1) * pagesize;
740 /* This is slow: mincore() does not provide a way to determine the bounds
741 of the gaps directly. So we have to use mincore() on individual pages
742 over and over again. Only after we've verified that all pages are
743 unmapped, we know that the range is completely unmapped.
744 If we were to traverse the pages from bottom to top or from top to bottom,
745 it would be slow even in the average case. To speed up the search, we
746 exploit the fact that mapped memory ranges are larger than one page on
747 average, therefore we have good chances of hitting a mapped area if we
748 traverse only every second, or only fourth page, etc. This doesn't
749 decrease the worst-case runtime, only the average runtime. */
750 count = (addr2 - addr1) / pagesize;
751 /* We have to test is_mapped (addr1 + i * pagesize) for 0 <= i < count. */
752 for (stepsize = 1; stepsize < count; )
753 stepsize = 2 * stepsize;
756 uintptr_t addr_stepsize;
760 stepsize = stepsize / 2;
763 addr_stepsize = stepsize * pagesize;
764 for (i = stepsize, addr = addr1 + addr_stepsize;
766 i += 2 * stepsize, addr += 2 * addr_stepsize)
767 /* Here addr = addr1 + i * pagesize. */
768 if (is_mapped (addr))
774 # if STACK_DIRECTION < 0
776 /* Info about the gap between this VMA and the previous one.
777 addr must be < vma->start. */
779 mincore_is_near_this (uintptr_t addr, struct vma_struct *vma)
781 /* vma->start - addr <= (vma->start - vma->prev_end) / 2
782 is mathematically equivalent to
783 vma->prev_end <= 2 * addr - vma->start
784 <==> is_unmapped (2 * addr - vma->start, vma->start - 1).
785 But be careful about overflow: if 2 * addr - vma->start is negative,
786 we consider a tiny "guard page" mapping [0, 0] to be present around
787 NULL; it intersects the range (2 * addr - vma->start, vma->start - 1),
788 therefore return false. */
789 uintptr_t testaddr = addr - (vma->start - addr);
790 if (testaddr > addr) /* overflow? */
792 /* Here testaddr <= addr < vma->start. */
793 return is_unmapped (testaddr, vma->start - 1);
797 # if STACK_DIRECTION > 0
799 /* Info about the gap between this VMA and the next one.
800 addr must be > vma->end - 1. */
802 mincore_is_near_this (uintptr_t addr, struct vma_struct *vma)
804 /* addr - vma->end < (vma->next_start - vma->end) / 2
805 is mathematically equivalent to
806 vma->next_start > 2 * addr - vma->end
807 <==> is_unmapped (vma->end, 2 * addr - vma->end).
808 But be careful about overflow: if 2 * addr - vma->end is > ~0UL,
809 we consider a tiny "guard page" mapping [0, 0] to be present around
810 NULL; it intersects the range (vma->end, 2 * addr - vma->end),
811 therefore return false. */
812 uintptr_t testaddr = addr + (addr - vma->end);
813 if (testaddr < addr) /* overflow? */
815 /* Here vma->end - 1 < addr <= testaddr. */
816 return is_unmapped (vma->end, testaddr);
822 mincore_get_vma (uintptr_t address, struct vma_struct *vma)
826 address = (address / pagesize) * pagesize;
827 vma->start = mapped_range_start (address);
828 vma->end = mapped_range_end (address);
829 vma->is_near_this = mincore_is_near_this;
835 /* ========================================================================== */
837 /* ---------------------------- stackvma-linux.c ---------------------------- */
839 #if defined __linux__ || defined __ANDROID__ /* Linux */
841 struct callback_locals
844 struct vma_struct *vma;
845 # if STACK_DIRECTION < 0
848 int stop_at_next_vma;
854 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
856 # if STACK_DIRECTION < 0
857 if (locals->address >= start && locals->address <= end - 1)
859 locals->vma->start = start;
860 locals->vma->end = end;
861 locals->vma->prev_end = locals->prev;
867 if (locals->stop_at_next_vma)
869 locals->vma->next_start = start;
870 locals->stop_at_next_vma = 0;
873 if (locals->address >= start && locals->address <= end - 1)
875 locals->vma->start = start;
876 locals->vma->end = end;
878 locals->stop_at_next_vma = 1;
886 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
888 struct callback_locals locals;
889 locals.address = address;
891 # if STACK_DIRECTION < 0
894 locals.stop_at_next_vma = 0;
898 vma_iterate (&locals);
899 if (locals.retval == 0)
901 # if !(STACK_DIRECTION < 0)
902 if (locals.stop_at_next_vma)
905 vma->is_near_this = simple_is_near_this;
909 return mincore_get_vma (address, vma);
912 /* --------------------------- stackvma-freebsd.c --------------------------- */
914 #elif defined __FreeBSD_kernel__ || defined __FreeBSD__ || defined __DragonFly__ /* GNU/kFreeBSD, FreeBSD */
916 struct callback_locals
919 struct vma_struct *vma;
920 /* The stack appears as multiple adjacent segments, therefore we
921 merge adjacent segments. */
922 uintptr_t curr_start, curr_end;
923 # if STACK_DIRECTION < 0
926 int stop_at_next_vma;
932 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
934 if (start == locals->curr_end)
936 /* Merge adjacent segments. */
937 locals->curr_end = end;
940 # if STACK_DIRECTION < 0
941 if (locals->curr_start < locals->curr_end
942 && locals->address >= locals->curr_start
943 && locals->address <= locals->curr_end - 1)
945 locals->vma->start = locals->curr_start;
946 locals->vma->end = locals->curr_end;
947 locals->vma->prev_end = locals->prev_end;
951 locals->prev_end = locals->curr_end;
953 if (locals->stop_at_next_vma)
955 locals->vma->next_start = locals->curr_start;
956 locals->stop_at_next_vma = 0;
959 if (locals->curr_start < locals->curr_end
960 && locals->address >= locals->curr_start
961 && locals->address <= locals->curr_end - 1)
963 locals->vma->start = locals->curr_start;
964 locals->vma->end = locals->curr_end;
966 locals->stop_at_next_vma = 1;
970 locals->curr_start = start; locals->curr_end = end;
975 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
977 struct callback_locals locals;
978 locals.address = address;
980 locals.curr_start = 0;
982 # if STACK_DIRECTION < 0
985 locals.stop_at_next_vma = 0;
989 vma_iterate (&locals);
990 if (locals.retval < 0)
992 if (locals.curr_start < locals.curr_end
993 && address >= locals.curr_start && address <= locals.curr_end - 1)
995 vma->start = locals.curr_start;
996 vma->end = locals.curr_end;
997 # if STACK_DIRECTION < 0
998 vma->prev_end = locals.prev_end;
1000 vma->next_start = 0;
1005 if (locals.retval == 0)
1007 # if !(STACK_DIRECTION < 0)
1008 if (locals.stop_at_next_vma)
1009 vma->next_start = 0;
1011 vma->is_near_this = simple_is_near_this;
1015 /* FreeBSD 6.[01] doesn't allow distinguishing unmapped pages from
1016 mapped but swapped-out pages. See whether it's fixed. */
1018 /* OK, mincore() appears to work as expected. */
1019 return mincore_get_vma (address, vma);
1023 /* --------------------------- stackvma-netbsd.c --------------------------- */
1025 #elif defined __NetBSD__ /* NetBSD */
1027 struct callback_locals
1030 struct vma_struct *vma;
1031 /* The stack appears as multiple adjacent segments, therefore we
1032 merge adjacent segments. */
1033 uintptr_t curr_start, curr_end;
1034 # if STACK_DIRECTION < 0
1037 int stop_at_next_vma;
1043 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
1045 if (start == locals->curr_end)
1047 /* Merge adjacent segments. */
1048 locals->curr_end = end;
1051 # if STACK_DIRECTION < 0
1052 if (locals->curr_start < locals->curr_end
1053 && locals->address >= locals->curr_start
1054 && locals->address <= locals->curr_end - 1)
1056 locals->vma->start = locals->curr_start;
1057 locals->vma->end = locals->curr_end;
1058 locals->vma->prev_end = locals->prev_end;
1062 locals->prev_end = locals->curr_end;
1064 if (locals->stop_at_next_vma)
1066 locals->vma->next_start = locals->curr_start;
1067 locals->stop_at_next_vma = 0;
1070 if (locals->curr_start < locals->curr_end
1071 && locals->address >= locals->curr_start
1072 && locals->address <= locals->curr_end - 1)
1074 locals->vma->start = locals->curr_start;
1075 locals->vma->end = locals->curr_end;
1077 locals->stop_at_next_vma = 1;
1081 locals->curr_start = start; locals->curr_end = end;
1086 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
1088 struct callback_locals locals;
1089 locals.address = address;
1091 locals.curr_start = 0;
1092 locals.curr_end = 0;
1093 # if STACK_DIRECTION < 0
1094 locals.prev_end = 0;
1096 locals.stop_at_next_vma = 0;
1100 vma_iterate (&locals);
1101 if (locals.retval < 0)
1103 if (locals.curr_start < locals.curr_end
1104 && address >= locals.curr_start && address <= locals.curr_end - 1)
1106 vma->start = locals.curr_start;
1107 vma->end = locals.curr_end;
1108 # if STACK_DIRECTION < 0
1109 vma->prev_end = locals.prev_end;
1111 vma->next_start = 0;
1116 if (locals.retval == 0)
1118 # if !(STACK_DIRECTION < 0)
1119 if (locals.stop_at_next_vma)
1120 vma->next_start = 0;
1122 vma->is_near_this = simple_is_near_this;
1126 return mincore_get_vma (address, vma);
1129 /* --------------------------- stackvma-mquery.c --------------------------- */
1131 /* mquery() is a system call that allows one to inquire the status of a
1132 range of pages of virtual memory. In particular, it allows one to inquire
1133 whether a page is mapped at all, and where is the next unmapped page
1134 after a given address.
1135 As of 2021, mquery() is supported by:
1136 - OpenBSD, since OpenBSD 3.4.
1137 Note that this file can give different results. For example, on
1138 OpenBSD 4.4 / i386 the stack segment (which starts around 0xcdbfe000)
1139 ends at 0xcfbfdfff according to mincore, but at 0xffffffff according to
1142 #elif defined __OpenBSD__ /* OpenBSD */
1144 # include <unistd.h> /* getpagesize, mincore */
1145 # include <sys/types.h>
1146 # include <sys/mman.h> /* mincore */
1148 /* Cache for getpagesize(). */
1149 static uintptr_t pagesize;
1151 /* Initialize pagesize. */
1153 init_pagesize (void)
1155 pagesize = getpagesize ();
1158 /* Test whether the page starting at ADDR is among the address range.
1159 ADDR must be a multiple of pagesize. */
1161 is_mapped (uintptr_t addr)
1163 /* Avoid calling mquery with a NULL first argument, because this argument
1164 value has a specific meaning. We know the NULL page is unmapped. */
1167 return mquery ((void *) addr, pagesize, 0, MAP_FIXED, -1, 0) == (void *) -1;
1170 /* Assuming that the page starting at ADDR is among the address range,
1171 return the start of its virtual memory range.
1172 ADDR must be a multiple of pagesize. */
1174 mapped_range_start (uintptr_t addr)
1177 uintptr_t known_unmapped_page;
1179 /* Look at smaller addresses, in larger and larger steps, to minimize the
1180 number of mquery() calls. */
1181 stepsize = pagesize;
1189 if (addr <= stepsize)
1191 known_unmapped_page = 0;
1195 hole = (uintptr_t) mquery ((void *) (addr - stepsize), pagesize,
1197 if (!(hole == (uintptr_t) (void *) -1 || hole >= addr))
1199 /* Some part of [addr - stepsize, addr - 1] is unmapped. */
1200 known_unmapped_page = hole;
1204 /* The entire range [addr - stepsize, addr - 1] is mapped. */
1207 if (2 * stepsize > stepsize && 2 * stepsize < addr)
1208 stepsize = 2 * stepsize;
1211 /* Now reduce the step size again.
1212 We know that the page at known_unmapped_page is unmapped and that
1213 0 < addr - known_unmapped_page <= stepsize. */
1214 while (stepsize > pagesize && stepsize / 2 >= addr - known_unmapped_page)
1215 stepsize = stepsize / 2;
1216 /* Still 0 < addr - known_unmapped_page <= stepsize. */
1217 while (stepsize > pagesize)
1221 stepsize = stepsize / 2;
1222 hole = (uintptr_t) mquery ((void *) (addr - stepsize), pagesize,
1224 if (!(hole == (uintptr_t) (void *) -1 || hole >= addr))
1225 /* Some part of [addr - stepsize, addr - 1] is unmapped. */
1226 known_unmapped_page = hole;
1228 /* The entire range [addr - stepsize, addr - 1] is mapped. */
1230 /* Still 0 < addr - known_unmapped_page <= stepsize. */
1236 /* Assuming that the page starting at ADDR is among the address range,
1237 return the end of its virtual memory range + 1.
1238 ADDR must be a multiple of pagesize. */
1240 mapped_range_end (uintptr_t addr)
1247 end = (uintptr_t) mquery ((void *) addr, pagesize, 0, 0, -1, 0);
1248 if (end == (uintptr_t) (void *) -1)
1249 end = 0; /* wrap around */
1253 /* Determine whether an address range [ADDR1..ADDR2] is completely unmapped.
1254 ADDR1 must be <= ADDR2. */
1256 is_unmapped (uintptr_t addr1, uintptr_t addr2)
1258 /* Round addr1 down. */
1259 addr1 = (addr1 / pagesize) * pagesize;
1260 /* Round addr2 up and turn it into an exclusive bound. */
1261 addr2 = ((addr2 / pagesize) + 1) * pagesize;
1263 /* Avoid calling mquery with a NULL first argument, because this argument
1264 value has a specific meaning. We know the NULL page is unmapped. */
1270 if (mquery ((void *) addr1, addr2 - addr1, 0, MAP_FIXED, -1, 0)
1272 /* Not all the interval [addr1 .. addr2 - 1] is unmapped. */
1275 /* The interval [addr1 .. addr2 - 1] is unmapped. */
1281 # if STACK_DIRECTION < 0
1283 /* Info about the gap between this VMA and the previous one.
1284 addr must be < vma->start. */
1286 mquery_is_near_this (uintptr_t addr, struct vma_struct *vma)
1288 /* vma->start - addr <= (vma->start - vma->prev_end) / 2
1289 is mathematically equivalent to
1290 vma->prev_end <= 2 * addr - vma->start
1291 <==> is_unmapped (2 * addr - vma->start, vma->start - 1).
1292 But be careful about overflow: if 2 * addr - vma->start is negative,
1293 we consider a tiny "guard page" mapping [0, 0] to be present around
1294 NULL; it intersects the range (2 * addr - vma->start, vma->start - 1),
1295 therefore return false. */
1296 uintptr_t testaddr = addr - (vma->start - addr);
1297 if (testaddr > addr) /* overflow? */
1299 /* Here testaddr <= addr < vma->start. */
1300 return is_unmapped (testaddr, vma->start - 1);
1304 # if STACK_DIRECTION > 0
1306 /* Info about the gap between this VMA and the next one.
1307 addr must be > vma->end - 1. */
1309 mquery_is_near_this (uintptr_t addr, struct vma_struct *vma)
1311 /* addr - vma->end < (vma->next_start - vma->end) / 2
1312 is mathematically equivalent to
1313 vma->next_start > 2 * addr - vma->end
1314 <==> is_unmapped (vma->end, 2 * addr - vma->end).
1315 But be careful about overflow: if 2 * addr - vma->end is > ~0UL,
1316 we consider a tiny "guard page" mapping [0, 0] to be present around
1317 NULL; it intersects the range (vma->end, 2 * addr - vma->end),
1318 therefore return false. */
/* testaddr is the mathematical value 2 * addr - vma->end, computed
   without a multiplication. */
1319 uintptr_t testaddr = addr + (addr - vma->end);
/* Unsigned wrap-around check: the addition above wrapped iff the result
   is below addr. */
1320 if (testaddr < addr) /* overflow? */
/* NOTE(review): the 'return false' statement for the overflow case
   (original line 1321) is elided from this excerpt. */
1322 /* Here vma->end - 1 < addr <= testaddr. */
1323 return is_unmapped (vma->end, testaddr);
/* Determine the VMA containing ADDRESS on systems with mquery() (OpenBSD):
   align ADDRESS down to a page boundary, then probe backward and forward
   with mquery to find the mapping's bounds.  Fills *vma on success.
   NOTE(review): the return type, braces, init_pagesize call, and return
   statements are elided from this excerpt. */
1329 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
1333 address = (address / pagesize) * pagesize;
1334 vma->start = mapped_range_start (address);
1335 vma->end = mapped_range_end (address);
1336 vma->is_near_this = mquery_is_near_this;
1340 /* ---------------------------- stackvma-mach.c ---------------------------- */
1342 #elif (defined __APPLE__ && defined __MACH__) /* macOS */
1346 #include <mach/mach.h>
1347 #include <mach/machine/vm_param.h>
/* Determine the VMA containing REQ_ADDRESS on Mach (macOS): walk the
   task's memory regions with vm_region / vm_region_64, merging regions
   that are exactly adjacent, and remember the merged region containing
   REQ_ADDRESS together with its neighbour's boundary.
   NOTE(review): braces, the declarations of 'more' and 'size', and the
   return statements are elided from this excerpt. */
1350 sigsegv_get_vma (uintptr_t req_address, struct vma_struct *vma)
1352 uintptr_t prev_address = 0, prev_size = 0;
1353 uintptr_t join_address = 0, join_size = 0;
1355 vm_address_t address;
1357 task_t task = mach_task_self ();
/* Iterate regions in ascending address order; vm_region rounds 'address'
   up to the next region and returns its size. */
1359 for (address = VM_MIN_ADDRESS; more; address += size)
1361 mach_port_t object_name;
1362 /* In MacOS X 10.5, the types vm_address_t, vm_offset_t, vm_size_t have
1363 32 bits in 32-bit processes and 64 bits in 64-bit processes. Whereas
1364 mach_vm_address_t and mach_vm_size_t are always 64 bits large.
1365 MacOS X 10.5 has three vm_region like methods:
1366 - vm_region. It has arguments that depend on whether the current
1367 process is 32-bit or 64-bit. When linking dynamically, this
1368 function exists only in 32-bit processes. Therefore we use it only
1369 in 32-bit processes.
1370 - vm_region_64. It has arguments that depend on whether the current
1371 process is 32-bit or 64-bit. It interprets a flavor
1372 VM_REGION_BASIC_INFO as VM_REGION_BASIC_INFO_64, which is
1373 dangerous since 'struct vm_region_basic_info_64' is larger than
1374 'struct vm_region_basic_info'; therefore let's write
1375 VM_REGION_BASIC_INFO_64 explicitly.
1376 - mach_vm_region. It has arguments that are 64-bit always. This
1377 function is useful when you want to access the VM of a process
1378 other than the current process.
1379 In 64-bit processes, we could use vm_region_64 or mach_vm_region.
1380 I choose vm_region_64 because it uses the same types as vm_region,
1381 resulting in less conditional code. */
1382 # if defined __aarch64__ || defined __ppc64__ || defined __x86_64__
1383 struct vm_region_basic_info_64 info;
1384 mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
1386 more = (vm_region_64 (task, &address, &size, VM_REGION_BASIC_INFO_64,
1387 (vm_region_info_t)&info, &info_count, &object_name)
1390 struct vm_region_basic_info info;
1391 mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT;
1393 more = (vm_region (task, &address, &size, VM_REGION_BASIC_INFO,
1394 (vm_region_info_t)&info, &info_count, &object_name)
/* NOTE(review): the '== KERN_SUCCESS' tail of both calls and the
   end-of-iteration handling are elided from this excerpt. */
1399 address = join_address + join_size;
/* If this region starts exactly where the joined run ends, extend the
   run; otherwise start a new run, remembering the previous one. */
1403 if ((uintptr_t) address == join_address + join_size)
1407 prev_address = join_address;
1408 prev_size = join_size;
1409 join_address = (uintptr_t) address;
/* vm_region returns a send right to the region's object; release it to
   avoid a port leak. */
1413 if (object_name != MACH_PORT_NULL)
1414 mach_port_deallocate (mach_task_self (), object_name);
1416 # if STACK_DIRECTION < 0
1417 if (join_address <= req_address && join_address + join_size > req_address)
1419 vma->start = join_address;
1420 vma->end = join_address + join_size;
1421 vma->prev_end = prev_address + prev_size;
1422 vma->is_near_this = simple_is_near_this;
1426 if (prev_address <= req_address && prev_address + prev_size > req_address)
1428 vma->start = prev_address;
1429 vma->end = prev_address + prev_size;
1430 vma->next_start = join_address;
1431 vma->is_near_this = simple_is_near_this;
1437 # if STACK_DIRECTION > 0
/* NOTE(review): this final check tests 'join_address + size' but fills
   vma with prev_address/prev_size -- verify against the upstream
   stackvma-mach.c whether this asymmetry is intentional (it appears to
   handle the last region of the address space). */
1438 if (join_address <= req_address && join_address + size > req_address)
1440 vma->start = prev_address;
1441 vma->end = prev_address + prev_size;
1442 vma->next_start = ~0UL;
1443 vma->is_near_this = simple_is_near_this;
1451 /* -------------------------------------------------------------------------- */
1453 #elif defined _AIX /* AIX */
/* On AIX there is no VMA enumeration API usable here; delegate to the
   mincore()-based probing implementation. */
1456 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
1458 return mincore_get_vma (address, vma);
1461 /* --------------------------- stackvma-procfs.h --------------------------- */
1463 #elif defined __sgi || defined __sun /* IRIX, Solaris */
1465 # include <errno.h> /* errno, EINTR */
1466 # include <fcntl.h> /* open, O_RDONLY */
1467 # include <stddef.h> /* size_t */
1468 # include <unistd.h> /* getpagesize, getpid, read, close */
1469 # include <sys/types.h>
1470 # include <sys/mman.h> /* mmap, munmap */
1471 # include <sys/stat.h> /* fstat */
1472 # include <string.h> /* memcpy */
1474 /* Try to use the newer ("structured") /proc filesystem API, if supported. */
1475 # define _STRUCTURED_PROC 1
1476 # include <sys/procfs.h> /* prmap_t, optionally PIOC* */
1480 /* Cache for getpagesize(). */
1481 static uintptr_t pagesize;
1483 /* Initialize pagesize. */
1485 init_pagesize (void)
/* NOTE(review): presumably called once before pagesize is read (callers
   not visible in this excerpt) -- confirm at the call sites. */
1487 pagesize = getpagesize ();
/* Communication area between sigsegv_get_vma and the per-VMA callback:
   carries the lookup address in and the result out.
   NOTE(review): several fields (the lookup address, 'prev', 'retval')
   are elided from this excerpt. */
1492 struct callback_locals
1495 struct vma_struct *vma;
1496 # if STACK_DIRECTION < 0
1499 int stop_at_next_vma;
/* Invoked once per VMA [start, end), in ascending address order.
   STACK_DIRECTION < 0: when the VMA containing locals->address is found,
   record it together with the end of the previous VMA.
   STACK_DIRECTION > 0: record the containing VMA, then keep iterating
   once more so the *next* VMA's start can be captured.
   NOTE(review): braces, return statements, and the update of
   locals->prev are elided from this excerpt. */
1505 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
1507 # if STACK_DIRECTION < 0
1508 if (locals->address >= start && locals->address <= end - 1)
1510 locals->vma->start = start;
1511 locals->vma->end = end;
1512 locals->vma->prev_end = locals->prev;
1518 if (locals->stop_at_next_vma)
1520 locals->vma->next_start = start;
1521 locals->stop_at_next_vma = 0;
1524 if (locals->address >= start && locals->address <= end - 1)
1526 locals->vma->start = start;
1527 locals->vma->end = end;
1529 locals->stop_at_next_vma = 1;
1536 /* Iterate over the virtual memory areas of the current process.
1537 If such iteration is supported, the callback is called once for every
1538 virtual memory area, in ascending order, with the following arguments:
1539 - LOCALS is the same argument as passed to vma_iterate.
1540 - START is the address of the first byte in the area, page-aligned.
1541 - END is the address of the last byte in the area plus 1, page-aligned.
1542 Note that it may be 0 for the last area in the address space.
1543 If the callback returns 0, the iteration continues. If it returns 1,
1544 the iteration terminates prematurely.
1545 This function may open file descriptors, but does not call malloc().
1546 Return 0 if all went well, or -1 in case of error. */
1547 /* This code is a simplified copy (no handling of protection flags) of the
1548 code in gnulib's lib/vma-iter.c. */
/* Enumerate the VMAs of the current process via /proc (IRIX, Solaris),
   calling callback() for each.  Two code paths: the older ioctl-based
   PIOCNMAP/PIOCMAP interface, and the newer structured /proc/<pid>/map
   file.  Both allocate a scratch buffer with raw mmap (not malloc) and
   exclude that buffer's own mapping from the reported VMAs.
   NOTE(review): braces, several declarations (fd, nmaps, memneed, maps,
   mp, zero_fd), error-path cleanup, and return statements are elided
   from this excerpt. */
1550 vma_iterate (struct callback_locals *locals)
1552 /* Note: Solaris <sys/procfs.h> defines a different type prmap_t with
1553 _STRUCTURED_PROC than without! Here's a table of sizeof(prmap_t):
1555 _STRUCTURED_PROC = 0 32 56
1556 _STRUCTURED_PROC = 1 96 104
1557 Therefore, if the include files provide the newer API, prmap_t has
1558 the bigger size, and thus you MUST use the newer API. And if the
1559 include files provide the older API, prmap_t has the smaller size,
1560 and thus you MUST use the older API. */
1562 # if defined PIOCNMAP && defined PIOCMAP
1563 /* We must use the older /proc interface. */
1565 char fnamebuf[6+10+1];
1570 # if HAVE_MAP_ANONYMOUS
1572 # define map_flags MAP_ANONYMOUS
1573 # else /* !HAVE_MAP_ANONYMOUS */
1575 # define map_flags 0
1578 uintptr_t auxmap_start;
1579 uintptr_t auxmap_end;
/* Build "/proc/<pid>" by hand (no sprintf: this code must not rely on
   stdio or allocate). */
1586 /* Construct fname = sprintf (fnamebuf+i, "/proc/%u", getpid ()). */
1587 fname = fnamebuf + sizeof (fnamebuf) - 1;
1590 unsigned int value = getpid ();
1592 *--fname = (value % 10) + '0';
1593 while ((value = value / 10) > 0);
1596 memcpy (fname, "/proc/", 6);
1598 fd = open (fname, O_RDONLY);
1602 if (ioctl (fd, PIOCNMAP, &nmaps) < 0)
/* Over-allocate by 10 entries: the map may grow between PIOCNMAP and
   PIOCMAP (e.g. due to our own mmap below). */
1605 memneed = (nmaps + 10) * sizeof (prmap_t);
1606 /* Allocate memneed bytes of memory.
1607 We cannot use alloca here, because not much stack space is guaranteed.
1608 We also cannot use malloc here, because a malloc() call may call mmap()
1609 and thus pre-allocate available memory.
1610 So use mmap(), and ignore the resulting VMA. */
1611 memneed = ((memneed - 1) / pagesize + 1) * pagesize;
1612 # if !HAVE_MAP_ANONYMOUS
1613 zero_fd = open ("/dev/zero", O_RDONLY, 0644);
1617 auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
1618 map_flags | MAP_PRIVATE, zero_fd, 0);
1619 # if !HAVE_MAP_ANONYMOUS
1622 if (auxmap == (void *) -1)
1624 auxmap_start = (uintptr_t) auxmap;
1625 auxmap_end = auxmap_start + memneed;
1626 maps = (prmap_t *) auxmap;
1628 if (ioctl (fd, PIOCMAP, maps) < 0)
1633 uintptr_t start, end;
1635 start = (uintptr_t) mp->pr_vaddr;
1636 end = start + mp->pr_size;
/* PIOCMAP terminates the array with an all-zero entry. */
1637 if (start == 0 && end == 0)
/* Report every VMA except the scratch buffer's own mapping, splitting a
   VMA that contains it into the parts before and after. */
1640 if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
1642 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
1643 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
1644 if (start < auxmap_start)
1645 if (callback (locals, start, auxmap_start))
1647 if (auxmap_end - 1 < end - 1)
1648 if (callback (locals, auxmap_end, end))
1653 if (callback (locals, start, end))
1657 munmap (auxmap, memneed);
1662 munmap (auxmap, memneed);
1668 /* We must use the newer /proc interface.
1670 https://docs.oracle.com/cd/E23824_01/html/821-1473/proc-4.html
1671 The contents of /proc/<pid>/map consists of records of type
1672 prmap_t. These are different in 32-bit and 64-bit processes,
1673 but here we are fortunately accessing only the current process. */
1675 char fnamebuf[6+10+4+1];
1680 # if HAVE_MAP_ANONYMOUS
1682 # define map_flags MAP_ANONYMOUS
1683 # else /* !HAVE_MAP_ANONYMOUS */
1685 # define map_flags 0
1688 uintptr_t auxmap_start;
1689 uintptr_t auxmap_end;
/* Build "/proc/<pid>/map" by hand, as above. */
1697 /* Construct fname = sprintf (fnamebuf+i, "/proc/%u/map", getpid ()). */
1698 fname = fnamebuf + sizeof (fnamebuf) - 1 - 4;
1699 memcpy (fname, "/map", 4 + 1);
1701 unsigned int value = getpid ();
1703 *--fname = (value % 10) + '0';
1704 while ((value = value / 10) > 0);
1707 memcpy (fname, "/proc/", 6);
1709 fd = open (fname, O_RDONLY);
1714 struct stat statbuf;
1715 if (fstat (fd, &statbuf) < 0)
1717 nmaps = statbuf.st_size / sizeof (prmap_t);
/* Over-allocate by 10 entries: the map may grow between fstat and the
   read loop (e.g. due to our own mmap below). */
1720 memneed = (nmaps + 10) * sizeof (prmap_t);
1721 /* Allocate memneed bytes of memory.
1722 We cannot use alloca here, because not much stack space is guaranteed.
1723 We also cannot use malloc here, because a malloc() call may call mmap()
1724 and thus pre-allocate available memory.
1725 So use mmap(), and ignore the resulting VMA. */
1726 memneed = ((memneed - 1) / pagesize + 1) * pagesize;
1727 # if !HAVE_MAP_ANONYMOUS
1728 zero_fd = open ("/dev/zero", O_RDONLY, 0644);
1732 auxmap = (void *) mmap ((void *) 0, memneed, PROT_READ | PROT_WRITE,
1733 map_flags | MAP_PRIVATE, zero_fd, 0);
1734 # if !HAVE_MAP_ANONYMOUS
1737 if (auxmap == (void *) -1)
1739 auxmap_start = (uintptr_t) auxmap;
1740 auxmap_end = auxmap_start + memneed;
1741 maps = (prmap_t *) auxmap;
1743 /* Read up to memneed bytes from fd into maps. */
1745 size_t remaining = memneed;
1746 size_t total_read = 0;
1747 char *ptr = (char *) maps;
1751 size_t nread = read (fd, ptr, remaining);
/* read() returns ssize_t; -1 converted to size_t flags an error.
   NOTE(review): the EINTR-retry and EOF handling (original lines
   1753-1760) are elided from this excerpt. */
1752 if (nread == (size_t)-1)
1761 total_read += nread;
1765 while (remaining > 0);
/* Only the entries actually read are valid. */
1767 nmaps = (memneed - remaining) / sizeof (prmap_t);
1768 maps_end = maps + nmaps;
1771 for (mp = maps; mp < maps_end; mp++)
1773 uintptr_t start, end;
1775 start = (uintptr_t) mp->pr_vaddr;
1776 end = start + mp->pr_size;
/* Report every VMA except the scratch buffer's own mapping, splitting a
   VMA that contains it into the parts before and after. */
1777 if (start <= auxmap_start && auxmap_end - 1 <= end - 1)
1779 /* Consider [start,end-1] \ [auxmap_start,auxmap_end-1]
1780 = [start,auxmap_start-1] u [auxmap_end,end-1]. */
1781 if (start < auxmap_start)
1782 if (callback (locals, start, auxmap_start))
1784 if (auxmap_end - 1 < end - 1)
1785 if (callback (locals, auxmap_end, end))
1790 if (callback (locals, start, end))
1794 munmap (auxmap, memneed);
1799 munmap (auxmap, memneed);
/* Determine the VMA containing ADDRESS on IRIX/Solaris by iterating
   /proc VMAs; if that fails, fall back to the mincore()-based probe.
   NOTE(review): braces, the initialization of locals.prev/retval, and
   the return statements are elided from this excerpt. */
1808 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
1810 struct callback_locals locals;
1811 locals.address = address;
1813 # if STACK_DIRECTION < 0
1816 locals.stop_at_next_vma = 0;
1820 vma_iterate (&locals);
1821 if (locals.retval == 0)
1823 # if !(STACK_DIRECTION < 0)
/* Iteration ended after finding the VMA but before seeing a successor:
   there is no next VMA, so report next_start = 0 (== "none"). */
1824 if (locals.stop_at_next_vma)
1825 vma->next_start = 0;
1827 vma->is_near_this = simple_is_near_this;
/* Fallback when /proc iteration did not find the VMA. */
1832 return mincore_get_vma (address, vma);
1838 /* -------------------------------------------------------------------------- */
1840 #elif defined __CYGWIN__ /* Cygwin */
/* Communication area between sigsegv_get_vma and the per-VMA callback
   on Cygwin.  Unlike the plain variant, it tracks a current merged
   segment, because a single logical region shows up as several pieces.
   NOTE(review): the lookup-address, 'prev_end', and 'retval' fields are
   partially elided from this excerpt. */
1842 struct callback_locals
1845 struct vma_struct *vma;
1846 /* The stack appears as three adjacent segments, therefore we
1847 merge adjacent segments. */
1848 uintptr_t curr_start, curr_end;
1849 # if STACK_DIRECTION < 0
1852 int stop_at_next_vma;
/* Invoked once per raw segment [start, end), in ascending order.
   Adjacent segments are coalesced into [curr_start, curr_end); the
   per-direction logic then runs only when a new, non-adjacent segment
   begins, i.e. on the completed merged segment.
   NOTE(review): braces and return statements are elided from this
   excerpt. */
1858 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
1860 if (start == locals->curr_end)
1862 /* Merge adjacent segments. */
1863 locals->curr_end = end;
1866 # if STACK_DIRECTION < 0
1867 if (locals->curr_start < locals->curr_end
1868 && locals->address >= locals->curr_start
1869 && locals->address <= locals->curr_end - 1)
1871 locals->vma->start = locals->curr_start;
1872 locals->vma->end = locals->curr_end;
1873 locals->vma->prev_end = locals->prev_end;
1877 locals->prev_end = locals->curr_end;
1879 if (locals->stop_at_next_vma)
1881 locals->vma->next_start = locals->curr_start;
1882 locals->stop_at_next_vma = 0;
1885 if (locals->curr_start < locals->curr_end
1886 && locals->address >= locals->curr_start
1887 && locals->address <= locals->curr_end - 1)
1889 locals->vma->start = locals->curr_start;
1890 locals->vma->end = locals->curr_end;
1892 locals->stop_at_next_vma = 1;
/* Start a new merged segment. */
1896 locals->curr_start = start; locals->curr_end = end;
/* Determine the VMA containing ADDRESS on Cygwin by iterating segments
   and merging adjacent ones.  Because the callback only processes a
   merged segment when the next one begins, the final merged segment must
   be checked here after the iteration.
   NOTE(review): braces, the initialization of locals.retval, and the
   return statements are elided from this excerpt. */
1901 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
1903 struct callback_locals locals;
1904 locals.address = address;
1906 locals.curr_start = 0;
1907 locals.curr_end = 0;
1908 # if STACK_DIRECTION < 0
1909 locals.prev_end = 0;
1911 locals.stop_at_next_vma = 0;
1915 vma_iterate (&locals);
1916 if (locals.retval < 0)
/* Handle the last merged segment, which the callback never flushed. */
1918 if (locals.curr_start < locals.curr_end
1919 && address >= locals.curr_start && address <= locals.curr_end - 1)
1921 vma->start = locals.curr_start;
1922 vma->end = locals.curr_end;
1923 # if STACK_DIRECTION < 0
1924 vma->prev_end = locals.prev_end;
/* The last segment has no successor: next_start = 0 means "none". */
1926 vma->next_start = 0;
1931 if (locals.retval == 0)
1933 # if !(STACK_DIRECTION < 0)
1934 if (locals.stop_at_next_vma)
1935 vma->next_start = 0;
1937 vma->is_near_this = simple_is_near_this;
1944 /* ---------------------------- stackvma-beos.h ---------------------------- */
1946 #elif defined __HAIKU__ /* Haiku */
1948 # include <OS.h> /* get_next_area_info */
/* Communication area between sigsegv_get_vma and the per-area callback
   on Haiku.  NOTE(review): the lookup-address, 'prev', and 'retval'
   fields are elided from this excerpt. */
1950 struct callback_locals
1953 struct vma_struct *vma;
1954 # if STACK_DIRECTION < 0
1957 int stop_at_next_vma;
/* Invoked once per area [start, end), in ascending address order.
   STACK_DIRECTION < 0: record the containing area with its predecessor's
   end.  STACK_DIRECTION > 0: record the containing area, then iterate
   once more to capture the successor's start.
   NOTE(review): braces, return statements, and the update of
   locals->prev are elided from this excerpt. */
1963 callback (struct callback_locals *locals, uintptr_t start, uintptr_t end)
1965 # if STACK_DIRECTION < 0
1966 if (locals->address >= start && locals->address <= end - 1)
1968 locals->vma->start = start;
1969 locals->vma->end = end;
1970 locals->vma->prev_end = locals->prev;
1976 if (locals->stop_at_next_vma)
1978 locals->vma->next_start = start;
1979 locals->stop_at_next_vma = 0;
1982 if (locals->address >= start && locals->address <= end - 1)
1984 locals->vma->start = start;
1985 locals->vma->end = end;
1987 locals->stop_at_next_vma = 1;
1994 /* Iterate over the virtual memory areas of the current process.
1995 If such iteration is supported, the callback is called once for every
1996 virtual memory area, in ascending order, with the following arguments:
1997 - LOCALS is the same argument as passed to vma_iterate.
1998 - START is the address of the first byte in the area, page-aligned.
1999 - END is the address of the last byte in the area plus 1, page-aligned.
2000 Note that it may be 0 for the last area in the address space.
2001 If the callback returns 0, the iteration continues. If it returns 1,
2002 the iteration terminates prematurely.
2003 This function may open file descriptors, but does not call malloc().
2004 Return 0 if all went well, or -1 in case of error. */
2005 /* This code is a simplified copy (no handling of protection flags) of the
2006 code in gnulib's lib/vma-iter.c. */
/* Enumerate the areas of the current team via Haiku's get_next_area_info,
   calling callback() for each in ascending order.
   NOTE(review): braces, the 'cookie' and 'info' declarations, and the
   return statements are elided from this excerpt. */
2008 vma_iterate (struct callback_locals *locals)
2014 while (get_next_area_info (0, &cookie, &info) == B_OK)
2016 uintptr_t start, end;
2018 start = (uintptr_t) info.address;
2019 end = start + info.size;
2021 if (callback (locals, start, end))
/* Determine the VMA containing ADDRESS on Haiku by iterating areas.
   NOTE(review): braces, the initialization of locals.prev/retval, and
   the return statements are elided from this excerpt. */
2028 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)
2030 struct callback_locals locals;
2031 locals.address = address;
2033 # if STACK_DIRECTION < 0
2036 locals.stop_at_next_vma = 0;
2040 vma_iterate (&locals);
2041 if (locals.retval == 0)
2043 # if !(STACK_DIRECTION < 0)
/* Iteration ended after finding the VMA but before seeing a successor:
   there is no next VMA, so report next_start = 0 (== "none"). */
2044 if (locals.stop_at_next_vma)
2045 vma->next_start = 0;
2047 vma->is_near_this = simple_is_near_this;
2053 /* -------------------------------------------------------------------------- */
2055 #else /* Hurd, Minix, ... */
2058 sigsegv_get_vma (uintptr_t address, struct vma_struct *vma)