Issue #271 (bdwgc).
* allchblk.c [USE_MUNMAP && MPROTECT_VDB] (GC_has_unmapped_memory):
Remove function definition.
* os_dep.c [MPROTECT_VDB && USE_MUNMAP] (GC_dirty_init): Likewise.
* doc/README.macros (USE_MUNMAP): Update the description.
* include/private/gc_priv.h [MPROTECT_VDB && USE_MUNMAP]
(GC_mprotect_dirty_init, GC_has_unmapped_memory): Remove prototype.
* include/private/gcconfig.h [USE_MUNMAP && GWW_VDB] (MPROTECT_VDB):
Do not undefine; remove TODO; remove comment.
* include/private/gcconfig.h: Remove the comment saying that PARALLEL_MARK
no longer causes MPROTECT_VDB to be undefined.
* os_dep.c [MPROTECT_VDB && !USE_MUNMAP] (GC_mprotect_dirty_init):
Remove macro.
* os_dep.c [MPROTECT_VDB] (GC_mprotect_dirty_init): Rename to
GC_dirty_init.
* tests/test.c [!GC_DISABLE_INCREMENTAL && (TEST_DEFAULT_VDB
|| !DEFAULT_VDB)] (main): Call GC_enable_incremental() even if both
MPROTECT_VDB and USE_MUNMAP are defined.
}
}
-# ifdef MPROTECT_VDB
- GC_INNER GC_bool GC_has_unmapped_memory(void)
- {
- int i;
-
- for (i = 0; i <= N_HBLK_FLS; ++i) {
- struct hblk * h;
- hdr * hhdr;
-
- for (h = GC_hblkfreelist[i]; h != NULL; h = hhdr -> hb_next) {
- hhdr = HDR(h);
- if (!IS_MAPPED(hhdr)) return TRUE;
- }
- }
- return FALSE;
- }
-# endif /* MPROTECT_VDB */
-
/* Merge all unmapped blocks that are adjacent to other free */
/* blocks. This may involve remapping, since all blocks are either */
/* fully mapped or fully unmapped. */
Works for Linux, FreeBSD, Cygwin, Solaris and Irix.
USE_MUNMAP Causes memory to be returned to the OS under the right
- circumstances. This currently disables VM-based incremental collection
- (except for Win32 with GetWriteWatch() available).
- Works under some Unix, Linux and Windows versions.
+ circumstances. Works under some Unix, Linux and Windows versions.
Requires USE_MMAP except for Windows.
USE_WINALLOC (Cygwin only) Use Win32 VirtualAlloc (instead of sbrk or mmap)
GC_EXTERN ptr_t * GC_gcjobjfreelist;
#endif
-#ifdef MPROTECT_VDB
-# ifdef GWW_VDB
+#if defined(MPROTECT_VDB) && defined(GWW_VDB)
GC_INNER GC_bool GC_gww_dirty_init(void);
/* Returns TRUE if GetWriteWatch is available. */
/* May be called repeatedly. */
-# endif
-# ifdef USE_MUNMAP
- GC_INNER GC_bool GC_mprotect_dirty_init(void);
- GC_INNER GC_bool GC_has_unmapped_memory(void);
-# endif
-#endif /* MPROTECT_VDB */
+#endif
#if defined(CHECKSUMS) || defined(PROC_VDB)
GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk * h);
# undef MPROTECT_VDB
#endif
-#if defined(USE_MUNMAP) && defined(GWW_VDB)
-# undef MPROTECT_VDB /* TODO: Cannot deal with address space holes. */
- /* Else if MPROTECT_VDB is available but not GWW_VDB then decide */
- /* whether to disable memory unmapping or mprotect-based virtual */
- /* dirty bits at runtime when GC_enable_incremental is called. */
-#endif
-
-/* PARALLEL_MARK does not cause undef MPROTECT_VDB any longer. */
-
#if defined(MPROTECT_VDB) && defined(GC_PREFER_MPROTECT_VDB)
/* Choose MPROTECT_VDB manually (if multiple strategies available). */
# undef PCR_VDB
# endif
#endif /* !DARWIN */
-#ifdef USE_MUNMAP
- /* MPROTECT_VDB cannot deal with address space holes (for now), */
- /* so if the collector is configured with both MPROTECT_VDB and */
- /* USE_MUNMAP then, as a work around, select only one of them */
- /* during GC_init or GC_enable_incremental. */
- GC_INNER GC_bool GC_dirty_init(void)
- {
- if (GC_unmap_threshold != 0) {
- if (GETENV("GC_UNMAP_THRESHOLD") != NULL
- || GETENV("GC_FORCE_UNMAP_ON_GCOLLECT") != NULL
- || GC_has_unmapped_memory()) {
- WARN("Can't maintain mprotect-based dirty bits"
- " in case of unmapping\n", 0);
- return FALSE;
- }
- GC_unmap_threshold = 0; /* in favor of incremental collection */
- WARN("Memory unmapping is disabled as incompatible"
- " with MPROTECT_VDB\n", 0);
- }
- return GC_mprotect_dirty_init();
- }
-#else
-# define GC_mprotect_dirty_init GC_dirty_init
-#endif /* !USE_MUNMAP */
-
#if !defined(DARWIN)
- GC_INNER GC_bool GC_mprotect_dirty_init(void)
+ GC_INNER GC_bool GC_dirty_init(void)
{
# if !defined(MSWIN32) && !defined(MSWINCE)
struct sigaction act, oldact;
}
#endif /* BROKEN_EXCEPTION_HANDLING */
-GC_INNER GC_bool GC_mprotect_dirty_init(void)
+GC_INNER GC_bool GC_dirty_init(void)
{
kern_return_t r;
mach_port_t me;
GC_set_warn_proc(warn_proc);
# if !defined(GC_DISABLE_INCREMENTAL) \
&& (defined(TEST_DEFAULT_VDB) || !defined(DEFAULT_VDB))
-# if !defined(MAKE_BACK_GRAPH) && !defined(NO_INCREMENTAL) \
- && !(defined(MPROTECT_VDB) && defined(USE_MUNMAP))
+# if !defined(MAKE_BACK_GRAPH) && !defined(NO_INCREMENTAL)
GC_enable_incremental();
# endif
if (GC_is_incremental_mode()) {
# if !defined(GC_DISABLE_INCREMENTAL) \
&& (defined(TEST_DEFAULT_VDB) || !defined(DEFAULT_VDB))
# if !defined(REDIRECT_MALLOC) && !defined(MAKE_BACK_GRAPH) \
- && !defined(USE_PROC_FOR_LIBRARIES) && !defined(NO_INCREMENTAL) \
- && !defined(USE_MUNMAP)
+ && !defined(USE_PROC_FOR_LIBRARIES) && !defined(NO_INCREMENTAL)
GC_enable_incremental();
# endif
if (GC_is_incremental_mode()) {