2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
8 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
11 * Permission is hereby granted to use or copy this program
12 * for any purpose, provided the above notices are retained on all copies.
13 * Permission to modify the code and to distribute modified code is granted,
14 * provided the above notices are retained, and a notice that the code was
15 * modified is included with the above copyright notice.
25 #if !defined(GC_BUILD) && !defined(NOT_GCBUILD)
29 #if (defined(__linux__) || defined(__GLIBC__) || defined(__GNU__)) \
30 && !defined(_GNU_SOURCE)
31 /* Can't test LINUX, since this must be defined before other includes. */
32 # define _GNU_SOURCE 1
35 #if defined(__INTERIX) && !defined(_ALL_SOURCE)
36 # define _ALL_SOURCE 1
39 #if (defined(DGUX) && defined(GC_THREADS) || defined(DGUX386_THREADS) \
40 || defined(GC_DGUX386_THREADS)) && !defined(_USING_POSIX4A_DRAFT10)
41 # define _USING_POSIX4A_DRAFT10 1
44 #if defined(__MINGW32__) && !defined(__MINGW_EXCPT_DEFINE_PSDK) \
45 && defined(__i386__) && defined(GC_EXTERN) /* defined in gc.c */
46 /* See the description in mark.c. */
47 # define __MINGW_EXCPT_DEFINE_PSDK 1
50 # if defined(NO_DEBUGGING) && !defined(GC_ASSERTIONS) && !defined(NDEBUG)
51 /* To turn off assertion checking (in atomic_ops.h). */
60 #if !defined(sony_news)
65 # include <sys/types.h>
66 # include <sys/time.h>
67 # include <sys/resource.h>
71 # include <sys/types.h>
72 # include <sys/time.h>
73 # include <sys/resource.h>
77 # define AO_REQUIRE_CAS
78 # if !defined(__GNUC__) && !defined(AO_ASSUME_WINDOWS98)
79 # define AO_ASSUME_WINDOWS98
83 #include "../gc_tiny_fl.h"
84 #include "../gc_mark.h"
87 typedef GC_signed_word signed_word;
88 typedef unsigned int unsigned32;
95 typedef char * ptr_t; /* A generic pointer to which we can add */
96 /* byte displacements and which can be used */
97 /* for address comparisons. */
98 # define PTR_T_DEFINED
104 #if defined(SIZE_MAX) && !defined(CPPCHECK)
105 # define GC_SIZE_MAX ((size_t)SIZE_MAX)
106 /* Extra cast to workaround some buggy SIZE_MAX definitions. */
108 # define GC_SIZE_MAX (~(size_t)0)
111 #if GC_GNUC_PREREQ(3, 0) && !defined(LINT2)
112 # define EXPECT(expr, outcome) __builtin_expect(expr,outcome)
113 /* Equivalent to (expr), but predict that usually (expr)==outcome. */
115 # define EXPECT(expr, outcome) (expr)
116 #endif /* __GNUC__ */
/* Saturated addition of size_t values.  Used to avoid value wrap     */
/* around on overflow.  The arguments should have no side effects     */
/* (each is evaluated more than once).  Result is GC_SIZE_MAX if the  */
/* exact sum would overflow.                                          */
#define SIZET_SAT_ADD(a, b) \
            (EXPECT((a) < GC_SIZE_MAX - (b), TRUE) ? (a) + (b) : GC_SIZE_MAX)
123 #include "gcconfig.h"
125 #if !defined(GC_ATOMIC_UNCOLLECTABLE) && defined(ATOMIC_UNCOLLECTABLE)
126 /* For compatibility with old-style naming. */
127 # define GC_ATOMIC_UNCOLLECTABLE
131 /* This tagging macro must be used at the start of every variable */
132 /* definition which is declared with GC_EXTERN. Should be also used */
133 /* for the GC-scope function definitions and prototypes. Must not be */
134 /* used in gcconfig.h. Shouldn't be used for the debugging-only */
135 /* functions. Currently, not used for the functions declared in or */
136 /* called from the "dated" source files (located in "extra" folder). */
137 # if defined(GC_DLL) && defined(__GNUC__) && !defined(MSWIN32) \
138 && !defined(MSWINCE) && !defined(CYGWIN32)
139 # if GC_GNUC_PREREQ(4, 0) && !defined(GC_NO_VISIBILITY)
140 /* See the corresponding GC_API definition. */
141 # define GC_INNER __attribute__((__visibility__("hidden")))
143 /* The attribute is unsupported. */
144 # define GC_INNER /* empty */
147 # define GC_INNER /* empty */
150 # define GC_EXTERN extern GC_INNER
151 /* Used only for the GC-scope variables (prefixed with "GC_") */
152 /* declared in the header files. Must not be used for thread-local */
153 /* variables. Must not be used in gcconfig.h. Shouldn't be used for */
154 /* the debugging-only or profiling-only variables. Currently, not */
155 /* used for the variables accessed from the "dated" source files */
156 /* (specific.c/h, and in the "extra" folder). */
157 /* The corresponding variable definition must start with GC_INNER. */
158 #endif /* !GC_INNER */
161 /* Register storage specifier is deprecated in C++11. */
162 # define REGISTER /* empty */
164 /* Used only for several local variables in the performance-critical */
165 /* functions. Should not be used for new code. */
166 # define REGISTER register
169 #if defined(CPPCHECK)
170 # define MACRO_BLKSTMT_BEGIN {
171 # define MACRO_BLKSTMT_END }
173 # define MACRO_BLKSTMT_BEGIN do {
174 # define MACRO_BLKSTMT_END } while (0)
178 # include "gc_hdrs.h"
181 #ifndef GC_ATTR_NO_SANITIZE_ADDR
182 # ifndef ADDRESS_SANITIZER
183 # define GC_ATTR_NO_SANITIZE_ADDR /* empty */
184 # elif GC_CLANG_PREREQ(3, 8)
185 # define GC_ATTR_NO_SANITIZE_ADDR __attribute__((no_sanitize("address")))
187 # define GC_ATTR_NO_SANITIZE_ADDR __attribute__((no_sanitize_address))
189 #endif /* !GC_ATTR_NO_SANITIZE_ADDR */
191 #ifndef GC_ATTR_NO_SANITIZE_MEMORY
192 # ifndef MEMORY_SANITIZER
193 # define GC_ATTR_NO_SANITIZE_MEMORY /* empty */
194 # elif GC_CLANG_PREREQ(3, 8)
195 # define GC_ATTR_NO_SANITIZE_MEMORY __attribute__((no_sanitize("memory")))
197 # define GC_ATTR_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
199 #endif /* !GC_ATTR_NO_SANITIZE_MEMORY */
201 #ifndef GC_ATTR_NO_SANITIZE_THREAD
202 # ifndef THREAD_SANITIZER
203 # define GC_ATTR_NO_SANITIZE_THREAD /* empty */
204 # elif GC_CLANG_PREREQ(3, 8)
205 # define GC_ATTR_NO_SANITIZE_THREAD __attribute__((no_sanitize("thread")))
207 # define GC_ATTR_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread))
209 #endif /* !GC_ATTR_NO_SANITIZE_THREAD */
211 #ifndef GC_ATTR_UNUSED
212 # if GC_GNUC_PREREQ(3, 4)
213 # define GC_ATTR_UNUSED __attribute__((__unused__))
215 # define GC_ATTR_UNUSED /* empty */
217 #endif /* !GC_ATTR_UNUSED */
220 /* The "inline" keyword is determined by Autoconf AC_C_INLINE. */
221 # define GC_INLINE static inline
222 #elif defined(_MSC_VER) || defined(__INTEL_COMPILER) || defined(__DMC__) \
223 || (GC_GNUC_PREREQ(3, 0) && defined(__STRICT_ANSI__)) \
224 || defined(__WATCOMC__)
225 # define GC_INLINE static __inline
226 #elif GC_GNUC_PREREQ(3, 0) || defined(__sun)
227 # define GC_INLINE static inline
229 # define GC_INLINE static
232 #ifndef GC_ATTR_NOINLINE
233 # if GC_GNUC_PREREQ(4, 0)
234 # define GC_ATTR_NOINLINE __attribute__((__noinline__))
235 # elif _MSC_VER >= 1400
236 # define GC_ATTR_NOINLINE __declspec(noinline)
238 # define GC_ATTR_NOINLINE /* empty */
242 #ifndef GC_API_OSCALL
243 /* This is used to identify GC routines called by name from OS. */
244 # if defined(__GNUC__)
245 # if GC_GNUC_PREREQ(4, 0) && !defined(GC_NO_VISIBILITY)
246 /* Same as GC_API if GC_DLL. */
247 # define GC_API_OSCALL extern __attribute__((__visibility__("default")))
249 /* The attribute is unsupported. */
250 # define GC_API_OSCALL extern
253 # define GC_API_OSCALL GC_API
258 # define GC_API_PRIV GC_API
261 #if defined(THREADS) && !defined(NN_PLATFORM_CTR)
262 # include "gc_atomic_ops.h"
263 # ifndef AO_HAVE_compiler_barrier
264 # define AO_HAVE_compiler_barrier 1
268 #if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
269 # ifndef WIN32_LEAN_AND_MEAN
270 # define WIN32_LEAN_AND_MEAN 1
273 # include <windows.h>
274 # include <winbase.h>
277 #include "gc_locks.h"
/* The maximum value representable in a word (all bits set).          */
#define GC_WORD_MAX (~(word)0)
281 # ifdef STACK_GROWS_DOWN
282 # define COOLER_THAN >
283 # define HOTTER_THAN <
284 # define MAKE_COOLER(x,y) if ((word)((x) + (y)) > (word)(x)) {(x) += (y);} \
285 else (x) = (ptr_t)GC_WORD_MAX
286 # define MAKE_HOTTER(x,y) (x) -= (y)
288 # define COOLER_THAN <
289 # define HOTTER_THAN >
290 # define MAKE_COOLER(x,y) if ((word)((x) - (y)) < (word)(x)) {(x) -= (y);} \
292 # define MAKE_HOTTER(x,y) (x) += (y)
295 #if defined(AMIGA) && defined(__SASC)
296 # define GC_FAR __far
302 /*********************************/
304 /* Definitions for conservative */
307 /*********************************/
309 /*********************************/
311 /* Easily changeable parameters */
313 /*********************************/
315 /* #define ALL_INTERIOR_POINTERS */
316 /* Forces all pointers into the interior of an */
317 /* object to be considered valid. Also causes the */
318 /* sizes of all objects to be inflated by at least */
319 /* one byte. This should suffice to guarantee */
320 /* that in the presence of a compiler that does */
321 /* not perform garbage-collector-unsafe */
322 /* optimizations, all portable, strictly ANSI */
323 /* conforming C programs should be safely usable */
324 /* with malloc replaced by GC_malloc and free */
325 /* calls removed. There are several disadvantages: */
326 /* 1. There are probably no interesting, portable, */
327 /* strictly ANSI conforming C programs. */
328 /* 2. This option makes it hard for the collector */
329 /* to allocate space that is not "pointed to" */
330 /* by integers, etc. Under SunOS 4.X with a */
331 /* statically linked libc, we empirically */
332 /* observed that it would be difficult to */
333 /* allocate individual objects > 100 KB. */
334 /* Even if only smaller objects are allocated, */
335 /* more swap space is likely to be needed. */
336 /* Fortunately, much of this will never be */
338 /* If you can easily avoid using this option, do. */
339 /* If not, try to keep individual objects small. */
340 /* This is now really controlled at startup, */
341 /* through GC_all_interior_pointers. */
345 #ifndef GC_NO_FINALIZATION
346 # define GC_INVOKE_FINALIZERS() GC_notify_or_invoke_finalizers()
347 GC_INNER void GC_notify_or_invoke_finalizers(void);
348 /* If GC_finalize_on_demand is not set, invoke */
349 /* eligible finalizers. Otherwise: */
350 /* Call *GC_finalizer_notifier if there are */
351 /* finalizers to be run, and we haven't called */
352 /* this procedure yet this GC cycle. */
354 GC_INNER void GC_finalize(void);
355 /* Perform all indicated finalization actions */
356 /* on unmarked objects. */
357 /* Unreachable finalizable objects are enqueued */
358 /* for processing by GC_invoke_finalizers. */
359 /* Invoked with lock. */
361 # ifndef GC_TOGGLE_REFS_NOT_NEEDED
362 GC_INNER void GC_process_togglerefs(void);
363 /* Process the toggle-refs before GC starts. */
365 # ifndef SMALL_CONFIG
366 GC_INNER void GC_print_finalization_stats(void);
369 # define GC_INVOKE_FINALIZERS() (void)0
370 #endif /* GC_NO_FINALIZATION */
372 #if !defined(DONT_ADD_BYTE_AT_END)
374 /* Explicitly instruct the code analysis tool that */
375 /* GC_all_interior_pointers is assumed to have only 0 or 1 value. */
376 # define EXTRA_BYTES ((size_t)(GC_all_interior_pointers? 1 : 0))
378 # define EXTRA_BYTES (size_t)GC_all_interior_pointers
380 # define MAX_EXTRA_BYTES 1
382 # define EXTRA_BYTES 0
383 # define MAX_EXTRA_BYTES 0
387 # ifndef LARGE_CONFIG
388 # define MINHINCR 16 /* Minimum heap increment, in blocks of HBLKSIZE */
389 /* Must be multiple of largest page size. */
390 # define MAXHINCR 2048 /* Maximum heap increment, in blocks */
393 # define MAXHINCR 4096
396 # define BL_LIMIT GC_black_list_spacing
397 /* If we need a block of N bytes, and we have */
398 /* a block of N + BL_LIMIT bytes available, */
399 /* and N > BL_LIMIT, */
400 /* but all possible positions in it are */
401 /* blacklisted, we just use it anyway (and */
402 /* print a warning, if warnings are enabled). */
403 /* This risks subsequently leaking the block */
404 /* due to a false reference. But not using */
405 /* the block risks unreasonable immediate */
408 /*********************************/
410 /* Stack saving for debugging */
412 /*********************************/
416 word ci_pc; /* Caller, not callee, pc */
418 word ci_arg[NARGS]; /* bit-wise complement to avoid retention */
420 # if (NFRAMES * (NARGS + 1)) % 2 == 1
421 /* Likely alignment problem. */
427 #ifdef SAVE_CALL_CHAIN
/* Fill in the pc and argument information for up to NFRAMES of my  */
/* callers.  Ignore my frame and my caller's frame.                 */
430 GC_INNER void GC_save_callers(struct callinfo info[NFRAMES]);
431 GC_INNER void GC_print_callers(struct callinfo info[NFRAMES]);
436 /*********************************/
438 /* OS interface routines */
440 /*********************************/
447 # define CLOCK_TYPE struct timeval
448 # define CLOCK_TYPE_INITIALIZER { 0, 0 }
449 # define GET_TIME(x) \
451 struct rusage rusage; \
452 getrusage(RUSAGE_SELF, &rusage); \
453 x = rusage.ru_utime; \
/* Difference of two timeval-s in milliseconds, rounded down (the     */
/* fractional part, if any, is reported by NS_FRAC_TIME_DIFF).        */
# define MS_TIME_DIFF(a,b) ((unsigned long)((long)(a.tv_sec-b.tv_sec) * 1000 \
                + (long)(a.tv_usec - b.tv_usec) / 1000 \
                - (a.tv_usec < b.tv_usec \
                   && (long)(a.tv_usec - b.tv_usec) % 1000 != 0 ? 1 : 0)))
                        /* "a" time is expected to be not earlier than  */
                        /* "b" one; the result has unsigned long type.  */
/* The remaining fraction of the difference, in nanoseconds.          */
# define NS_FRAC_TIME_DIFF(a, b) ((unsigned long) \
                ((a.tv_usec < b.tv_usec \
                  && (long)(a.tv_usec - b.tv_usec) % 1000 != 0 ? 1000L : 0) \
                 + (long)(a.tv_usec - b.tv_usec) % 1000) * 1000)
                        /* The total time difference could be computed as */
                        /* MS_TIME_DIFF(a,b)*1000000+NS_FRAC_TIME_DIFF(a,b). */
467 #elif defined(MSWIN32) || defined(MSWINCE) || defined(WINXP_USE_PERF_COUNTER)
468 # if defined(MSWINRT_FLAVOR) || defined(WINXP_USE_PERF_COUNTER)
469 # define CLOCK_TYPE ULONGLONG
470 # define GET_TIME(x) \
472 LARGE_INTEGER freq, tc; \
473 if (!QueryPerformanceFrequency(&freq) \
474 || !QueryPerformanceCounter(&tc)) \
475 ABORT("QueryPerformanceCounter requires WinXP+"); \
476 x = (CLOCK_TYPE)((double)tc.QuadPart/freq.QuadPart * 1e9); \
478 /* TODO: Call QueryPerformanceFrequency once at GC init. */
479 # define MS_TIME_DIFF(a, b) ((unsigned long)(((a) - (b)) / 1000000UL))
480 # define NS_FRAC_TIME_DIFF(a, b) ((unsigned long)(((a) - (b)) % 1000000UL))
482 # define CLOCK_TYPE DWORD
483 # define GET_TIME(x) (void)(x = GetTickCount())
484 # define MS_TIME_DIFF(a, b) ((unsigned long)((a) - (b)))
485 # define NS_FRAC_TIME_DIFF(a, b) 0UL
486 # endif /* !WINXP_USE_PERF_COUNTER */
487 #elif defined(NN_PLATFORM_CTR)
488 # define CLOCK_TYPE long long
490 CLOCK_TYPE n3ds_get_system_tick(void);
491 CLOCK_TYPE n3ds_convert_tick_to_ms(CLOCK_TYPE tick);
493 # define GET_TIME(x) (void)(x = n3ds_get_system_tick())
494 # define MS_TIME_DIFF(a,b) ((unsigned long)n3ds_convert_tick_to_ms((a)-(b)))
495 # define NS_FRAC_TIME_DIFF(a, b) 0UL /* TODO: implement it */
496 #else /* !BSD_TIME && !NN_PLATFORM_CTR && !MSWIN32 && !MSWINCE */
498 # if defined(FREEBSD) && !defined(CLOCKS_PER_SEC)
499 # include <machine/limits.h>
500 # define CLOCKS_PER_SEC CLK_TCK
502 # if !defined(CLOCKS_PER_SEC)
503 # define CLOCKS_PER_SEC 1000000
504 /* This is technically a bug in the implementation. */
505 /* ANSI requires that CLOCKS_PER_SEC be defined. But at least */
506 /* under SunOS 4.1.1, it isn't. Also note that the combination of */
507 /* ANSI C and POSIX is incredibly gross here. The type clock_t */
508 /* is used by both clock() and times(). But on some machines */
509 /* these use different notions of a clock tick, CLOCKS_PER_SEC */
510 /* seems to apply only to clock. Hence we use it here. On many */
511 /* machines, including SunOS, clock actually uses units of */
512 /* microseconds (which are not really clock ticks). */
514 # define CLOCK_TYPE clock_t
515 # define GET_TIME(x) (void)(x = clock())
/* Difference of two clock_t values in milliseconds; integer-only.    */
# define MS_TIME_DIFF(a,b) (CLOCKS_PER_SEC % 1000 == 0 ? \
        (unsigned long)((a) - (b)) / (unsigned long)(CLOCKS_PER_SEC / 1000) \
        : ((unsigned long)((a) - (b)) * 1000) / (unsigned long)CLOCKS_PER_SEC)
  /* Avoid using double type since some targets (like ARM) might      */
  /* require -lm option for double-to-long conversion.                */
/* The sub-millisecond remainder of the difference, in nanoseconds    */
/* (zero if the clock resolution is 1 ms or coarser).                 */
# define NS_FRAC_TIME_DIFF(a, b) (CLOCKS_PER_SEC <= 1000 ? 0UL \
    : (unsigned long)(CLOCKS_PER_SEC <= (clock_t)1000000UL \
        ? (((a) - (b)) * ((clock_t)1000000UL / CLOCKS_PER_SEC) % 1000) * 1000 \
        : (CLOCKS_PER_SEC <= (clock_t)1000000UL * 1000 \
            ? ((a) - (b)) * ((clock_t)1000000UL * 1000 / CLOCKS_PER_SEC) \
            : (((a) - (b)) * (clock_t)1000000UL * 1000) / CLOCKS_PER_SEC) \
          % (clock_t)1000000UL))
528 #endif /* !BSD_TIME && !MSWIN32 */
529 # ifndef CLOCK_TYPE_INITIALIZER
530 /* This is used to initialize CLOCK_TYPE variables (to some value) */
531 /* to avoid "variable might be uninitialized" compiler warnings. */
532 # define CLOCK_TYPE_INITIALIZER 0
534 #endif /* !NO_CLOCK */
536 /* We use bzero and bcopy internally. They may not be available. */
537 # if defined(SPARC) && defined(SUNOS4) \
538 || (defined(M68K) && defined(NEXT)) || defined(VAX)
539 # define BCOPY_EXISTS
540 # elif defined(AMIGA) || defined(DARWIN)
542 # define BCOPY_EXISTS
543 # elif defined(MACOS) && defined(POWERPC)
544 # include <MacMemory.h>
545 # define bcopy(x,y,n) BlockMoveData(x, y, n)
546 # define bzero(x,n) BlockZero(x, n)
547 # define BCOPY_EXISTS
550 # if !defined(BCOPY_EXISTS) || defined(CPPCHECK)
552 # define BCOPY(x,y,n) memcpy(y, x, (size_t)(n))
553 # define BZERO(x,n) memset(x, 0, (size_t)(n))
555 # define BCOPY(x,y,n) bcopy((void *)(x),(void *)(y),(size_t)(n))
556 # define BZERO(x,n) bzero((void *)(x),(size_t)(n))
560 # include "th/PCR_ThCtl.h"
566 * Stop and restart mutator threads.
569 # define STOP_WORLD() \
570 PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_stopNormal, \
571 PCR_allSigsBlocked, \
573 # define START_WORLD() \
574 PCR_ThCtl_SetExclusiveMode(PCR_ThCtl_ExclusiveMode_null, \
575 PCR_allSigsBlocked, \
578 # if defined(NN_PLATFORM_CTR) || defined(NINTENDO_SWITCH) \
579 || defined(GC_WIN32_THREADS) || defined(GC_PTHREADS)
580 GC_INNER void GC_stop_world(void);
581 GC_INNER void GC_start_world(void);
582 # define STOP_WORLD() GC_stop_world()
583 # define START_WORLD() GC_start_world()
585 /* Just do a sanity check: we are not inside GC_do_blocking(). */
586 # define STOP_WORLD() GC_ASSERT(GC_blocked_sp == NULL)
587 # define START_WORLD()
592 GC_EXTERN GC_on_thread_event_proc GC_on_thread_event;
596 # if defined(SMALL_CONFIG) || defined(PCR)
597 # define GC_on_abort(msg) (void)0 /* be silent on abort */
599 GC_API_PRIV GC_abort_func GC_on_abort;
601 # if defined(CPPCHECK)
602 # define ABORT(msg) { GC_on_abort(msg); abort(); }
604 # define ABORT(s) PCR_Base_Panic(s)
606 # if defined(MSWIN_XBOX1) && !defined(DebugBreak)
607 # define DebugBreak() __debugbreak()
608 # elif defined(MSWINCE) && !defined(DebugBreak) \
609 && (!defined(UNDER_CE) || (defined(__MINGW32CE__) && !defined(ARM32)))
610 /* This simplifies linking for WinCE (and, probably, doesn't */
611 /* hurt debugging much); use -DDebugBreak=DebugBreak to override */
612 /* this behavior if really needed. This is also a workaround for */
613 /* x86mingw32ce toolchain (if it is still declaring DebugBreak() */
614 /* instead of defining it as a macro). */
615 # define DebugBreak() _exit(-1) /* there is no abort() in WinCE */
617 # if defined(MSWIN32) && (defined(NO_DEBUGGING) || defined(LINT2))
618 /* A more user-friendly abort after showing fatal message. */
619 # define ABORT(msg) (GC_on_abort(msg), _exit(-1))
620 /* Exit on error without running "at-exit" callbacks. */
621 # elif defined(MSWINCE) && defined(NO_DEBUGGING)
622 # define ABORT(msg) (GC_on_abort(msg), ExitProcess(-1))
623 # elif defined(MSWIN32) || defined(MSWINCE)
624 # if defined(_CrtDbgBreak) && defined(_DEBUG) && defined(_MSC_VER)
625 # define ABORT(msg) { GC_on_abort(msg); \
626 _CrtDbgBreak() /* __debugbreak() */; }
628 # define ABORT(msg) { GC_on_abort(msg); DebugBreak(); }
629 /* Note that: on a WinCE box, this could be silently */
630 /* ignored (i.e., the program is not aborted); */
631 /* DebugBreak is a statement in some toolchains. */
634 # define ABORT(msg) (GC_on_abort(msg), abort())
635 # endif /* !MSWIN32 */
638 /* For abort message with 1-3 arguments. C_msg and C_fmt should be */
639 /* literals. C_msg should not contain format specifiers. Arguments */
640 /* should match their format specifiers. */
641 #define ABORT_ARG1(C_msg, C_fmt, arg1) \
642 MACRO_BLKSTMT_BEGIN \
643 GC_INFOLOG_PRINTF(C_msg /* + */ C_fmt "\n", arg1); \
646 #define ABORT_ARG2(C_msg, C_fmt, arg1, arg2) \
647 MACRO_BLKSTMT_BEGIN \
648 GC_INFOLOG_PRINTF(C_msg /* + */ C_fmt "\n", arg1, arg2); \
651 #define ABORT_ARG3(C_msg, C_fmt, arg1, arg2, arg3) \
652 MACRO_BLKSTMT_BEGIN \
653 GC_INFOLOG_PRINTF(C_msg /* + */ C_fmt "\n", \
/* Same as ABORT but does not have 'no-return' attribute.             */
/* ABORT on a dummy condition (which is always true).                 */
/* The comparison against -1 is effectively never true; it exists     */
/* only so the compiler cannot prove the macro never returns.         */
#define ABORT_RET(msg) \
          if ((signed_word)GC_current_warn_proc == -1) {} else ABORT(msg)
663 /* Exit abnormally, but without making a mess (e.g. out of memory) */
665 # define EXIT() PCR_Base_Exit(1,PCR_waitForever)
667 # define EXIT() (GC_on_abort(NULL), exit(1 /* EXIT_FAILURE */))
670 /* Print warning message, e.g. almost out of memory. */
671 /* The argument (if any) format specifier should be: */
672 /* "%s", "%p" or "%"WARN_PRIdPTR. */
673 #define WARN(msg, arg) \
674 (*GC_current_warn_proc)((/* no const */ char *)("GC Warning: " msg), \
676 GC_EXTERN GC_warn_proc GC_current_warn_proc;
678 /* Print format type macro for decimal signed_word value passed WARN(). */
679 /* This could be redefined for Win64 or LLP64, but typically should */
680 /* not be done as the WARN format string is, possibly, processed on the */
681 /* client side, so non-standard print type modifiers (like MS "I64d") */
682 /* should be avoided here if possible. */
684 /* Assume sizeof(void *) == sizeof(long) (or a little-endian machine) */
685 # define WARN_PRIdPTR "ld"
688 /* A tagging macro (for a code static analyzer) to indicate that the */
689 /* string obtained from an untrusted source (e.g., argv[], getenv) is */
690 /* safe to use in a vulnerable operation (e.g., open, exec). */
691 #define TRUSTED_STRING(s) (char*)COVERT_DATAFLOW(s)
693 /* Get environment entry */
694 #ifdef GC_READ_ENV_FILE
695 GC_INNER char * GC_envfile_getenv(const char *name);
696 # define GETENV(name) GC_envfile_getenv(name)
697 #elif defined(NO_GETENV) && !defined(CPPCHECK)
698 # define GETENV(name) NULL
699 #elif defined(EMPTY_GETENV_RESULTS)
700 /* Workaround for a reputed Wine bug. */
701 GC_INLINE char * fixed_getenv(const char *name)
703 char *value = getenv(name);
704 return value != NULL && *value != '\0' ? value : NULL;
706 # define GETENV(name) fixed_getenv(name)
708 # define GETENV(name) getenv(name)
714 # include <mach/thread_status.h>
715 # ifndef MAC_OS_X_VERSION_MAX_ALLOWED
716 # include <AvailabilityMacros.h>
717 /* Include this header just to import the above macro. */
719 # if defined(POWERPC)
720 # if CPP_WORDSZ == 32
721 # define GC_THREAD_STATE_T ppc_thread_state_t
723 # define GC_THREAD_STATE_T ppc_thread_state64_t
724 # define GC_MACH_THREAD_STATE PPC_THREAD_STATE64
725 # define GC_MACH_THREAD_STATE_COUNT PPC_THREAD_STATE64_COUNT
727 # elif defined(I386) || defined(X86_64)
728 # if CPP_WORDSZ == 32
729 # if defined(i386_THREAD_STATE_COUNT) && !defined(x86_THREAD_STATE32_COUNT)
730 /* Use old naming convention for 32-bit x86. */
731 # define GC_THREAD_STATE_T i386_thread_state_t
732 # define GC_MACH_THREAD_STATE i386_THREAD_STATE
733 # define GC_MACH_THREAD_STATE_COUNT i386_THREAD_STATE_COUNT
735 # define GC_THREAD_STATE_T x86_thread_state32_t
736 # define GC_MACH_THREAD_STATE x86_THREAD_STATE32
737 # define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE32_COUNT
740 # define GC_THREAD_STATE_T x86_thread_state64_t
741 # define GC_MACH_THREAD_STATE x86_THREAD_STATE64
742 # define GC_MACH_THREAD_STATE_COUNT x86_THREAD_STATE64_COUNT
744 # elif defined(ARM32) && defined(ARM_UNIFIED_THREAD_STATE) \
745 && !defined(CPPCHECK)
746 # define GC_THREAD_STATE_T arm_unified_thread_state_t
747 # define GC_MACH_THREAD_STATE ARM_UNIFIED_THREAD_STATE
748 # define GC_MACH_THREAD_STATE_COUNT ARM_UNIFIED_THREAD_STATE_COUNT
749 # elif defined(ARM32)
750 # define GC_THREAD_STATE_T arm_thread_state_t
751 # ifdef ARM_MACHINE_THREAD_STATE_COUNT
752 # define GC_MACH_THREAD_STATE ARM_MACHINE_THREAD_STATE
753 # define GC_MACH_THREAD_STATE_COUNT ARM_MACHINE_THREAD_STATE_COUNT
755 # elif defined(AARCH64)
756 # define GC_THREAD_STATE_T arm_thread_state64_t
757 # define GC_MACH_THREAD_STATE ARM_THREAD_STATE64
758 # define GC_MACH_THREAD_STATE_COUNT ARM_THREAD_STATE64_COUNT
759 # elif !defined(CPPCHECK)
760 # error define GC_THREAD_STATE_T
762 # ifndef GC_MACH_THREAD_STATE
763 # define GC_MACH_THREAD_STATE MACHINE_THREAD_STATE
764 # define GC_MACH_THREAD_STATE_COUNT MACHINE_THREAD_STATE_COUNT
767 # if CPP_WORDSZ == 32
768 # define GC_MACH_HEADER mach_header
769 # define GC_MACH_SECTION section
770 # define GC_GETSECTBYNAME getsectbynamefromheader
772 # define GC_MACH_HEADER mach_header_64
773 # define GC_MACH_SECTION section_64
774 # define GC_GETSECTBYNAME getsectbynamefromheader_64
777 /* Try to work out the right way to access thread state structure */
778 /* members. The structure has changed its definition in different */
779 /* Darwin versions. This now defaults to the (older) names */
780 /* without __, thus hopefully, not breaking any existing */
781 /* Makefile.direct builds. */
783 # define THREAD_FLD_NAME(x) __ ## x
785 # define THREAD_FLD_NAME(x) x
787 # if defined(ARM32) && defined(ARM_UNIFIED_THREAD_STATE)
788 # define THREAD_FLD(x) ts_32.THREAD_FLD_NAME(x)
790 # define THREAD_FLD(x) THREAD_FLD_NAME(x)
794 #include "../gc_tiny_fl.h"
798 #if __STDC_VERSION__ >= 201112L
799 # include <assert.h> /* for static_assert */
804 /*********************************/
806 /* Word-size-dependent defines */
808 /*********************************/
811 # define WORDS_TO_BYTES(x) ((x)<<2)
812 # define BYTES_TO_WORDS(x) ((x)>>2)
813 # define LOGWL ((word)5) /* log[2] of CPP_WORDSZ */
814 # define modWORDSZ(n) ((n) & 0x1f) /* n mod size of word */
816 # define UNALIGNED_PTRS
821 # define WORDS_TO_BYTES(x) ((x)<<3)
822 # define BYTES_TO_WORDS(x) ((x)>>3)
823 # define LOGWL ((word)6) /* log[2] of CPP_WORDSZ */
824 # define modWORDSZ(n) ((n) & 0x3f) /* n mod size of word */
826 # define UNALIGNED_PTRS
830 /* The first TINY_FREELISTS free lists correspond to the first */
831 /* TINY_FREELISTS multiples of GRANULE_BYTES, i.e. we keep */
832 /* separate free lists for each multiple of GRANULE_BYTES */
833 /* up to (TINY_FREELISTS-1) * GRANULE_BYTES. After that they */
834 /* may be spread out further. */
836 #define GRANULE_BYTES GC_GRANULE_BYTES
837 #define TINY_FREELISTS GC_TINY_FREELISTS
839 #define WORDSZ ((word)CPP_WORDSZ)
840 #define SIGNB ((word)1 << (WORDSZ-1))
841 #define BYTES_PER_WORD ((word)(sizeof (word)))
842 #define divWORDSZ(n) ((n) >> LOGWL) /* divide n by size of word */
844 #if GRANULE_BYTES == 8
845 # define BYTES_TO_GRANULES(n) ((n)>>3)
846 # define GRANULES_TO_BYTES(n) ((n)<<3)
847 # if CPP_WORDSZ == 64
848 # define GRANULES_TO_WORDS(n) (n)
849 # elif CPP_WORDSZ == 32
850 # define GRANULES_TO_WORDS(n) ((n)<<1)
852 # define GRANULES_TO_WORDS(n) BYTES_TO_WORDS(GRANULES_TO_BYTES(n))
854 #elif GRANULE_BYTES == 16
855 # define BYTES_TO_GRANULES(n) ((n)>>4)
856 # define GRANULES_TO_BYTES(n) ((n)<<4)
857 # if CPP_WORDSZ == 64
858 # define GRANULES_TO_WORDS(n) ((n)<<1)
859 # elif CPP_WORDSZ == 32
860 # define GRANULES_TO_WORDS(n) ((n)<<2)
862 # define GRANULES_TO_WORDS(n) BYTES_TO_WORDS(GRANULES_TO_BYTES(n))
865 # error Bad GRANULE_BYTES value
868 /*********************/
870 /* Size Parameters */
872 /*********************/
874 /* Heap block size, bytes. Should be power of 2. */
875 /* Incremental GC with MPROTECT_VDB currently requires the */
876 /* page size to be a multiple of HBLKSIZE. Since most modern */
877 /* architectures support variable page sizes down to 4 KB, and */
878 /* X86 is generally 4 KB, we now default to 4 KB, except for */
879 /* Alpha: Seems to be used with 8 KB pages. */
880 /* SMALL_CONFIG: Want less block-level fragmentation. */
882 # if defined(LARGE_CONFIG) || !defined(SMALL_CONFIG)
884 # define CPP_LOG_HBLKSIZE 13
885 # elif defined(SN_TARGET_ORBIS) || defined(SN_TARGET_PSP2)
886 # define CPP_LOG_HBLKSIZE 16 /* page size is set to 64 KB */
888 # define CPP_LOG_HBLKSIZE 12
891 # define CPP_LOG_HBLKSIZE 10
895 # define CPP_LOG_HBLKSIZE 9
896 # elif HBLKSIZE == 1024
897 # define CPP_LOG_HBLKSIZE 10
898 # elif HBLKSIZE == 2048
899 # define CPP_LOG_HBLKSIZE 11
900 # elif HBLKSIZE == 4096
901 # define CPP_LOG_HBLKSIZE 12
902 # elif HBLKSIZE == 8192
903 # define CPP_LOG_HBLKSIZE 13
904 # elif HBLKSIZE == 16384
905 # define CPP_LOG_HBLKSIZE 14
906 # elif !defined(CPPCHECK)
907 # error Bad HBLKSIZE value
912 # define CPP_HBLKSIZE (1 << CPP_LOG_HBLKSIZE)
913 # define LOG_HBLKSIZE ((size_t)CPP_LOG_HBLKSIZE)
914 # define HBLKSIZE ((size_t)CPP_HBLKSIZE)
/* Largest value whose square still fits in a size_t (derived from    */
/* the word size); useful for multiplication overflow checks.         */
#define GC_SQRT_SIZE_MAX ((((size_t)1) << (WORDSZ / 2)) - 1)

/* Max size objects supported by freelist (larger objects are         */
/* allocated directly with allchblk(), by rounding to the next        */
/* multiple of HBLKSIZE).                                             */
#define CPP_MAXOBJBYTES (CPP_HBLKSIZE/2)
#define MAXOBJBYTES ((size_t)CPP_MAXOBJBYTES)
#define CPP_MAXOBJWORDS BYTES_TO_WORDS(CPP_MAXOBJBYTES)
#define MAXOBJWORDS ((size_t)CPP_MAXOBJWORDS)
#define CPP_MAXOBJGRANULES BYTES_TO_GRANULES(CPP_MAXOBJBYTES)
#define MAXOBJGRANULES ((size_t)CPP_MAXOBJGRANULES)
/* Convert a byte count to a count of whole heap blocks.              */
# define divHBLKSZ(n) ((n) >> LOG_HBLKSIZE)

# define HBLK_PTR_DIFF(p,q) divHBLKSZ((ptr_t)p - (ptr_t)q)
        /* Equivalent to subtracting 2 hblk pointers.   */
        /* We do it this way because a compiler should  */
        /* find it hard to use an integer division      */
        /* instead of a shift.  The bundled SunOS 4.1   */
        /* o.w. sometimes pessimizes the subtraction to */
        /* involve a call to .div.                      */

/* Offset of n within its heap block.                                 */
# define modHBLKSZ(n) ((n) & (HBLKSIZE-1))

/* Heap block containing the given address (rounds down to the        */
/* HBLKSIZE-aligned block start).                                     */
# define HBLKPTR(objptr) ((struct hblk *)(((word)(objptr)) \
                                          & ~(word)(HBLKSIZE-1)))
/* Displacement of the address within its heap block.                 */
# define HBLKDISPL(objptr) (((size_t) (objptr)) & (HBLKSIZE-1))
/* Round up allocation size (in bytes) to a multiple of a granule.    */
#define ROUNDUP_GRANULE_SIZE(lb) /* lb should have no side-effect */ \
            (SIZET_SAT_ADD(lb, GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1))

/* Round up byte allocation requests to integral number of words, etc. */
/* EXTRA_BYTES is included so interior-pointer slop is accounted for.  */
# define ROUNDED_UP_GRANULES(lb) /* lb should have no side-effect */ \
        BYTES_TO_GRANULES(SIZET_SAT_ADD(lb, GRANULE_BYTES - 1 + EXTRA_BYTES))
951 # if MAX_EXTRA_BYTES == 0
952 # define SMALL_OBJ(bytes) EXPECT((bytes) <= (MAXOBJBYTES), TRUE)
954 # define SMALL_OBJ(bytes) \
955 (EXPECT((bytes) <= (MAXOBJBYTES - MAX_EXTRA_BYTES), TRUE) \
956 || (bytes) <= MAXOBJBYTES - EXTRA_BYTES)
957 /* This really just tests bytes <= MAXOBJBYTES - EXTRA_BYTES. */
958 /* But we try to avoid looking up EXTRA_BYTES. */
/* Add the per-object slop (EXTRA_BYTES) to a request size, saturating. */
# define ADD_SLOP(lb) /* lb should have no side-effect */ \
                SIZET_SAT_ADD(lb, EXTRA_BYTES)
964 * Hash table representation of sets of pages.
* Implements a map from aligned HBLKSIZE chunks of the address space to one bit each.
967 * This assumes it is OK to spuriously set bits, e.g. because multiple
968 * addresses are represented by a single location.
969 * Used by black-listing code, and perhaps by dirty bit maintenance code.
973 # if CPP_WORDSZ == 32
974 # define LOG_PHT_ENTRIES 20 /* Collisions likely at 1M blocks, */
975 /* which is >= 4 GB. Each table takes */
976 /* 128 KB, some of which may never be */
979 # define LOG_PHT_ENTRIES 21 /* Collisions likely at 2M blocks, */
980 /* which is >= 8 GB. Each table takes */
981 /* 256 KB, some of which may never be */
984 # elif !defined(SMALL_CONFIG)
985 # define LOG_PHT_ENTRIES 18 /* Collisions are likely if heap grows */
986 /* to more than 256K hblks >= 1 GB. */
987 /* Each hash table occupies 32 KB. */
988 /* Even for somewhat smaller heaps, */
989 /* say half that, collisions may be an */
990 /* issue because we blacklist */
991 /* addresses outside the heap. */
993 # define LOG_PHT_ENTRIES 15 /* Collisions are likely if heap grows */
994 /* to more than 32K hblks (128 MB). */
995 /* Each hash table occupies 4 KB. */
/* Number of entries (one bit each) in a page hash table. */
997 # define PHT_ENTRIES ((word)1 << LOG_PHT_ENTRIES)
/* Table length in words: PHT_ENTRIES bits packed one word at a time. */
998 # define PHT_SIZE (PHT_ENTRIES >> LOGWL)
/* A bit vector mapping HBLKSIZE-aligned chunks of the address space */
/* to a single bit; spurious collisions are tolerated (see above).   */
999 typedef word page_hash_table[PHT_SIZE];
/* Hash an address to its bit index: discard the in-block offset   */
/* (low LOG_HBLKSIZE bits), keep the low LOG_PHT_ENTRIES bits.     */
1001 # define PHT_HASH(addr) ((((word)(addr)) >> LOG_HBLKSIZE) & (PHT_ENTRIES - 1))
/* Read the single bit for the given index (result is 0 or 1). */
1003 # define get_pht_entry_from_index(bl, index) \
1004 (((bl)[divWORDSZ(index)] >> modWORDSZ(index)) & 1)
/* Set the bit; plain read-modify-write, i.e. NOT safe against */
/* concurrent updates -- see the _concurrent variant below.    */
1005 # define set_pht_entry_from_index(bl, index) \
1006 (void)((bl)[divWORDSZ(index)] |= (word)1 << modWORDSZ(index))
1008 #if defined(THREADS) && defined(AO_HAVE_or)
1009 /* And, one more version for GC_add_to_black_list_normal/stack */
1010 /* (invoked indirectly by GC_do_local_mark) and */
1011 /* async_set_pht_entry_from_index (invoked by GC_dirty or the write */
1012 /* fault handler). */
1013 # define set_pht_entry_from_index_concurrent(bl, index) \
1014 AO_or((volatile AO_t *)&(bl)[divWORDSZ(index)], \
1015 (AO_t)((word)1 << modWORDSZ(index)))
1017 # define set_pht_entry_from_index_concurrent(bl, index) \
1018 set_pht_entry_from_index(bl, index)
1022 /********************************************/
1024 /* H e a p B l o c k s */
1026 /********************************************/
1028 /* heap block header */
/* Mask extracting the byte offset within a heap block */
/* (HBLKSIZE is assumed to be a power of two).         */
1029 #define HBLKMASK (HBLKSIZE-1)
/* One mark bit per allocation granule in a heap block. */
1031 #define MARK_BITS_PER_HBLK (HBLKSIZE/GRANULE_BYTES)
1033 /* We allocate 1 bit per allocation granule. */
1034 /* If MARK_BIT_PER_GRANULE is defined, we use */
1035 /* every nth bit, where n is the number of */
1036 /* allocation granules per object. If */
1037 /* MARK_BIT_PER_OBJ is defined, we only use the */
1038 /* initial group of mark bits, and it is safe */
1039 /* to allocate smaller header for large objects. */
1041 union word_ptr_ao_u {
1045 # ifdef PARALLEL_MARK
1050 /* We maintain layout maps for heap blocks containing objects of a given */
1051 /* size. Each entry in this map describes a byte offset and has the */
1052 /* following type. */
1054 struct hblk * hb_next; /* Link field for hblk free list */
1055 /* and for lists of chunks waiting to be */
1057 struct hblk * hb_prev; /* Backwards link for free list. */
1058 struct hblk * hb_block; /* The corresponding block. */
1059 unsigned char hb_obj_kind;
1060 /* Kind of objects in the block. Each kind */
1061 /* identifies a mark procedure and a set of */
1062 /* list headers. Sometimes called regions. */
1063 unsigned char hb_flags;
1064 # define IGNORE_OFF_PAGE 1 /* Ignore pointers that do not */
1065 /* point to the first page of */
1067 # define WAS_UNMAPPED 2 /* This is a free block, which has */
1068 /* been unmapped from the address */
1070 /* GC_remap must be invoked on it */
1071 /* before it can be reallocated. */
1072 /* Only set with USE_MUNMAP. */
1073 # define FREE_BLK 4 /* Block is free, i.e. not in use. */
1074 # ifdef ENABLE_DISCLAIM
1075 # define HAS_DISCLAIM 8
1076 /* This kind has a callback on reclaim. */
1077 # define MARK_UNCONDITIONALLY 0x10
1078 /* Mark from all objects, marked or */
1079 /* not. Used to mark objects needed by */
1080 /* reclaim notifier. */
1082 # ifdef MARK_BIT_PER_GRANULE
1083 # define LARGE_BLOCK 0x20
1085 unsigned short hb_last_reclaimed;
1086 /* Value of GC_gc_no when block was */
1087 /* last allocated or swept. May wrap. */
1088 /* For a free block, this is maintained */
1089 /* only for USE_MUNMAP, and indicates */
1090 /* when the header was allocated, or */
1091 /* when the size of the block last */
1093 # ifdef MARK_BIT_PER_OBJ
1094 unsigned32 hb_inv_sz; /* A good upper bound for 2**32/hb_sz. */
1095 /* For large objects, we use */
1097 # define LARGE_INV_SZ (1 << 16)
1099 word hb_sz; /* If in use, size in bytes, of objects in the block. */
1100 /* if free, the size in bytes of the whole block. */
1101 /* We assume that this is convertible to signed_word */
1102 /* without generating a negative result. We avoid */
1103 /* generating free blocks larger than that. */
1104 word hb_descr; /* object descriptor for marking. See */
1106 # ifdef MARK_BIT_PER_GRANULE
1107 unsigned short * hb_map; /* Essentially a table of remainders */
1108 /* mod BYTES_TO_GRANULES(hb_sz), except */
1109 /* for large blocks. See GC_obj_map. */
1111 # ifdef PARALLEL_MARK
1112 volatile AO_t hb_n_marks; /* Number of set mark bits, excluding */
1113 /* the one always set at the end. */
1114 /* Currently it is concurrently */
1115 /* updated and hence only approximate. */
1116 /* But a zero value does guarantee that */
1117 /* the block contains no marked */
1119 /* Ensuring this property means that we */
1120 /* never decrement it to zero during a */
1121 /* collection, and hence the count may */
1122 /* be one too high. Due to concurrent */
1123 /* updates, an arbitrary number of */
1124 /* increments, but not all of them (!) */
1125 /* may be lost, hence it may in theory */
1126 /* be much too low. */
1127 /* The count may also be too high if */
1128 /* multiple mark threads mark the */
1129 /* same object due to a race. */
1131 size_t hb_n_marks; /* Without parallel marking, the count */
1134 # ifdef USE_MARK_BYTES
1135 # define MARK_BITS_SZ (MARK_BITS_PER_HBLK + 1)
1136 /* Unlike the other case, this is in units of bytes. */
1137 /* Since we force double-word alignment, we need at most one */
1138 /* mark bit per 2 words. But we do allocate and set one */
1139 /* extra mark bit to avoid an explicit check for the */
1140 /* partial object at the end of each block. */
1142 char _hb_marks[MARK_BITS_SZ];
1143 /* The i'th byte is 1 if the object */
1144 /* starting at granule i or object i is */
1145 /* marked, 0 o.w. */
1146 /* The mark bit for the "one past the */
1147 /* end" object is always set to avoid a */
1148 /* special case test in the marker. */
1149 word dummy; /* Force word alignment of mark bytes. */
1151 # define hb_marks _mark_byte_union._hb_marks
1153 # define MARK_BITS_SZ (MARK_BITS_PER_HBLK/CPP_WORDSZ + 1)
1154 word hb_marks[MARK_BITS_SZ];
1155 # endif /* !USE_MARK_BYTES */
1158 # define ANY_INDEX 23 /* "Random" mark bit index for assertions */
1160 /* heap block body */
/* Capacity of one heap block, in words and in allocation granules. */
1162 # define HBLK_WORDS (HBLKSIZE/sizeof(word))
1163 # define HBLK_GRANULES (HBLKSIZE/GRANULE_BYTES)
1165 /* The number of objects in a block dedicated to a certain size. */
1166 /* may erroneously yield zero (instead of one) for large objects. */
/* (Integer division: sz_in_bytes > HBLKSIZE yields 0.) */
1167 # define HBLK_OBJS(sz_in_bytes) (HBLKSIZE/(sz_in_bytes))
1170 char hb_body[HBLKSIZE];
/* TRUE if the block described by hdr is on a free list (not in use). */
1173 # define HBLK_IS_FREE(hdr) (((hdr) -> hb_flags & FREE_BLK) != 0)
/* Unchecked form: (lb) + HBLKSIZE-1 may wrap for lb near the top of */
/* the address space; use the _CHECKED form for untrusted sizes.     */
1175 # define OBJ_SZ_TO_BLOCKS(lb) divHBLKSZ((lb) + HBLKSIZE-1)
1176 # define OBJ_SZ_TO_BLOCKS_CHECKED(lb) /* lb should have no side-effect */ \
1177 divHBLKSZ(SIZET_SAT_ADD(lb, HBLKSIZE - 1))
1178 /* Size of block (in units of HBLKSIZE) needed to hold objects of */
1179 /* given lb (in bytes). The checked variant prevents wrap around. */
1181 /* Object free list link */
/* A free object stores the pointer to the next free object */
/* in its first word.                                        */
1182 # define obj_link(p) (*(void **)(p))
/* Bound on the number of user-defined mark procedures that can */
/* be referenced by DS_PROC mark descriptors (see gc_mark.h).   */
1184 # define LOG_MAX_MARK_PROCS 6
1185 # define MAX_MARK_PROCS (1 << LOG_MAX_MARK_PROCS)
1187 /* Root sets. Logically private to mark_rts.c. But we don't want the */
1188 /* tables scanned, so we put them here. */
1189 /* MAX_ROOT_SETS is the maximum number of ranges that can be */
1190 /* registered as static roots. */
1191 # ifdef LARGE_CONFIG
1192 # define MAX_ROOT_SETS 8192
1193 # elif !defined(SMALL_CONFIG)
1194 # define MAX_ROOT_SETS 2048
1196 # define MAX_ROOT_SETS 512
/* Heuristic bound: allow one exclusion range per four root sets. */
1199 # define MAX_EXCLUSIONS (MAX_ROOT_SETS/4)
1200 /* Maximum number of segments that can be excluded from root sets. */
1203 * Data structure for excluded static roots.
1210 /* Data structure for list of root sets. */
1211 /* We keep a hash table, so that we can filter out duplicate additions. */
1212 /* Under Win32, we need to do a better job of filtering overlaps, so */
1213 /* we resort to sequential search, and pay the price. */
1215 ptr_t r_start;/* multiple of word size */
1216 ptr_t r_end; /* multiple of word size and greater than r_start */
1217 # if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
1218 struct roots * r_next;
1221 /* Delete before registering new dynamic libraries */
1224 #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
1225 /* Size of hash table index to roots. */
1226 # define LOG_RT_SIZE 6
1227 # define RT_SIZE (1 << LOG_RT_SIZE) /* Power of 2, may be != MAX_ROOT_SETS */
1230 #ifndef MAX_HEAP_SECTS
1231 # ifdef LARGE_CONFIG
1232 # if CPP_WORDSZ > 32
1233 # define MAX_HEAP_SECTS 81920
1235 # define MAX_HEAP_SECTS 7680
1237 # elif defined(SMALL_CONFIG) && !defined(USE_PROC_FOR_LIBRARIES)
1238 # if defined(PARALLEL_MARK) && (defined(MSWIN32) || defined(CYGWIN32))
1239 # define MAX_HEAP_SECTS 384
1241 # define MAX_HEAP_SECTS 128 /* Roughly 256 MB (128*2048*1024) */
1243 # elif CPP_WORDSZ > 32
1244 # define MAX_HEAP_SECTS 1024 /* Roughly 8 GB */
1246 # define MAX_HEAP_SECTS 512 /* Roughly 4 GB */
1248 #endif /* !MAX_HEAP_SECTS */
1250 typedef struct GC_ms_entry {
1251 ptr_t mse_start; /* First word of object, word aligned. */
1252 union word_ptr_ao_u mse_descr;
1253 /* Descriptor; low order two bits are tags, */
1254 /* as described in gc_mark.h. */
1257 /* Lists of all heap blocks and free lists */
1258 /* as well as other random data structures */
1259 /* that should not be scanned by the */
1261 /* These are grouped together in a struct */
1262 /* so that they can be easily skipped by the */
1263 /* GC_mark routine. */
1264 /* The ordering is weird to make GC_malloc */
1265 /* faster by keeping the important fields */
1266 /* sufficiently close together that a */
1267 /* single load of a base register will do. */
1268 /* Scalars that could easily appear to */
1269 /* be pointers are also put here. */
1270 /* The main fields should precede any */
1271 /* conditionally included fields, so that */
1272 /* gc_inline.h will work even if a different */
1273 /* set of macros is defined when the client is */
1277 word _heapsize; /* Heap size in bytes (value never goes down). */
1278 word _requested_heapsize; /* Heap size due to explicit expansion. */
1279 ptr_t _last_heap_addr;
1280 ptr_t _prev_heap_addr;
1281 word _large_free_bytes;
1282 /* Total bytes contained in blocks on large object free */
1284 word _large_allocd_bytes;
1285 /* Total number of bytes in allocated large objects blocks. */
1286 /* For the purposes of this counter and the next one only, a */
1287 /* large object is one that occupies a block of at least */
1289 word _max_large_allocd_bytes;
1290 /* Maximum number of bytes that were ever allocated in */
1291 /* large object blocks. This is used to help decide when it */
1292 /* is safe to split up a large block. */
1293 word _bytes_allocd_before_gc;
1294 /* Number of bytes allocated before this */
1295 /* collection cycle. */
1296 # ifndef SEPARATE_GLOBALS
1297 # define GC_bytes_allocd GC_arrays._bytes_allocd
1299 /* Number of bytes allocated during this collection cycle. */
1301 word _bytes_dropped;
1302 /* Number of black-listed bytes dropped during GC cycle */
1303 /* as a result of repeated scanning during allocation */
1304 /* attempts. These are treated largely as allocated, */
1305 /* even though they are not useful to the client. */
1306 word _bytes_finalized;
1307 /* Approximate number of bytes in objects (and headers) */
1308 /* that became ready for finalization in the last */
1311 /* Number of explicitly deallocated bytes of memory */
1312 /* since last collection. */
1313 word _finalizer_bytes_freed;
1314 /* Bytes of memory explicitly deallocated while */
1315 /* finalizers were running. Used to approximate memory */
1316 /* explicitly deallocated by finalizers. */
1317 ptr_t _scratch_end_ptr;
1318 ptr_t _scratch_last_end_ptr;
1319 /* Used by headers.c, and can easily appear to point to */
1320 /* heap. Also used by GC_register_dynamic_libraries(). */
1322 /* Limits of stack for GC_mark routine. All ranges */
1323 /* between GC_mark_stack (incl.) and GC_mark_stack_top */
1324 /* (incl.) still need to be marked from. */
1325 mse *_mark_stack_limit;
1326 # ifdef PARALLEL_MARK
1327 mse *volatile _mark_stack_top;
1328 /* Updated only with mark lock held, but read asynchronously. */
1329 /* TODO: Use union to avoid casts to AO_t */
1331 mse *_mark_stack_top;
1333 word _composite_in_use; /* Number of bytes in the accessible */
1334 /* composite objects. */
1335 word _atomic_in_use; /* Number of bytes in the accessible */
1336 /* atomic objects. */
1338 # define GC_unmapped_bytes GC_arrays._unmapped_bytes
1339 word _unmapped_bytes;
1341 # define GC_unmapped_bytes 0
1343 bottom_index * _all_nils;
1344 # ifdef ENABLE_TRACE
1345 # define GC_trace_addr GC_arrays._trace_addr
1348 GC_mark_proc _mark_procs[MAX_MARK_PROCS];
1349 /* Table of user-defined mark procedures. There is */
1350 /* a small number of these, which can be referenced */
1351 /* by DS_PROC mark descriptors. See gc_mark.h. */
1352 char _modws_valid_offsets[sizeof(word)];
1353 /* GC_valid_offsets[i] ==> */
1354 /* GC_modws_valid_offsets[i%sizeof(word)] */
1355 # if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
1356 # define GC_root_index GC_arrays._root_index
1357 struct roots * _root_index[RT_SIZE];
1359 # ifdef SAVE_CALL_CHAIN
1360 # define GC_last_stack GC_arrays._last_stack
1361 struct callinfo _last_stack[NFRAMES];
1362 /* Stack at last garbage collection. Useful for */
1363 /* debugging mysterious object disappearances. In the */
1364 /* multi-threaded case, we currently only save the */
1365 /* calling stack. */
1367 # ifndef SEPARATE_GLOBALS
1368 # define GC_objfreelist GC_arrays._objfreelist
1369 void *_objfreelist[MAXOBJGRANULES+1];
1370 /* free list for objects */
1371 # define GC_aobjfreelist GC_arrays._aobjfreelist
1372 void *_aobjfreelist[MAXOBJGRANULES+1];
1373 /* free list for atomic objects */
1375 void *_uobjfreelist[MAXOBJGRANULES+1];
1376 /* Uncollectible but traced objects. */
1377 /* Objects on this and _auobjfreelist */
1378 /* are always marked, except during */
1379 /* garbage collections. */
1380 # ifdef GC_ATOMIC_UNCOLLECTABLE
1381 # define GC_auobjfreelist GC_arrays._auobjfreelist
1382 void *_auobjfreelist[MAXOBJGRANULES+1];
1383 /* Atomic uncollectible but traced objects. */
1385 size_t _size_map[MAXOBJBYTES+1];
1386 /* Number of granules to allocate when asked for a certain */
1387 /* number of bytes. Should be accessed with the allocation */
1389 # ifdef MARK_BIT_PER_GRANULE
1390 # define GC_obj_map GC_arrays._obj_map
1391 unsigned short * _obj_map[MAXOBJGRANULES + 1];
1392 /* If not NULL, then a pointer to a map of valid */
1393 /* object addresses. */
1394 /* _obj_map[sz_in_granules][i] is */
1395 /* i % sz_in_granules. */
1396 /* This is now used purely to replace a */
1397 /* division in the marker by a table lookup. */
1398 /* _obj_map[0] is used for large objects and */
1399 /* contains all nonzero entries. This gets us */
1400 /* out of the marker fast path without an extra */
1402 # define MAP_LEN BYTES_TO_GRANULES(HBLKSIZE)
1404 # define VALID_OFFSET_SZ HBLKSIZE
1405 char _valid_offsets[VALID_OFFSET_SZ];
1406 /* GC_valid_offsets[i] == TRUE ==> i */
1407 /* is registered as a displacement. */
1408 # ifndef GC_DISABLE_INCREMENTAL
1409 # define GC_grungy_pages GC_arrays._grungy_pages
1410 page_hash_table _grungy_pages; /* Pages that were dirty at last */
1411 /* GC_read_dirty. */
1412 # define GC_dirty_pages GC_arrays._dirty_pages
1413 volatile page_hash_table _dirty_pages;
1414 /* Pages dirtied since last GC_read_dirty. */
1416 # if (defined(CHECKSUMS) && defined(GWW_VDB)) || defined(PROC_VDB)
1417 # define GC_written_pages GC_arrays._written_pages
1418 page_hash_table _written_pages; /* Pages ever dirtied */
1420 # define GC_heap_sects GC_arrays._heap_sects
1424 } _heap_sects[MAX_HEAP_SECTS]; /* Heap segments potentially */
1425 /* client objects. */
1426 # if defined(USE_PROC_FOR_LIBRARIES)
1427 # define GC_our_memory GC_arrays._our_memory
1428 struct HeapSect _our_memory[MAX_HEAP_SECTS];
1429 /* All GET_MEM allocated */
1430 /* memory. Includes block */
1431 /* headers and the like. */
1433 # if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
1434 # define GC_heap_bases GC_arrays._heap_bases
1435 ptr_t _heap_bases[MAX_HEAP_SECTS];
1436 /* Start address of memory regions obtained from kernel. */
1439 # define GC_heap_lengths GC_arrays._heap_lengths
1440 word _heap_lengths[MAX_HEAP_SECTS];
1441 /* Committed lengths of memory regions obtained from kernel. */
1443 struct roots _static_roots[MAX_ROOT_SETS];
1444 struct exclusion _excl_table[MAX_EXCLUSIONS];
1445 /* Block header index; see gc_headers.h */
1446 bottom_index * _top_index[TOP_SZ];
/* The single global instance of the collector's main data structure. */
/* The marker deliberately skips this address range (see the struct   */
/* comments above).                                                   */
1449 GC_API_PRIV GC_FAR struct _GC_arrays GC_arrays;
/* Aliases so the rest of the collector can refer to the fields by */
/* their historical global names.                                  */
1451 #define GC_all_nils GC_arrays._all_nils
1452 #define GC_atomic_in_use GC_arrays._atomic_in_use
1453 #define GC_bytes_allocd_before_gc GC_arrays._bytes_allocd_before_gc
1454 #define GC_bytes_dropped GC_arrays._bytes_dropped
1455 #define GC_bytes_finalized GC_arrays._bytes_finalized
1456 #define GC_bytes_freed GC_arrays._bytes_freed
1457 #define GC_composite_in_use GC_arrays._composite_in_use
1458 #define GC_excl_table GC_arrays._excl_table
1459 #define GC_finalizer_bytes_freed GC_arrays._finalizer_bytes_freed
1460 #define GC_heapsize GC_arrays._heapsize
1461 #define GC_large_allocd_bytes GC_arrays._large_allocd_bytes
1462 #define GC_large_free_bytes GC_arrays._large_free_bytes
1463 #define GC_last_heap_addr GC_arrays._last_heap_addr
1464 #define GC_mark_stack GC_arrays._mark_stack
1465 #define GC_mark_stack_limit GC_arrays._mark_stack_limit
1466 #define GC_mark_stack_top GC_arrays._mark_stack_top
1467 #define GC_mark_procs GC_arrays._mark_procs
1468 #define GC_max_large_allocd_bytes GC_arrays._max_large_allocd_bytes
1469 #define GC_modws_valid_offsets GC_arrays._modws_valid_offsets
1470 #define GC_prev_heap_addr GC_arrays._prev_heap_addr
1471 #define GC_requested_heapsize GC_arrays._requested_heapsize
1472 #define GC_scratch_end_ptr GC_arrays._scratch_end_ptr
1473 #define GC_scratch_last_end_ptr GC_arrays._scratch_last_end_ptr
1474 #define GC_size_map GC_arrays._size_map
1475 #define GC_static_roots GC_arrays._static_roots
1476 #define GC_top_index GC_arrays._top_index
1477 #define GC_uobjfreelist GC_arrays._uobjfreelist
1478 #define GC_valid_offsets GC_arrays._valid_offsets
/* Bounds of GC_arrays itself, used to exclude it from scanning. */
1480 #define beginGC_arrays ((ptr_t)(&GC_arrays))
1481 #define endGC_arrays (((ptr_t)(&GC_arrays)) + (sizeof GC_arrays))
/* Heap bytes actually in use: total size minus bytes sitting on */
/* large-object free lists.                                      */
1482 #define USED_HEAP_SIZE (GC_heapsize - GC_large_free_bytes)
1486 # define MAXOBJKINDS 16
1488 GC_EXTERN struct obj_kind {
1489 void **ok_freelist; /* Array of free list headers for this kind of */
1490 /* object. Point either to GC_arrays or to */
1491 /* storage allocated with GC_scratch_alloc. */
1492 struct hblk **ok_reclaim_list;
1493 /* List headers for lists of blocks waiting to */
1494 /* be swept. Indexed by object size in */
1496 word ok_descriptor; /* Descriptor template for objects in this */
1498 GC_bool ok_relocate_descr;
1499 /* Add object size in bytes to descriptor */
1500 /* template to obtain descriptor. Otherwise */
1501 /* template is used as is. */
1502 GC_bool ok_init; /* Clear objects before putting them on the free list. */
1503 # ifdef ENABLE_DISCLAIM
1504 GC_bool ok_mark_unconditionally;
1505 /* Mark from all, including unmarked, objects */
1506 /* in block. Used to protect objects reachable */
1507 /* from reclaim notifiers. */
1508 int (GC_CALLBACK *ok_disclaim_proc)(void * /*obj*/);
1509 /* The disclaim procedure is called before obj */
1510 /* is reclaimed, but must also tolerate being */
1511 /* called with object from freelist. Non-zero */
1512 /* exit prevents object from being reclaimed. */
1513 # define OK_DISCLAIM_INITZ /* comma */, FALSE, 0
1515 # define OK_DISCLAIM_INITZ /* empty */
1516 # endif /* !ENABLE_DISCLAIM */
1517 } GC_obj_kinds[MAXOBJKINDS];
1519 #define beginGC_obj_kinds ((ptr_t)(&GC_obj_kinds))
1520 #define endGC_obj_kinds (beginGC_obj_kinds + (sizeof GC_obj_kinds))
1522 /* Variables that used to be in GC_arrays, but need to be accessed by */
1523 /* inline allocation code. If they were in GC_arrays, the inlined */
1524 /* allocation code would include GC_arrays offsets (as it did), which */
1525 /* introduce maintenance problems. */
1527 #ifdef SEPARATE_GLOBALS
1528 extern word GC_bytes_allocd;
1529 /* Number of bytes allocated during this collection cycle. */
1530 extern ptr_t GC_objfreelist[MAXOBJGRANULES+1];
1531 /* free list for NORMAL objects */
1532 # define beginGC_objfreelist ((ptr_t)(&GC_objfreelist))
1533 # define endGC_objfreelist (beginGC_objfreelist + sizeof(GC_objfreelist))
1535 extern ptr_t GC_aobjfreelist[MAXOBJGRANULES+1];
1536 /* free list for atomic (PTRFREE) objects */
1537 # define beginGC_aobjfreelist ((ptr_t)(&GC_aobjfreelist))
1538 # define endGC_aobjfreelist (beginGC_aobjfreelist + sizeof(GC_aobjfreelist))
1539 #endif /* SEPARATE_GLOBALS */
1541 /* Predefined kinds: */
1544 #define UNCOLLECTABLE 2
1545 #ifdef GC_ATOMIC_UNCOLLECTABLE
1546 # define AUNCOLLECTABLE 3
1547 # define IS_UNCOLLECTABLE(k) (((k) & ~1) == UNCOLLECTABLE)
1548 # define GC_N_KINDS_INITIAL_VALUE 4
1550 # define IS_UNCOLLECTABLE(k) ((k) == UNCOLLECTABLE)
1551 # define GC_N_KINDS_INITIAL_VALUE 3
1554 GC_EXTERN unsigned GC_n_kinds;
1556 GC_EXTERN word GC_n_heap_sects; /* Number of separately added heap */
1559 #ifdef USE_PROC_FOR_LIBRARIES
1560 GC_EXTERN word GC_n_memory; /* Number of GET_MEM allocated memory */
1564 GC_EXTERN size_t GC_page_size;
1566 /* Round up allocation size to a multiple of a page size. */
1567 /* GC_setpagesize() is assumed to be already invoked. */
/* GC_page_size is a size_t, so the complement of the mask is */
/* computed at full width; the page size is assumed to be a   */
/* power of two.                                              */
1568 #define ROUNDUP_PAGESIZE(lb) /* lb should have no side-effect */ \
1569 (SIZET_SAT_ADD(lb, GC_page_size - 1) & ~(GC_page_size - 1))
1571 /* Same as above but used to make GET_MEM() argument safe. */
1572 #ifdef MMAP_SUPPORTED
1573 # define ROUNDUP_PAGESIZE_IF_MMAP(lb) ROUNDUP_PAGESIZE(lb)
1575 # define ROUNDUP_PAGESIZE_IF_MMAP(lb) (lb)
1578 #if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
1579 GC_EXTERN SYSTEM_INFO GC_sysinfo;
1580 GC_INNER GC_bool GC_is_heap_base(void *p);
1583 GC_EXTERN word GC_black_list_spacing;
1584 /* Average number of bytes between blacklisted */
1585 /* blocks. Approximate. */
1586 /* Counts only blocks that are */
1587 /* "stack-blacklisted", i.e. that are */
1588 /* problematic in the interior of an object. */
1590 #ifdef GC_GCJ_SUPPORT
1591 extern struct hblk * GC_hblkfreelist[];
1592 extern word GC_free_bytes[]; /* Both remain visible to GNU GCJ. */
1595 GC_EXTERN word GC_root_size; /* Total size of registered root sections. */
1597 GC_EXTERN GC_bool GC_debugging_started;
1598 /* GC_debug_malloc has been called. */
1600 /* This is used by GC_do_blocking[_inner](). */
1601 struct blocking_data {
1603 void * client_data; /* and result */
1606 /* This is used by GC_call_with_gc_active(), GC_push_all_stack_sections(). */
1607 struct GC_traced_stack_sect_s {
1608 ptr_t saved_stack_ptr;
1610 ptr_t saved_backing_store_ptr;
1611 ptr_t backing_store_end;
1613 struct GC_traced_stack_sect_s *prev;
1617 /* Process all "traced stack sections" - scan entire stack except for */
1618 /* frames belonging to the user functions invoked by GC_do_blocking. */
1619 GC_INNER void GC_push_all_stack_sections(ptr_t lo, ptr_t hi,
1620 struct GC_traced_stack_sect_s *traced_stack_sect);
1621 GC_EXTERN word GC_total_stacksize; /* updated on every push_all_stacks */
1623 GC_EXTERN ptr_t GC_blocked_sp;
1624 GC_EXTERN struct GC_traced_stack_sect_s *GC_traced_stack_sect;
1625 /* Points to the "frame" data held in stack by */
1626 /* the innermost GC_call_with_gc_active(). */
1627 /* NULL if no such "frame" active. */
1628 #endif /* !THREADS */
1631 /* Similar to GC_push_all_stack_sections() but for IA-64 registers store. */
1632 GC_INNER void GC_push_all_register_sections(ptr_t bs_lo, ptr_t bs_hi,
1633 int eager, struct GC_traced_stack_sect_s *traced_stack_sect);
1636 /* Marks are in a reserved area in */
1637 /* each heap block. Each word has one mark bit associated */
1638 /* with it. Only those corresponding to the beginning of an */
1639 /* object are used. */
1641 /* Mark bit operations */
1644 * Retrieve, set, clear the nth mark bit in a given heap block.
1646 * (Recall that bit n corresponds to nth object or allocation granule
1647 * relative to the beginning of the block, including unused words)
1650 #ifdef USE_MARK_BYTES
/* USE_MARK_BYTES variant: a whole byte per mark "bit", so plain  */
/* byte loads and stores are used (no read-modify-write of shared */
/* words is needed).                                              */
1651 # define mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n])
1652 # define set_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n] = 1)
1653 # define clear_mark_bit_from_hdr(hhdr,n) ((hhdr)->hb_marks[n] = 0)
1655 /* Set mark bit correctly, even if mark bits may be concurrently */
1657 # if defined(PARALLEL_MARK) || (defined(THREAD_SANITIZER) && defined(THREADS))
1658 /* Workaround TSan false positive: there is no race between */
1659 /* mark_bit_from_hdr and set_mark_bit_from_hdr when n is different */
1660 /* (alternatively, USE_MARK_BYTES could be used). If TSan is off, */
1661 /* AO_or() is used only if we set USE_MARK_BITS explicitly. */
1662 # define OR_WORD(addr, bits) AO_or((volatile AO_t *)(addr), (AO_t)(bits))
1664 # define OR_WORD(addr, bits) (void)(*(addr) |= (bits))
/* Packed variant: mark bit n lives in word n/WORDSZ of hb_marks. */
1666 # define mark_bit_from_hdr(hhdr,n) \
1667 (((hhdr)->hb_marks[divWORDSZ(n)] >> modWORDSZ(n)) & (word)1)
/* OR_WORD (defined above) is AO_or under parallel marking, so  */
/* concurrent setters do not lose each other's bits.            */
1668 # define set_mark_bit_from_hdr(hhdr,n) \
1669 OR_WORD((hhdr)->hb_marks+divWORDSZ(n), (word)1 << modWORDSZ(n))
/* NOTE(review): clearing is a plain read-modify-write; presumably */
/* only invoked when no concurrent marking is running -- confirm.  */
1670 # define clear_mark_bit_from_hdr(hhdr,n) \
1671 ((hhdr)->hb_marks[divWORDSZ(n)] &= ~((word)1 << modWORDSZ(n)))
1672 #endif /* !USE_MARK_BYTES */
1674 #ifdef MARK_BIT_PER_OBJ
1675 # define MARK_BIT_NO(offset, sz) (((word)(offset))/(sz))
1676 /* Get the mark bit index corresponding to the given byte */
1677 /* offset and size (in bytes). */
1678 # define MARK_BIT_OFFSET(sz) 1
1679 /* Spacing between useful mark bits. */
1680 # define IF_PER_OBJ(x) x
1681 # define FINAL_MARK_BIT(sz) ((sz) > MAXOBJBYTES? 1 : HBLK_OBJS(sz))
1682 /* Position of final, always set, mark bit. */
1683 #else /* MARK_BIT_PER_GRANULE */
1684 # define MARK_BIT_NO(offset, sz) BYTES_TO_GRANULES((word)(offset))
1685 # define MARK_BIT_OFFSET(sz) BYTES_TO_GRANULES(sz)
1686 # define IF_PER_OBJ(x)
1687 # define FINAL_MARK_BIT(sz) \
1688 ((sz) > MAXOBJBYTES ? MARK_BITS_PER_HBLK \
1689 : BYTES_TO_GRANULES((sz) * HBLK_OBJS(sz)))
1692 /* Important internal collector routines */
1694 GC_INNER ptr_t GC_approx_sp(void);
1696 GC_INNER GC_bool GC_should_collect(void);
1698 void GC_apply_to_all_blocks(void (*fn)(struct hblk *h, word client_data),
1700 /* Invoke fn(hbp, client_data) for each */
1701 /* allocated heap block. */
1702 GC_INNER struct hblk * GC_next_used_block(struct hblk * h);
1703 /* Return first in-use block >= h */
1704 GC_INNER struct hblk * GC_prev_block(struct hblk * h);
1705 /* Return last block <= h. Returned block */
1706 /* is managed by GC, but may or may not be in */
1708 GC_INNER void GC_mark_init(void);
1709 GC_INNER void GC_clear_marks(void);
1710 /* Clear mark bits for all heap objects. */
1711 GC_INNER void GC_invalidate_mark_state(void);
1712 /* Tell the marker that marked */
1713 /* objects may point to unmarked */
1714 /* ones, and roots may point to */
1715 /* unmarked objects. Reset mark stack. */
1716 GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame);
1717 /* Perform about one pages worth of marking */
1718 /* work of whatever kind is needed. Returns */
1719 /* quickly if no collection is in progress. */
1720 /* Return TRUE if mark phase finished. */
1721 GC_INNER void GC_initiate_gc(void);
1722 /* initiate collection. */
1723 /* If the mark state is invalid, this */
1724 /* becomes full collection. Otherwise */
1727 GC_INNER GC_bool GC_collection_in_progress(void);
1728 /* Collection is in progress, or was abandoned. */
/* Push the address range occupied by a single root symbol onto the */
/* mark stack; the casts drop any volatile qualifier on &sym.       */
1730 #define GC_PUSH_ALL_SYM(sym) \
1731 GC_push_all((/* no volatile */ void *)&(sym), \
1732 (/* no volatile */ void *)(&(sym) + 1))
1734 GC_INNER void GC_push_all_stack(ptr_t b, ptr_t t);
1735 /* As GC_push_all but consider */
1736 /* interior pointers as valid. */
1738 #if defined(WRAP_MARK_SOME) && defined(PARALLEL_MARK)
1739 /* GC_mark_local does not handle memory protection faults yet. So, */
1740 /* the static data regions are scanned immediately by GC_push_roots. */
1741 GC_INNER void GC_push_conditional_eager(void *bottom, void *top,
1745 /* In the threads case, we push part of the current thread stack */
1746 /* with GC_push_all_eager when we push the registers. This gets the */
1747 /* callee-save registers that may disappear. The remainder of the */
1748 /* stacks are scheduled for scanning in *GC_push_other_roots, which */
1749 /* is thread-package-specific. */
1751 GC_INNER void GC_push_roots(GC_bool all, ptr_t cold_gc_frame);
1752 /* Push all or dirty roots. */
1754 GC_API_PRIV GC_push_other_roots_proc GC_push_other_roots;
1755 /* Push system or application specific roots */
1756 /* onto the mark stack. In some environments */
1757 /* (e.g. threads environments) this is */
1758 /* predefined to be non-zero. A client */
1759 /* supplied replacement should also call the */
1760 /* original function. Remains externally */
1761 /* visible as used by some well-known 3rd-party */
1762 /* software (e.g., ECL) currently. */
1765 void GC_push_thread_structures(void);
1767 GC_EXTERN void (*GC_push_typed_structures)(void);
1768 /* A pointer such that we can avoid linking in */
1769 /* the typed allocation support if unused. */
1771 GC_INNER void GC_with_callee_saves_pushed(void (*fn)(ptr_t, void *),
1772 volatile ptr_t arg);
1774 #if defined(SPARC) || defined(IA64)
1775 /* Cause all stacked registers to be saved in memory. Return a */
1776 /* pointer to the top of the corresponding memory stack. */
1777 ptr_t GC_save_regs_in_stack(void);
1780 /* Push register contents onto mark stack. */
1781 #if defined(AMIGA) || defined(MACOS) || defined(GC_DARWIN_THREADS)
1782 void GC_push_one(word p);
1783 /* If p points to an object, mark it */
1784 /* and push contents on the mark stack */
1785 /* Pointer recognition test always */
1786 /* accepts interior pointers, i.e. this */
1787 /* is appropriate for pointers found on */
1791 #ifdef GC_WIN32_THREADS
1792 /* Same as GC_push_one but for a sequence of registers. */
1793 GC_INNER void GC_push_many_regs(const word *regs, unsigned count);
1796 #if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
1797 GC_INNER void GC_mark_and_push_stack(ptr_t p, ptr_t source);
1798 /* Ditto, omits plausibility test */
1800 GC_INNER void GC_mark_and_push_stack(ptr_t p);
1803 GC_INNER void GC_clear_hdr_marks(hdr * hhdr);
1804 /* Clear the mark bits in a header */
1805 GC_INNER void GC_set_hdr_marks(hdr * hhdr);
1806 /* Set the mark bits in a header */
1807 GC_INNER void GC_set_fl_marks(ptr_t p);
1808 /* Set all mark bits associated with */
1810 #if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
1811 void GC_check_fl_marks(void **);
1812 /* Check that all mark bits */
1813 /* associated with a free list are */
1814 /* set. Abort if not. */
1816 void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp);
1817 #ifdef USE_PROC_FOR_LIBRARIES
1818 GC_INNER void GC_remove_roots_subregion(ptr_t b, ptr_t e);
1820 GC_INNER void GC_exclude_static_roots_inner(void *start, void *finish);
1821 #if defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
1822 || defined(CYGWIN32) || defined(PCR)
1823 GC_INNER void GC_register_dynamic_libraries(void);
1824 /* Add dynamic library data sections to the root set. */
1826 GC_INNER void GC_cond_register_dynamic_libraries(void);
1827 /* Remove and reregister dynamic libraries if we're */
1828 /* configured to do that at each GC. */
1830 /* Machine dependent startup routines */
1831 ptr_t GC_get_main_stack_base(void); /* Cold end of stack. */
1833 GC_INNER ptr_t GC_get_register_stack_base(void);
1834 /* Cold end of register stack. */
1836 void GC_register_data_segments(void);
1839 GC_INNER void GC_thr_init(void);
1840 GC_INNER void GC_init_parallel(void);
1842 GC_INNER GC_bool GC_is_static_root(void *p);
1843 /* Is the address p in one of the registered static */
1844 /* root sections? */
1846 void GC_add_trace_entry(char *kind, word arg1, word arg2);
1848 #endif /* !THREADS */
1850 /* Black listing: */
1851 #ifdef PRINT_BLACK_LIST
1852 GC_INNER void GC_add_to_black_list_normal(word p, ptr_t source);
1853 /* Register bits as a possible future false */
1854 /* reference from the heap or static data */
1855 # define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
1856 if (GC_all_interior_pointers) { \
1857 GC_add_to_black_list_stack((word)(bits), (source)); \
1859 GC_add_to_black_list_normal((word)(bits), (source))
1860 GC_INNER void GC_add_to_black_list_stack(word p, ptr_t source);
1861 # define GC_ADD_TO_BLACK_LIST_STACK(bits, source) \
1862 GC_add_to_black_list_stack((word)(bits), (source))
1864 GC_INNER void GC_add_to_black_list_normal(word p);
1865 # define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
1866 if (GC_all_interior_pointers) { \
1867 GC_add_to_black_list_stack((word)(bits)); \
1869 GC_add_to_black_list_normal((word)(bits))
1870 GC_INNER void GC_add_to_black_list_stack(word p);
1871 # define GC_ADD_TO_BLACK_LIST_STACK(bits, source) \
1872 GC_add_to_black_list_stack((word)(bits))
1873 #endif /* PRINT_BLACK_LIST */
1875 struct hblk * GC_is_black_listed(struct hblk * h, word len);
1876 /* If there are likely to be false references */
1877 /* to a block starting at h of the indicated */
1878 /* length, then return the next plausible */
1879 /* starting location for h that might avoid */
1880 /* these false references. Remains externally */
1881 /* visible as used by GNU GCJ currently. */
1883 GC_INNER void GC_promote_black_lists(void);
1884 /* Declare an end to a black listing phase. */
1885 GC_INNER void GC_unpromote_black_lists(void);
1886 /* Approximately undo the effect of the above. */
1887 /* This actually loses some information, but */
1888 /* only in a reasonably safe way. */
1890 GC_INNER ptr_t GC_scratch_alloc(size_t bytes);
1891 /* GC internal memory allocation for */
1892 /* small objects. Deallocation is not */
1893 /* possible. May return NULL. */
1896 /* GC_scratch_recycle_no_gww() not used. */
1898 # define GC_scratch_recycle_no_gww GC_scratch_recycle_inner
1900 GC_INNER void GC_scratch_recycle_inner(void *ptr, size_t bytes);
1901 /* Reuse the memory region by the heap. */
1903 /* Heap block layout maps: */
1904 #ifdef MARK_BIT_PER_GRANULE
1905 GC_INNER GC_bool GC_add_map_entry(size_t sz);
1906 /* Add a heap block map for objects of */
1907 /* size sz to obj_map. */
1908 /* Return FALSE on failure. */
1911 GC_INNER void GC_register_displacement_inner(size_t offset);
1912 /* Version of GC_register_displacement */
1913 /* that assumes lock is already held. */
1915 /* hblk allocation: */
1916 GC_INNER void GC_new_hblk(size_t size_in_granules, int kind);
1917 /* Allocate a new heap block, and build */
1918 /* a free list in it. */
1920 GC_INNER ptr_t GC_build_fl(struct hblk *h, size_t words, GC_bool clear,
1922 /* Build a free list for objects of */
1923 /* size sz in block h. Append list to */
1924 /* end of the free lists. Possibly */
1925 /* clear objects on the list. Normally */
1926 /* called by GC_new_hblk, but also */
1927 /* called explicitly without GC lock. */
1929 GC_INNER struct hblk * GC_allochblk(size_t size_in_bytes, int kind,
1931 /* Allocate a heap block, inform */
1932 /* the marker that block is valid */
1933 /* for objects of indicated size. */
1935 GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags);
1936 /* Allocate a large block of size lb bytes. */
1937 /* The block is not cleared. */
1938 /* Flags is 0 or IGNORE_OFF_PAGE. */
1939 /* Calls GC_allochblk to do the actual */
1940 /* allocation, but also triggers GC and/or */
1941 /* heap expansion as appropriate. */
1942 /* Does not update GC_bytes_allocd, but does */
1943 /* other accounting. */
1945 GC_INNER void GC_freehblk(struct hblk * p);
1946 /* Deallocate a heap block and mark it */
1950 GC_INNER GC_bool GC_expand_hp_inner(word n);
1951 GC_INNER void GC_start_reclaim(GC_bool abort_if_found);
1952 /* Restore unmarked objects to free */
1953 /* lists, or (if abort_if_found is */
1954 /* TRUE) report them. */
1955 /* Sweeping of small object pages is */
1956 /* largely deferred. */
1957 GC_INNER void GC_continue_reclaim(word sz, int kind);
1958 /* Sweep pages of the given size and */
1959 /* kind, as long as possible, and */
1960 /* as long as the corr. free list is */
1961 /* empty. Sz is in granules. */
1963 GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old);
1964 /* Reclaim all blocks. Abort (in a */
1965 /* consistent state) if f returns TRUE. */
1966 GC_INNER ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
1967 GC_bool init, ptr_t list,
1968 signed_word *count);
1969 /* Rebuild free list in hbp with */
1970 /* header hhdr, with objects of size sz */
1971 /* bytes. Add list to the end of the */
1972 /* free list. Add the number of */
1973 /* reclaimed bytes to *count. */
1974 GC_INNER GC_bool GC_block_empty(hdr * hhdr);
1975 /* Block completely unmarked? */
1976 GC_INNER int GC_CALLBACK GC_never_stop_func(void);
1977 /* Always returns 0 (FALSE). */
1978 GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func f);
1980 /* Collect; caller must have acquired */
1981 /* lock. Collection is aborted if f */
1982 /* returns TRUE. Returns TRUE if it */
1983 /* completes successfully. */
/* Run a full collection with no client stop function; the boolean */
/* success result of GC_try_to_collect_inner is discarded. */
1984 #define GC_gcollect_inner() \
1985 (void)GC_try_to_collect_inner(GC_never_stop_func)
1988 GC_EXTERN GC_bool GC_in_thread_creation;
1989 /* We may currently be in thread creation or destruction. */
1990 /* Only set to TRUE while allocation lock is held. */
1991 /* When set, it is OK to run GC from unknown thread. */
1994 GC_EXTERN GC_bool GC_is_initialized; /* GC_init() has been run. */
1996 GC_INNER void GC_collect_a_little_inner(int n);
1997 /* Do n units worth of garbage */
1998 /* collection work, if appropriate. */
1999 /* A unit is an amount appropriate for */
2000 /* HBLKSIZE bytes of allocation. */
2002 GC_INNER void * GC_generic_malloc_inner(size_t lb, int k);
2003 /* Allocate an object of the given */
2004 /* kind but assuming lock already held. */
2005 #if defined(DBG_HDRS_ALL) || defined(GC_GCJ_SUPPORT) \
2006 || !defined(GC_NO_FINALIZATION)
2007 GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k);
2008 /* Allocate an object, where */
2009 /* the client guarantees that there */
2010 /* will always be a pointer to the */
2011 /* beginning of the object while the */
2012 /* object is live. */
2015 GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
2016 GC_bool ignore_off_page, GC_bool retry);
2018 GC_INNER ptr_t GC_allocobj(size_t sz, int kind);
2019 /* Make the indicated */
2020 /* free list nonempty, and return its */
2021 /* head. Sz is in granules. */
2023 #ifdef GC_ADD_CALLER
2024 /* GC_DBG_EXTRAS is used by GC debug API functions (unlike GC_EXTRAS */
2025 /* used by GC debug API macros) thus GC_RETURN_ADDR_PARENT (pointing */
2026 /* to client caller) should be used if possible. */
2027 # ifdef GC_HAVE_RETURN_ADDR_PARENT
2028 # define GC_DBG_EXTRAS GC_RETURN_ADDR_PARENT, NULL, 0
2030 # define GC_DBG_EXTRAS GC_RETURN_ADDR, NULL, 0
2033 # define GC_DBG_EXTRAS "unknown", 0
2036 #ifdef GC_COLLECT_AT_MALLOC
2037 extern size_t GC_dbg_collect_at_malloc_min_lb;
2038 /* variable visible outside for debugging */
2039 # define GC_DBG_COLLECT_AT_MALLOC(lb) \
2040 (void)((lb) >= GC_dbg_collect_at_malloc_min_lb ? \
2041 (GC_gcollect(), 0) : 0)
2043 # define GC_DBG_COLLECT_AT_MALLOC(lb) (void)0
2044 #endif /* !GC_COLLECT_AT_MALLOC */
2046 /* Allocation routines that bypass the thread local cache. */
2047 #if defined(THREAD_LOCAL_ALLOC) && defined(GC_GCJ_SUPPORT)
2048 GC_INNER void * GC_core_gcj_malloc(size_t, void *);
2051 GC_INNER void GC_init_headers(void);
2052 GC_INNER struct hblkhdr * GC_install_header(struct hblk *h);
2053 /* Install a header for block h. */
2054 /* Return 0 on failure, or the header */
2056 GC_INNER GC_bool GC_install_counts(struct hblk * h, size_t sz);
2057 /* Set up forwarding counts for block */
2059 /* Return FALSE on failure. */
2060 GC_INNER void GC_remove_header(struct hblk * h);
2061 /* Remove the header for block h. */
2062 GC_INNER void GC_remove_counts(struct hblk * h, size_t sz);
2063 /* Remove forwarding counts for h. */
2064 GC_INNER hdr * GC_find_header(ptr_t h);
2066 GC_INNER void GC_add_to_heap(struct hblk *p, size_t bytes);
2067 /* Add a HBLKSIZE aligned chunk to the heap. */
2069 #ifdef USE_PROC_FOR_LIBRARIES
2070 GC_INNER void GC_add_to_our_memory(ptr_t p, size_t bytes);
2071 /* Add a chunk to GC_our_memory. */
2072 /* If p == 0, do nothing. */
2074 # define GC_add_to_our_memory(p, bytes)
2077 GC_INNER void GC_print_all_errors(void);
2078 /* Print smashed and leaked objects, if any. */
2079 /* Clear the lists of such objects. */
2081 GC_EXTERN void (*GC_check_heap)(void);
2082 /* Check that all objects in the heap with */
2083 /* debugging info are intact. */
2084 /* Add any that are not to GC_smashed list. */
2085 GC_EXTERN void (*GC_print_all_smashed)(void);
2086 /* Print GC_smashed if it's not empty. */
2087 /* Clear GC_smashed list. */
2088 GC_EXTERN void (*GC_print_heap_obj)(ptr_t p);
2089 /* If possible print (using GC_err_printf) */
2090 /* a more detailed description (terminated with */
2091 /* "\n") of the object referred to by p. */
2093 #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
2094 void GC_print_address_map(void);
2095 /* Print an address map of the process. */
2098 #ifndef SHORT_DBG_HDRS
2099 GC_EXTERN GC_bool GC_findleak_delay_free;
2100 /* Do not immediately deallocate object on */
2101 /* free() in the leak-finding mode, just mark */
2102 /* it as freed (and deallocate it after GC). */
2103 GC_INNER GC_bool GC_check_leaked(ptr_t base); /* from dbg_mlc.c */
2106 GC_EXTERN GC_bool GC_have_errors; /* We saw a smashed or leaked object. */
2107 /* Call error printing routine */
2108 /* occasionally. It is OK to read it */
2109 /* without acquiring the lock. */
2112 #if !defined(NO_CLOCK) || !defined(SMALL_CONFIG)
2113 /* GC_print_stats should be visible to extra/MacOS.c. */
2114 extern int GC_print_stats; /* Nonzero generates basic GC log. */
2115 /* VERBOSE generates add'l messages. */
2116 #else /* SMALL_CONFIG */
2117 # define GC_print_stats 0
2118 /* Will this remove the message character strings from the executable? */
2119 /* With a particular level of optimizations, it should... */
2122 #ifdef KEEP_BACK_PTRS
2123 GC_EXTERN long GC_backtraces;
2124 GC_INNER void GC_generate_random_backtrace_no_gc(void);
2128 # define GC_RAND_MAX (~0U >> 1)
2129 GC_API_PRIV long GC_random(void);
2132 GC_EXTERN GC_bool GC_print_back_height;
2134 #ifdef MAKE_BACK_GRAPH
2135 void GC_print_back_graph_stats(void);
2139 GC_INNER void GC_free_inner(void * p);
2142 /* Macros used for collector internal allocation. */
2143 /* These assume the collector lock is held. */
2145 GC_INNER void * GC_debug_generic_malloc_inner(size_t lb, int k);
2146 GC_INNER void * GC_debug_generic_malloc_inner_ignore_off_page(size_t lb,
2148 # define GC_INTERNAL_MALLOC GC_debug_generic_malloc_inner
2149 # define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE \
2150 GC_debug_generic_malloc_inner_ignore_off_page
2152 GC_INNER void GC_debug_free_inner(void * p);
2153 # define GC_INTERNAL_FREE GC_debug_free_inner
2155 # define GC_INTERNAL_FREE GC_debug_free
2158 # define GC_INTERNAL_MALLOC GC_generic_malloc_inner
2159 # define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE \
2160 GC_generic_malloc_inner_ignore_off_page
2162 # define GC_INTERNAL_FREE GC_free_inner
2164 # define GC_INTERNAL_FREE GC_free
2166 #endif /* !DBG_HDRS_ALL */
2169 /* Memory unmapping: */
2170 GC_INNER void GC_unmap_old(void);
2171 GC_INNER void GC_merge_unmapped(void);
2172 GC_INNER void GC_unmap(ptr_t start, size_t bytes);
2173 GC_INNER void GC_remap(ptr_t start, size_t bytes);
2174 GC_INNER void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2,
2178 #ifdef CAN_HANDLE_FORK
2179 GC_EXTERN int GC_handle_fork;
2180 /* Fork-handling mode: */
2181 /* 0 means no fork handling requested (but client could */
2182 /* anyway call fork() provided it is surrounded with */
2183 /* GC_atfork_prepare/parent/child calls); */
2184 /* -1 means GC tries to use pthread_at_fork if it is */
2185 /* available (if it succeeds then GC_handle_fork value */
2186 /* is changed to 1), client should nonetheless surround */
2187 /* fork() with GC_atfork_prepare/parent/child (for the */
2188 /* case of pthread_at_fork failure or absence); */
2189 /* 1 (or other values) means client fully relies on */
2190 /* pthread_at_fork (so if it is missing or failed then */
2191 /* abort occurs in GC_init), GC_atfork_prepare and the */
2192 /* accompanying routines are no-op in such a case. */
2195 #ifdef GC_DISABLE_INCREMENTAL
2196 # define GC_incremental FALSE
2197 # define GC_auto_incremental FALSE
2198 # define GC_manual_vdb FALSE
2199 # define GC_dirty(p) (void)(p)
2200 # define REACHABLE_AFTER_DIRTY(p) (void)(p)
2202 #else /* !GC_DISABLE_INCREMENTAL */
2203 GC_EXTERN GC_bool GC_incremental;
2204 /* Using incremental/generational collection. */
2205 /* Assumes dirty bits are being maintained. */
2207 /* Virtual dirty bit implementation: */
2208 /* Each implementation exports the following: */
2209 GC_INNER void GC_read_dirty(GC_bool output_unneeded);
2210 /* Retrieve dirty bits. Set output_unneeded to */
2211 /* indicate that reading of the retrieved dirty */
2212 /* bits is not planned till the next retrieval. */
2213 GC_INNER GC_bool GC_page_was_dirty(struct hblk *h);
2214 /* Read retrieved dirty bits. */
2216 GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
2217 GC_bool pointerfree);
2218 /* h is about to be written or allocated. Ensure that */
2219 /* it is not write protected by the virtual dirty bit */
2220 /* implementation. I.e., this is a call that: */
2221 /* - hints that [h, h+nblocks) is about to be written; */
2222 /* - guarantees that protection is removed; */
2223 /* - may speed up some dirty bit implementations; */
2224 /* - may be essential if we need to ensure that */
2225 /* pointer-free system call buffers in the heap are */
2226 /* not protected. */
2228 GC_INNER GC_bool GC_dirty_init(void);
2229 /* Returns true if dirty bits are maintained (otherwise */
2230 /* it is OK to be called again if the client invokes */
2231 /* GC_enable_incremental once more). */
2233 GC_EXTERN GC_bool GC_manual_vdb;
2234 /* The incremental collection is in the manual VDB */
2235 /* mode. Assumes GC_incremental is true. Should not */
2236 /* be modified once GC_incremental is set to true. */
2238 # define GC_auto_incremental (GC_incremental && !GC_manual_vdb)
2240 GC_INNER void GC_dirty_inner(const void *p); /* does not require locking */
2241 # define GC_dirty(p) (GC_manual_vdb ? GC_dirty_inner(p) : (void)0)
2242 # define REACHABLE_AFTER_DIRTY(p) GC_reachable_here(p)
2243 #endif /* !GC_DISABLE_INCREMENTAL */
2245 /* Same as GC_base but accepts and returns a pointer to const object. */
/* The cast to plain "void *" only strips constness to match the */
/* GC_base prototype; the pointed-to object is not modified. */
2246 #define GC_base_C(p) ((const void *)GC_base((/* no const */ void *)(p)))
2248 /* Debugging print routines: */
2249 void GC_print_block_list(void);
2250 void GC_print_hblkfreelist(void);
2251 void GC_print_heap_sects(void);
2252 void GC_print_static_roots(void);
2254 extern word GC_fo_entries; /* should be visible in extra/MacOS.c */
2256 #ifdef KEEP_BACK_PTRS
2257 GC_INNER void GC_store_back_pointer(ptr_t source, ptr_t dest);
2258 GC_INNER void GC_marked_for_finalization(ptr_t dest);
2259 # define GC_STORE_BACK_PTR(source, dest) GC_store_back_pointer(source, dest)
2260 # define GC_MARKED_FOR_FINALIZATION(dest) GC_marked_for_finalization(dest)
2262 # define GC_STORE_BACK_PTR(source, dest) (void)(source)
2263 # define GC_MARKED_FOR_FINALIZATION(dest)
2266 /* Make arguments appear live to compiler */
2267 void GC_noop6(word, word, word, word, word, word);
2269 GC_API void GC_CALL GC_noop1(word);
2271 #ifndef GC_ATTR_FORMAT_PRINTF
2272 # if GC_GNUC_PREREQ(3, 0)
2273 # define GC_ATTR_FORMAT_PRINTF(spec_argnum, first_checked) \
2274 __attribute__((__format__(__printf__, spec_argnum, first_checked)))
2276 # define GC_ATTR_FORMAT_PRINTF(spec_argnum, first_checked)
2280 /* Logging and diagnostic output: */
2281 /* GC_printf is used typically on client explicit print requests. */
2282 /* For all GC_X_printf routines, it is recommended to put "\n" at */
2283 /* 'format' string end (for output atomicity). */
2284 GC_API_PRIV void GC_printf(const char * format, ...)
2285 GC_ATTR_FORMAT_PRINTF(1, 2);
2286 /* A version of printf that doesn't allocate, */
2287 /* 1 KB total output length. */
2288 /* (We use sprintf. Hopefully that doesn't */
2289 /* allocate for long arguments.) */
2290 GC_API_PRIV void GC_err_printf(const char * format, ...)
2291 GC_ATTR_FORMAT_PRINTF(1, 2);
2293 /* Basic logging routine. Typically, GC_log_printf is called directly */
2294 /* only inside various DEBUG_x blocks. */
2295 GC_API_PRIV void GC_log_printf(const char * format, ...)
2296 GC_ATTR_FORMAT_PRINTF(1, 2);
2298 #ifndef GC_ANDROID_LOG
2299 # define GC_PRINT_STATS_FLAG (GC_print_stats != 0)
2300 # define GC_INFOLOG_PRINTF GC_COND_LOG_PRINTF
2301 /* GC_verbose_log_printf is called only if GC_print_stats is VERBOSE. */
2302 # define GC_verbose_log_printf GC_log_printf
2304 extern GC_bool GC_quiet;
2305 # define GC_PRINT_STATS_FLAG (!GC_quiet)
2306 /* INFO/DBG loggers are enabled even if GC_print_stats is off. */
2307 # ifndef GC_INFOLOG_PRINTF
2308 # define GC_INFOLOG_PRINTF if (GC_quiet) {} else GC_info_log_printf
2310 GC_INNER void GC_info_log_printf(const char *format, ...)
2311 GC_ATTR_FORMAT_PRINTF(1, 2);
2312 GC_INNER void GC_verbose_log_printf(const char *format, ...)
2313 GC_ATTR_FORMAT_PRINTF(1, 2);
2314 #endif /* GC_ANDROID_LOG */
2316 /* Convenient macros for GC_[verbose_]log_printf invocation. */
2317 #define GC_COND_LOG_PRINTF \
2318 if (EXPECT(!GC_print_stats, TRUE)) {} else GC_log_printf
2319 #define GC_VERBOSE_LOG_PRINTF \
2320 if (EXPECT(GC_print_stats != VERBOSE, TRUE)) {} else GC_verbose_log_printf
2321 #ifndef GC_DBGLOG_PRINTF
2322 # define GC_DBGLOG_PRINTF if (!GC_PRINT_STATS_FLAG) {} else GC_log_printf
2325 void GC_err_puts(const char *s);
2326 /* Write s to stderr, don't buffer, don't add */
2327 /* newlines, don't ... */
2329 /* Handy macro for logging size values (of word type) in KiB (rounding */
2330 /* to nearest value). */
/* Adds 511 (one less than half a KiB) before shifting right by 10, so */
/* the value rounds to the nearest KiB (an exact half-KiB rounds down). */
2331 #define TO_KiB_UL(v) ((unsigned long)(((v) + ((1 << 9) - 1)) >> 10))
2333 GC_EXTERN unsigned GC_fail_count;
2334 /* How many consecutive GC/expansion failures? */
2335 /* Reset by GC_allochblk(); defined in alloc.c. */
2337 GC_EXTERN long GC_large_alloc_warn_interval; /* defined in misc.c */
2339 GC_EXTERN signed_word GC_bytes_found;
2340 /* Number of reclaimed bytes after garbage collection; */
2341 /* protected by GC lock; defined in reclaim.c. */
2343 #ifndef GC_GET_HEAP_USAGE_NOT_NEEDED
2344 GC_EXTERN word GC_reclaimed_bytes_before_gc;
2345 /* Number of bytes reclaimed before this */
2346 /* collection cycle; used for statistics only. */
2350 GC_EXTERN int GC_unmap_threshold; /* defined in allchblk.c */
2351 GC_EXTERN GC_bool GC_force_unmap_on_gcollect; /* defined in misc.c */
2355 GC_EXTERN GC_bool GC_no_win32_dlls; /* defined in os_dep.c */
2356 GC_EXTERN GC_bool GC_wnt; /* Is Windows NT derivative; */
2357 /* defined and set in os_dep.c. */
2361 # if (defined(MSWIN32) && !defined(CONSOLE_LOG)) || defined(MSWINCE)
2362 GC_EXTERN CRITICAL_SECTION GC_write_cs; /* defined in misc.c */
2363 # ifdef GC_ASSERTIONS
2364 GC_EXTERN GC_bool GC_write_disabled;
2365 /* defined in win32_threads.c; */
2366 /* protected by GC_write_cs. */
2369 # endif /* MSWIN32 || MSWINCE */
2370 # if defined(GC_DISABLE_INCREMENTAL) || defined(HAVE_LOCKFREE_AO_OR)
2371 # define GC_acquire_dirty_lock() (void)0
2372 # define GC_release_dirty_lock() (void)0
2374 /* Acquire the spin lock we use to update dirty bits. */
2375 /* Threads should not get stopped holding it. But we may */
2376 /* acquire and release it during GC_remove_protection call. */
2377 # define GC_acquire_dirty_lock() \
2379 } while (AO_test_and_set_acquire(&GC_fault_handler_lock) == AO_TS_SET)
2380 # define GC_release_dirty_lock() AO_CLEAR(&GC_fault_handler_lock)
2381 GC_EXTERN volatile AO_TS_t GC_fault_handler_lock;
2382 /* defined in os_dep.c */
2385 GC_EXTERN GC_bool GC_dont_query_stack_min;
2386 /* Defined and set in os_dep.c. */
2389 GC_EXTERN ptr_t GC_save_regs_ret_val; /* defined in mach_dep.c. */
2390 /* Previously set to backing store pointer. */
2391 #endif /* !THREADS */
2393 #ifdef THREAD_LOCAL_ALLOC
2394 GC_EXTERN GC_bool GC_world_stopped; /* defined in alloc.c */
2395 GC_INNER void GC_mark_thread_local_free_lists(void);
2398 #ifdef GC_GCJ_SUPPORT
2399 # ifdef GC_ASSERTIONS
2400 GC_EXTERN GC_bool GC_gcj_malloc_initialized; /* defined in gcj_mlc.c */
2402 GC_EXTERN ptr_t * GC_gcjobjfreelist;
2405 #if defined(MPROTECT_VDB) && defined(GWW_VDB)
2406 GC_INNER GC_bool GC_gww_dirty_init(void);
2407 /* Returns TRUE if GetWriteWatch is available. */
2408 /* May be called repeatedly. */
2411 #if defined(CHECKSUMS) || defined(PROC_VDB)
2412 GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk * h);
2413 /* Could the page contain valid heap pointers? */
2417 # if defined(MPROTECT_VDB) && !defined(DARWIN)
2418 void GC_record_fault(struct hblk * h);
2420 void GC_check_dirty(void);
2423 GC_INNER void GC_default_print_heap_obj_proc(ptr_t p);
2425 GC_INNER void GC_setpagesize(void);
2427 GC_INNER void GC_initialize_offsets(void); /* defined in obj_map.c */
2429 GC_INNER void GC_bl_init(void);
2430 GC_INNER void GC_bl_init_no_interiors(void); /* defined in blacklst.c */
2432 GC_INNER void GC_start_debugging_inner(void); /* defined in dbg_mlc.c. */
2433 /* Should not be called if GC_debugging_started. */
2435 /* Store debugging info into p. Return displaced pointer. */
2436 /* Assumes we hold the allocation lock. */
2437 GC_INNER void *GC_store_debug_info_inner(void *p, word sz, const char *str,
2440 #ifdef REDIRECT_MALLOC
2441 # ifdef GC_LINUX_THREADS
2442 GC_INNER GC_bool GC_text_mapping(char *nm, ptr_t *startp, ptr_t *endp);
2445 #elif defined(USE_WINALLOC)
2446 GC_INNER void GC_add_current_malloc_heap(void);
2447 #endif /* !REDIRECT_MALLOC */
2449 #ifdef MAKE_BACK_GRAPH
2450 GC_INNER void GC_build_back_graph(void);
2451 GC_INNER void GC_traverse_back_graph(void);
2455 GC_INNER void GC_init_win32(void);
2458 #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
2459 GC_INNER void * GC_roots_present(ptr_t);
2460 /* The type is a lie, since the real type doesn't make sense here, */
2461 /* and we only test for NULL. */
2464 #ifdef GC_WIN32_THREADS
2465 GC_INNER void GC_get_next_stack(char *start, char * limit, char **lo,
2467 # if defined(MPROTECT_VDB) && !defined(CYGWIN32)
2468 GC_INNER void GC_set_write_fault_handler(void);
2470 # if defined(WRAP_MARK_SOME) && !defined(GC_PTHREADS)
2471 GC_INNER GC_bool GC_started_thread_while_stopped(void);
2472 /* Did we invalidate mark phase with an unexpected thread start? */
2474 #endif /* GC_WIN32_THREADS */
2477 GC_INNER void GC_reset_finalizer_nested(void);
2478 GC_INNER unsigned char *GC_check_finalizer_nested(void);
2479 GC_INNER void GC_do_blocking_inner(ptr_t data, void * context);
2480 GC_INNER void GC_push_all_stacks(void);
2481 # ifdef USE_PROC_FOR_LIBRARIES
2482 GC_INNER GC_bool GC_segment_is_thread_stack(ptr_t lo, ptr_t hi);
2485 GC_INNER ptr_t GC_greatest_stack_base_below(ptr_t bound);
2487 #endif /* THREADS */
2489 #ifdef DYNAMIC_LOADING
2490 GC_INNER GC_bool GC_register_main_static_data(void);
2492 GC_INNER void GC_init_dyld(void);
2494 #endif /* DYNAMIC_LOADING */
2496 #ifdef SEARCH_FOR_DATA_START
2497 GC_INNER void GC_init_linux_data_start(void);
2498 void * GC_find_limit(void *, int);
2501 #if defined(NETBSD) && defined(__ELF__)
2502 GC_INNER void GC_init_netbsd_elf(void);
2503 void * GC_find_limit(void *, int);
2507 GC_INNER void GC_set_and_save_fault_handler(void (*handler)(int));
2510 #ifdef NEED_PROC_MAPS
2511 # if defined(DYNAMIC_LOADING) && defined(USE_PROC_FOR_LIBRARIES)
2512 GC_INNER char *GC_parse_map_entry(char *buf_ptr, ptr_t *start, ptr_t *end,
2513 char **prot, unsigned int *maj_dev,
2514 char **mapping_name);
2516 # if defined(IA64) || defined(INCLUDE_LINUX_THREAD_DESCR)
2517 GC_INNER GC_bool GC_enclosing_mapping(ptr_t addr,
2518 ptr_t *startp, ptr_t *endp);
2520 GC_INNER char *GC_get_maps(void); /* from os_dep.c */
2521 #endif /* NEED_PROC_MAPS */
2523 #ifdef GC_ASSERTIONS
2524 # define GC_ASSERT(expr) \
2527 GC_err_printf("Assertion failure: %s:%d\n", \
2528 __FILE__, __LINE__); \
2529 ABORT("assertion failure"); \
2532 GC_INNER word GC_compute_large_free_bytes(void);
2533 GC_INNER word GC_compute_root_size(void);
2535 # define GC_ASSERT(expr)
2538 /* Check a compile time assertion at compile time. */
2539 #if _MSC_VER >= 1700
2540 # define GC_STATIC_ASSERT(expr) \
2541 static_assert(expr, "static assertion failed: " #expr)
2542 #elif defined(static_assert) && __STDC_VERSION__ >= 201112L
2543 # define GC_STATIC_ASSERT(expr) static_assert(expr, #expr)
2544 #elif defined(mips) && !defined(__GNUC__)
2545 /* DOB: MIPSPro C gets an internal error taking the sizeof an array type.
2546 This code works correctly (ugliness is to avoid "unused var" warnings) */
2547 # define GC_STATIC_ASSERT(expr) \
2548 do { if (0) { char j[(expr)? 1 : -1]; j[0]='\0'; j[0]=j[0]; } } while(0)
2550 /* The error message for failure is a bit baroque, but ... */
2551 # define GC_STATIC_ASSERT(expr) (void)sizeof(char[(expr)? 1 : -1])
2554 /* Runtime check for an argument declared as non-null is actually not null. */
2555 #if GC_GNUC_PREREQ(4, 0)
2556 /* Workaround tautological-pointer-compare Clang warning. */
2557 # define NONNULL_ARG_NOT_NULL(arg) (*(volatile void **)&(arg) != NULL)
2559 # define NONNULL_ARG_NOT_NULL(arg) (NULL != (arg))
2562 #define COND_DUMP_CHECKS \
2564 GC_ASSERT(GC_compute_large_free_bytes() == GC_large_free_bytes); \
2565 GC_ASSERT(GC_compute_root_size() == GC_root_size); \
2568 #ifndef NO_DEBUGGING
2569 GC_EXTERN GC_bool GC_dump_regularly;
2570 /* Generate regular debugging dumps. */
/* Dump the collector state if regular dumps were requested (expected */
/* to be off), otherwise just perform the consistency checks. */
2571 # define COND_DUMP if (EXPECT(GC_dump_regularly, FALSE)) { \
2572 GC_dump_named(NULL); \
2573 } else COND_DUMP_CHECKS
2575 # define COND_DUMP COND_DUMP_CHECKS
2578 #if defined(PARALLEL_MARK)
2579 /* We need additional synchronization facilities from the thread */
2580 /* support. We believe these are less performance critical */
2581 /* than the main garbage collector lock; standard pthreads-based */
2582 /* implementations should be sufficient. */
2584 # define GC_markers_m1 GC_parallel
2585 /* Number of mark threads we would like to have */
2586 /* excluding the initiating thread. */
2588 GC_EXTERN GC_bool GC_parallel_mark_disabled;
2589 /* A flag to temporarily avoid parallel marking. */
2591 /* The mark lock and condition variable. If the GC lock is also */
2592 /* acquired, the GC lock must be acquired first. The mark lock is */
2593 /* used to both protect some variables used by the parallel */
2594 /* marker, and to protect GC_fl_builder_count, below. */
2595 /* GC_notify_all_marker() is called when */
2596 /* the state of the parallel marker changes */
2597 /* in some significant way (see gc_mark.h for details). The */
2598 /* latter set of events includes incrementing GC_mark_no. */
2599 /* GC_notify_all_builder() is called when GC_fl_builder_count */
2602 GC_INNER void GC_wait_for_markers_init(void);
2603 GC_INNER void GC_acquire_mark_lock(void);
2604 GC_INNER void GC_release_mark_lock(void);
2605 GC_INNER void GC_notify_all_builder(void);
2606 GC_INNER void GC_wait_for_reclaim(void);
2608 GC_EXTERN signed_word GC_fl_builder_count; /* Protected by mark lock. */
2610 GC_INNER void GC_notify_all_marker(void);
2611 GC_INNER void GC_wait_marker(void);
2612 GC_EXTERN word GC_mark_no; /* Protected by mark lock. */
2614 GC_INNER void GC_help_marker(word my_mark_no);
2615 /* Try to help out parallel marker for mark cycle */
2616 /* my_mark_no. Returns if the mark cycle finishes or */
2617 /* was already done, or there was nothing to do for */
2618 /* some other reason. */
2620 GC_INNER void GC_start_mark_threads_inner(void);
2621 #endif /* PARALLEL_MARK */
2623 #if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS) && !defined(NACL) \
2624 && !defined(GC_DARWIN_THREADS) && !defined(SIG_SUSPEND)
2625 /* We define the thread suspension signal here, so that we can refer */
2626 /* to it in the dirty bit implementation, if necessary. Ideally we */
2627 /* would allocate a (real-time?) signal using the standard mechanism. */
2628 /* Unfortunately, there is no standard mechanism. (There is one */
2629 /* in Linux glibc, but it's not exported.) Thus we continue to use */
2630 /* the same hard-coded signals we've always used. */
2631 # if (defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS)) \
2632 && !defined(GC_USESIGRT_SIGNALS)
2633 # if defined(SPARC) && !defined(SIGPWR)
2634 /* SPARC/Linux doesn't properly define SIGPWR in <signal.h>. */
2635 /* It is aliased to SIGLOST in asm/signal.h, though. */
2636 # define SIG_SUSPEND SIGLOST
2638 /* Linuxthreads itself uses SIGUSR1 and SIGUSR2. */
2639 # define SIG_SUSPEND SIGPWR
2641 # elif defined(GC_OPENBSD_THREADS)
2642 # ifndef GC_OPENBSD_UTHREADS
2643 # define SIG_SUSPEND SIGXFSZ
2645 # elif defined(_SIGRTMIN) && !defined(CPPCHECK)
2646 # define SIG_SUSPEND _SIGRTMIN + 6
2648 # define SIG_SUSPEND SIGRTMIN + 6
2650 #endif /* GC_PTHREADS && !SIG_SUSPEND */
2652 #if defined(GC_PTHREADS) && !defined(GC_SEM_INIT_PSHARED)
2653 # define GC_SEM_INIT_PSHARED 0
2656 /* Some macros for setjmp that works across signal handlers */
2657 /* were possible, and a couple of routines to facilitate */
2658 /* catching accesses to bad addresses when that's */
2659 /* possible/needed. */
2660 #if (defined(UNIX_LIKE) || (defined(NEED_FIND_LIMIT) && defined(CYGWIN32))) \
2661 && !defined(GC_NO_SIGSETJMP)
2662 # if defined(SUNOS5SIGS) && !defined(FREEBSD) && !defined(LINUX)
2664 # include <sys/siginfo.h>
2667 /* Define SETJMP and friends to be the version that restores */
2668 /* the signal mask. */
2669 # define SETJMP(env) sigsetjmp(env, 1)
2670 # define LONGJMP(env, val) siglongjmp(env, val)
2671 # define JMP_BUF sigjmp_buf
2674 # define SETJMP(env) hal_setjmp(env)
2676 # define SETJMP(env) setjmp(env)
2678 # define LONGJMP(env, val) longjmp(env, val)
2679 # define JMP_BUF jmp_buf
2680 #endif /* !UNIX_LIKE || GC_NO_SIGSETJMP */
2682 /* Do we need the GC_find_limit machinery to find the end of a */
2684 #if defined(HEURISTIC2) || defined(SEARCH_FOR_DATA_START) \
2685 || (!defined(STACKBOTTOM) && defined(HEURISTIC2)) \
2686 || ((defined(SVR4) || defined(AIX) || defined(DGUX) \
2687 || (defined(LINUX) && defined(SPARC))) && !defined(PCR))
2688 # define NEED_FIND_LIMIT
2691 #if defined(DATASTART_USES_BSDGETDATASTART)
2693 # include <machine/trap.h>
2696 # define NEED_FIND_LIMIT
2698 GC_INNER ptr_t GC_FreeBSDGetDataStart(size_t, ptr_t);
2699 # define DATASTART_IS_FUNC
2700 #endif /* DATASTART_USES_BSDGETDATASTART */
2702 #if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__) \
2703 && !defined(NEED_FIND_LIMIT)
2704 /* Used by GC_init_netbsd_elf() in os_dep.c. */
2705 # define NEED_FIND_LIMIT
2708 #if defined(IA64) && !defined(NEED_FIND_LIMIT)
2709 # define NEED_FIND_LIMIT
2710 /* May be needed for register backing store base. */
2713 #if defined(NEED_FIND_LIMIT) \
2714 || (defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS))
2715 GC_EXTERN JMP_BUF GC_jmp_buf;
2717 /* Set up a handler for address faults which will longjmp to */
2719 GC_INNER void GC_setup_temporary_fault_handler(void);
2720 /* Undo the effect of GC_setup_temporary_fault_handler. */
2721 GC_INNER void GC_reset_fault_handler(void);
2722 #endif /* NEED_FIND_LIMIT || USE_PROC_FOR_LIBRARIES */
2724 /* Some convenience macros for cancellation support. */
2725 #if defined(CANCEL_SAFE)
2726 # if defined(GC_ASSERTIONS) \
2727 && (defined(USE_COMPILER_TLS) \
2728 || (defined(LINUX) && !defined(ARM32) && GC_GNUC_PREREQ(3, 3) \
2729 || defined(HPUX) /* and probably others ... */))
2730 extern __thread unsigned char GC_cancel_disable_count;
2731 # define NEED_CANCEL_DISABLE_COUNT
2732 # define INCR_CANCEL_DISABLE() ++GC_cancel_disable_count
2733 # define DECR_CANCEL_DISABLE() --GC_cancel_disable_count
2734 # define ASSERT_CANCEL_DISABLED() GC_ASSERT(GC_cancel_disable_count > 0)
2736 # define INCR_CANCEL_DISABLE()
2737 # define DECR_CANCEL_DISABLE()
2738 # define ASSERT_CANCEL_DISABLED() (void)0
2739 # endif /* GC_ASSERTIONS & ... */
/* Disable pthread cancellation for the calling thread, saving the */
/* previous state in "state", and bump the (assertions-only) disable */
/* counter. */
2740 # define DISABLE_CANCEL(state) \
2741 do { pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &state); \
2742 INCR_CANCEL_DISABLE(); } while (0)
/* Restore the cancellation state saved by DISABLE_CANCEL; checks that */
/* the disable counter is positive (if assertions are enabled) and */
/* decrements it. */
2743 # define RESTORE_CANCEL(state) \
2744 do { ASSERT_CANCEL_DISABLED(); \
2745 pthread_setcancelstate(state, NULL); \
2746 DECR_CANCEL_DISABLE(); } while (0)
2747 #else /* !CANCEL_SAFE */
2748 # define DISABLE_CANCEL(state) (void)0
2749 # define RESTORE_CANCEL(state) (void)0
2750 # define ASSERT_CANCEL_DISABLED() (void)0
2751 #endif /* !CANCEL_SAFE */
2755 #endif /* GC_PRIVATE_H */