(other than read) be handled specially by client code.
See os_dep.c for details.
-2. Information supplied by the programmer. We define "stubborn"
- objects to be objects that are rarely changed. Such an object
- can be allocated (and enabled for writing) with `GC_malloc_stubborn`.
- Once it has been initialized, the collector should be informed with
- a call to `GC_end_stubborn_change`. Subsequent writes that store
- pointers into the object must be preceded by a call to
- `GC_change_stubborn`.
-
-This mechanism performs best for objects that are written only for
-initialization, and such that only one stubborn object is writable
-at once. It is typically not worth using for short-lived
-objects. Stubborn objects are treated less efficiently than pointer-free
-(atomic) objects.
-
-A rough rule of thumb is that, in the absence of VM information, garbage
-collection pauses are proportional to the amount of pointerful storage
-plus the amount of modified "stubborn" storage that is reachable during
-the collection.
-
-Initial allocation of stubborn objects takes longer than allocation
-of other objects, since other data structures need to be maintained.
-
-We recommend against random use of stubborn objects in client
-code, since bugs caused by inappropriate writes to stubborn objects
-are likely to be very infrequently observed and hard to trace.
-However, their use may be appropriate in a few carefully written
-library routines that do not make the objects themselves available
-for writing by client code.
+2. Information supplied by the programmer. The object is considered dirty
+   after a call to `GC_end_stubborn_change` provided the library has been
+   compiled with `MANUAL_VDB` defined. It is typically not worth using for
+   short-lived objects. Note that bugs caused by a missing
+   `GC_end_stubborn_change` call are likely to be observed very infrequently
+   and hard to trace.
## Bugs
# ifndef GC_NO_FINALIZATION
GC_finalize();
# endif
-# ifdef STUBBORN_ALLOC
- GC_clean_changing_list();
-# endif
-
# ifndef NO_CLOCK
if (GC_print_stats)
GET_TIME(finalize_time);
/* This is debugging code intended to verify the results of dirty bit */
/* computations. Works only in a single threaded environment. */
-/* We assume that stubborn objects are changed only when they are */
-/* enabled for writing. (Certain kinds of writing are actually */
-/* safe under other conditions.) */
#define NSUMS 10000
#define OFFSET 0x10000
return(result | 0x80000000 /* doesn't look like pointer */);
}
-#ifdef STUBBORN_ALLOC
- /* Check whether a stubborn object from the given block appears on */
- /* the appropriate free list. */
- STATIC GC_bool GC_on_free_list(struct hblk *h)
- {
- hdr * hhdr = HDR(h);
- word sz = BYTES_TO_WORDS(hhdr -> hb_sz);
- ptr_t p;
-
- if (sz > MAXOBJWORDS) return(FALSE);
- for (p = GC_sobjfreelist[sz]; p != 0; p = obj_link(p)) {
- if (HBLKPTR(p) == h) return(TRUE);
- }
- return(FALSE);
- }
-#endif
-
int GC_n_dirty_errors = 0;
int GC_n_faulted_dirty_errors = 0;
-int GC_n_changed_errors = 0;
unsigned long GC_n_clean = 0;
unsigned long GC_n_dirty = 0;
/* Set breakpoint here */GC_n_dirty_errors++;
if (was_faulted) GC_n_faulted_dirty_errors++;
}
-# ifdef STUBBORN_ALLOC
- if (!HBLK_IS_FREE(hhdr)
- && hhdr -> hb_obj_kind == STUBBORN
- && !GC_page_was_changed(h)
- && !GC_on_free_list(h)) {
- /* if GC_on_free_list(h) then reclaim may have touched it */
- /* without any allocations taking place. */
- /* Set breakpoint here */GC_n_changed_errors++;
- }
-# endif
}
pe -> new_valid = TRUE;
pe -> block = h + OFFSET;
}
}
-/* Should be called immediately after GC_read_dirty and GC_read_changed. */
+/* Should be called immediately after GC_read_dirty. */
void GC_check_dirty(void)
{
int index;
GC_n_dirty_errors = 0;
GC_n_faulted_dirty_errors = 0;
- GC_n_changed_errors = 0;
GC_n_clean = 0;
GC_n_dirty = 0;
GC_err_printf("Found %d dirty bit errors (%d were faulted)\n",
GC_n_dirty_errors, GC_n_faulted_dirty_errors);
}
- if (GC_n_changed_errors > 0) {
- GC_err_printf("Found %d changed bit errors\n", GC_n_changed_errors);
- GC_err_printf(
- "These may be benign (provoked by nonpointer changes)\n");
-# ifdef THREADS
- GC_err_printf(
- "Also expect 1 per thread currently allocating a stubborn obj\n");
-# endif
- }
for (i = 0; i < GC_n_faulted; ++i) {
GC_faulted[i] = 0; /* Don't expose block pointers to GC */
}
AC_ARG_ENABLE(checksums,
[AS_HELP_STRING([--enable-checksums],
- [Report on erroneously cleared dirty bits and
- unexpectedly altered stubborn objects, at
- substantial performance cost; use only for debugging
- of the incremental collector])])
+ [Report on erroneously cleared dirty bits at
+ substantial performance cost; use only for
+ debugging of the incremental collector])])
if test x$enable_checksums = xyes; then
if test x$enable_munmap != xno -o x$THREADS != xnone; then
AC_MSG_ERROR([CHECKSUMS not compatible with USE_MUNMAP or threads])
kind_str = "ATOMIC_UNCOLLECTABLE";
break;
# endif
- case STUBBORN:
- kind_str = "STUBBORN";
- break;
default:
kind_str = NULL;
/* The alternative is to use snprintf(buffer) but it is */
}
#endif /* DBG_HDRS_ALL */
-#ifdef STUBBORN_ALLOC
- GC_API GC_ATTR_MALLOC void * GC_CALL GC_debug_malloc_stubborn(size_t lb,
- GC_EXTRA_PARAMS)
- {
- void * result = GC_malloc_stubborn(SIZET_SAT_ADD(lb, DEBUG_BYTES));
-
- return store_debug_info(result, lb, "GC_debug_malloc_stubborn",
- OPT_RA s, i);
- }
-
- GC_API void GC_CALL GC_debug_change_stubborn(const void *p)
- {
- const void * q = GC_base_C(p);
- hdr * hhdr;
-
- if (q == 0) {
- ABORT_ARG1("GC_debug_change_stubborn: bad arg", ": %p", p);
- }
- hhdr = HDR(q);
- if (hhdr -> hb_obj_kind != STUBBORN) {
- ABORT_ARG1("GC_debug_change_stubborn: arg not stubborn", ": %p", p);
- }
- GC_change_stubborn(q);
- }
-
-#else /* !STUBBORN_ALLOC */
-
- GC_API GC_ATTR_MALLOC void * GC_CALL GC_debug_malloc_stubborn(size_t lb,
- GC_EXTRA_PARAMS)
- {
+GC_API void * GC_CALL GC_debug_malloc_stubborn(size_t lb, GC_EXTRA_PARAMS)
+{
return GC_debug_malloc(lb, OPT_RA s, i);
- }
+}
- GC_API void GC_CALL GC_debug_change_stubborn(
+GC_API void GC_CALL GC_debug_change_stubborn(
const void * p GC_ATTR_UNUSED) {}
-#endif /* !STUBBORN_ALLOC */
GC_API void GC_CALL GC_debug_end_stubborn_change(const void *p)
{
if (NULL == q) {
ABORT_ARG1("GC_debug_end_stubborn_change: bad arg", ": %p", p);
}
-# ifdef STUBBORN_ALLOC
- if (HDR(q) -> hb_obj_kind != STUBBORN)
- ABORT_ARG1("GC_debug_end_stubborn_change: arg not stubborn",
- ": %p", p);
-# endif
GC_end_stubborn_change(q);
}
}
hhdr = HDR(base);
switch (hhdr -> hb_obj_kind) {
-# ifdef STUBBORN_ALLOC
- case STUBBORN:
- result = GC_debug_malloc_stubborn(lb, OPT_RA s, i);
- break;
-# endif
case NORMAL:
result = GC_debug_malloc(lb, OPT_RA s, i);
break;
GC_debug_generic_or_special_malloc(size_t lb, int knd, GC_EXTRA_PARAMS)
{
switch (knd) {
-# ifdef STUBBORN_ALLOC
- case STUBBORN:
- return GC_debug_malloc_stubborn(lb, OPT_RA s, i);
-# endif
case PTRFREE:
return GC_debug_malloc_atomic(lb, OPT_RA s, i);
case NORMAL:
The lower part of the new file AmigaOS.c does this in various ways, mainly by
wrapping GC_malloc, GC_malloc_atomic, GC_malloc_uncollectable,
- GC_malloc_atomic_uncollectable, GC_malloc_stubborn, GC_malloc_ignore_off_page
+ GC_malloc_atomic_uncollectable, GC_malloc_ignore_off_page
and GC_malloc_atomic_ignore_off_page. GC_realloc is also wrapped, but
doesn't do the same effort in preventing to return chip-mem.
Other allocating-functions (f.ex. GC_*_typed_) can probably be
with each call frame. Default is zero. Ignored if we don't know how to
retrieve arguments on the platform.
-CHECKSUMS Reports on erroneously clear dirty bits, and unexpectedly
- altered stubborn objects, at substantial performance cost. Use only for
- debugging of the incremental collector. Not compatible with USE_MUNMAP
- or threads.
+CHECKSUMS Reports on erroneously cleared dirty bits (at a substantial
+ performance cost). Use only for debugging of the incremental collector.
+ Not compatible with USE_MUNMAP or threads.
GC_GCJ_SUPPORT Includes support for gcj (and possibly other systems
that include a pointer to a type descriptor in each allocated object).
(useful when NO_GETENV). See the similar environment variable description
in README.environment. Requires MAKE_BACK_GRAPH defined.
-STUBBORN_ALLOC Allows allocation of "hard to change" objects, and thus
- makes incremental collection easier. Was enabled by default until 6.0.
- Rarely used, to my knowledge.
-
HANDLE_FORK (Unix and Cygwin only) Attempt by default to make GC_malloc()
work in a child process fork()'ed from a multi-threaded parent. Not fully
POSIX-compliant and could be disabled at runtime (before GC_INIT).
Certain kinds are scanned for pointers, others are not. Some may have
per-object type descriptors that determine pointer locations. Or a specific
kind may correspond to one specific object layout. Two built-in kinds are
-uncollectible. One (`STUBBORN`) is immutable without special precautions.
+uncollectible.
In spite of that, it is very likely that most C clients of the collector
currently use at most two kinds: `NORMAL` and `PTRFREE` objects. The
[GCJ](https://gcc.gnu.org/onlinedocs/gcc-4.8.5/gcj/) runtime also makes heavy
We keep track of modified pages using one of several distinct mechanisms:
- * Through explicit mutator cooperation. Currently this requires the use of
- `GC_malloc_stubborn`, and is rarely used.
* (`MPROTECT_VDB`) By write-protecting physical pages and catching write
faults. This is implemented for many Unix-like systems and for Win32. It is
not possible in a few environments.
performance may actually be better with `mprotect` and signals.)
* (`PCR_VDB`) By relying on an external dirty bit implementation, in this
case the one in Xerox PCR.
+ * (`MANUAL_VDB`) Through explicit mutator cooperation. This requires the
+ client code to call `GC_end_stubborn_change`, and is rarely used.
* (`DEFAULT_VDB`) By treating all pages as dirty. This is the default
- if none of the other techniques is known to be usable, and
- `GC_malloc_stubborn` is not used. (Practical only for testing, or if the
- vast majority of objects use `GC_malloc_stubborn`.)
+ if none of the other techniques is known to be usable. (Practical only for
+ testing.)
## Black-listing
like, instead of performing an entire collection at once. This is likely
to increase total running time. It will improve response on a platform that
either has suitable support in the garbage collector (Linux and most Unix
-versions, Win32 if the collector was suitably built) or if _stubborn_
-allocation is used (see `gc.h`). On many platforms this interacts poorly with
-system calls that write to the garbage collected heap.
+versions, Win32 if the collector was suitably built). On many platforms this
+interacts poorly with system calls that write to the garbage collected heap.
**`GC_warn_proc GC_set_warn_proc(GC_warn_proc)`** - Replace the default
procedure used by the collector to print warnings. The collector may otherwise
at least the file name and line number at the allocation point to be saved
as part of the object. Leak reports will then also include this information.
-Many collector features (e.g. stubborn objects, finalization, and
-disappearing links) are less useful in this context, and are not fully
-supported. Their use will usually generate additional bogus leak reports,
-since the collector itself drops some associated objects.
+Many collector features (e.g. finalization and disappearing links) are less
+useful in this context, and are not fully supported. Their use will usually
+generate additional bogus leak reports, since the collector itself drops some
+associated objects.
The same is generally true of thread support. However, as of 6.0alpha4,
correct leak reports should be generated with linuxthreads.
/* General purpose allocation routines, with roughly malloc calling */
/* conv. The atomic versions promise that no relevant pointers are */
/* contained in the object. The non-atomic versions guarantee that the */
-/* new object is cleared. GC_malloc_stubborn promises that no changes */
-/* to the object will occur after GC_end_stubborn_change has been */
-/* called on the result of GC_malloc_stubborn. GC_malloc_uncollectable */
-/* allocates an object that is scanned for pointers to collectible */
+/* new object is cleared. GC_malloc_uncollectable allocates */
+/* an object that is scanned for pointers to collectible */
/* objects, but is not itself collectible. The object is scanned even */
/* if it does not appear to be reachable. GC_malloc_uncollectable and */
/* GC_free called on the resulting object implicitly update */
/* GC_non_gc_bytes appropriately. */
-/* Note that the GC_malloc_stubborn support doesn't really exist */
-/* anymore. MANUAL_VDB provides comparable functionality. */
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
GC_malloc(size_t /* size_in_bytes */);
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
GC_strndup(const char *, size_t) GC_ATTR_NONNULL(1);
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
GC_malloc_uncollectable(size_t /* size_in_bytes */);
-GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
- GC_malloc_stubborn(size_t /* size_in_bytes */);
+GC_API GC_ATTR_DEPRECATED void * GC_CALL GC_malloc_stubborn(size_t);
/* GC_memalign() is not well tested. */
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(2) void * GC_CALL
/* Explicitly deallocate an object. Dangerous if used incorrectly. */
/* Requires a pointer to the base of an object. */
-/* If the argument is stubborn, it should not be changeable when freed. */
/* An object should not be enabled for finalization (and it should not */
/* contain registered disappearing links of any kind) when it is */
/* explicitly deallocated. */
/* GC_free(0) is a no-op, as required by ANSI C for free. */
GC_API void GC_CALL GC_free(void *);
-/* Stubborn objects may be changed only if the collector is explicitly */
-/* informed. The collector is implicitly informed of coming change */
-/* when such an object is first allocated. The following routines */
-/* inform the collector that an object will no longer be changed, or */
-/* that it will once again be changed. Only non-NULL pointer stores */
-/* into the object are considered to be changes. The argument to */
-/* GC_end_stubborn_change must be exactly the value returned by */
-/* GC_malloc_stubborn or passed to GC_change_stubborn. (In the second */
-/* case, it may be an interior pointer within 512 bytes of the */
-/* beginning of the objects.) There is a performance penalty for */
-/* allowing more than one stubborn object to be changed at once, but it */
-/* is acceptable to do so. The same applies to dropping stubborn */
-/* objects that are still changeable. */
-GC_API void GC_CALL GC_change_stubborn(const void *) GC_ATTR_NONNULL(1);
+/* Allocation of "stubborn" objects is no longer supported.  The      */
+/* definitions below exist only for backward compatibility.           */
+#define GC_MALLOC_STUBBORN(sz) GC_MALLOC(sz)
+#define GC_NEW_STUBBORN(t) GC_NEW(t)
+#define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
+GC_API GC_ATTR_DEPRECATED void GC_CALL GC_change_stubborn(const void *);
+
+/* Inform the collector that the object has been changed. */
+/* Only non-NULL pointer stores into the object are considered to be */
+/* changes. Matters only if the library has been compiled with */
+/* MANUAL_VDB defined (otherwise the function does nothing). */
GC_API void GC_CALL GC_end_stubborn_change(const void *) GC_ATTR_NONNULL(1);
/* Return a pointer to the base (lowest address) of an object given */
/* or with the standard C library, your code is broken. In my */
/* opinion, it shouldn't have been invented, but now we're stuck. -HB */
/* The resulting object has the same kind as the original. */
-/* If the argument is stubborn, the result will have changes enabled. */
/* It is an error to have changes enabled for the original object. */
/* It does not change the content of the object from its beginning to */
/* the minimum of old size and new_size_in_bytes; the content above in */
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
GC_debug_malloc_uncollectable(size_t /* size_in_bytes */,
GC_EXTRA_PARAMS);
-GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
+GC_API GC_ATTR_DEPRECATED void * GC_CALL
GC_debug_malloc_stubborn(size_t /* size_in_bytes */, GC_EXTRA_PARAMS);
GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL
GC_debug_malloc_ignore_off_page(size_t /* size_in_bytes */,
GC_API void * GC_CALL GC_debug_realloc(void * /* old_object */,
size_t /* new_size_in_bytes */, GC_EXTRA_PARAMS)
/* 'realloc' attr */ GC_ATTR_ALLOC_SIZE(2);
-GC_API void GC_CALL GC_debug_change_stubborn(const void *) GC_ATTR_NONNULL(1);
+GC_API
+#if !defined(CPPCHECK)
+ GC_ATTR_DEPRECATED
+#endif
+void GC_CALL GC_debug_change_stubborn(const void *);
GC_API void GC_CALL GC_debug_end_stubborn_change(const void *)
GC_ATTR_NONNULL(1);
GC_debug_register_finalizer_no_order(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_UNREACHABLE(p, f, d, of, od) \
GC_debug_register_finalizer_unreachable(p, f, d, of, od)
-# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS)
-# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
GC_general_register_disappearing_link(link, \
GC_register_finalizer_no_order(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_UNREACHABLE(p, f, d, of, od) \
GC_register_finalizer_unreachable(p, f, d, of, od)
-# define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
-# define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
GC_general_register_disappearing_link(link, obj)
/* may return NULL (if out of memory). */
#define GC_NEW(t) ((t*)GC_MALLOC(sizeof(t)))
#define GC_NEW_ATOMIC(t) ((t*)GC_MALLOC_ATOMIC(sizeof(t)))
-#define GC_NEW_STUBBORN(t) ((t*)GC_MALLOC_STUBBORN(sizeof(t)))
#define GC_NEW_UNCOLLECTABLE(t) ((t*)GC_MALLOC_UNCOLLECTABLE(sizeof(t)))
#ifdef GC_REQUIRE_WCSDUP
(*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic)
# define GC_malloc_uncollectable(a) \
(*GC_amiga_allocwrapper_do)(a,GC_malloc_uncollectable)
-# define GC_malloc_stubborn(a) \
- (*GC_amiga_allocwrapper_do)(a,GC_malloc_stubborn)
# define GC_malloc_atomic_uncollectable(a) \
(*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic_uncollectable)
# define GC_malloc_ignore_off_page(a) \
/* */
/*********************************/
-/* #define STUBBORN_ALLOC */
- /* Enable stubborn allocation, and thus a limited */
- /* form of incremental collection w/o dirty bits. */
-
/* #define ALL_INTERIOR_POINTERS */
/* Forces all pointers into the interior of an */
/* object to be considered valid. Also causes the */
/* Number of granules to allocate when asked for a certain */
/* number of bytes. Should be accessed with the allocation */
/* lock held. */
-# ifdef STUBBORN_ALLOC
-# define GC_sobjfreelist GC_arrays._sobjfreelist
- ptr_t _sobjfreelist[MAXOBJGRANULES+1];
- /* Free list for immutable objects. */
-# endif
# ifdef MARK_BIT_PER_GRANULE
# define GC_obj_map GC_arrays._obj_map
unsigned short * _obj_map[MAXOBJGRANULES + 1];
char _valid_offsets[VALID_OFFSET_SZ];
/* GC_valid_offsets[i] == TRUE ==> i */
/* is registered as a displacement. */
-# ifdef STUBBORN_ALLOC
-# define GC_changed_pages GC_arrays._changed_pages
- page_hash_table _changed_pages;
- /* Stubborn object pages that were changes since last call to */
- /* GC_read_changed. */
-# define GC_prev_changed_pages GC_arrays._prev_changed_pages
- page_hash_table _prev_changed_pages;
- /* Stubborn object pages that were changes before last call to */
- /* GC_read_changed. */
-# endif
# if defined(PROC_VDB) || defined(MPROTECT_VDB) \
|| defined(GWW_VDB) || defined(MANUAL_VDB)
# define GC_grungy_pages GC_arrays._grungy_pages
#define UNCOLLECTABLE 2
#ifdef GC_ATOMIC_UNCOLLECTABLE
# define AUNCOLLECTABLE 3
-# define STUBBORN 4
# define IS_UNCOLLECTABLE(k) (((k) & ~1) == UNCOLLECTABLE)
+# define GC_N_KINDS_INITIAL_VALUE 4
#else
-# define STUBBORN 3
# define IS_UNCOLLECTABLE(k) ((k) == UNCOLLECTABLE)
+# define GC_N_KINDS_INITIAL_VALUE 3
#endif
GC_EXTERN unsigned GC_n_kinds;
/* Same as GC_base but excepts and returns a pointer to const object. */
#define GC_base_C(p) ((const void *)GC_base((/* no const */ void *)(p)))
-/* Stubborn objects: */
-void GC_read_changed(void); /* Analogous to GC_read_dirty */
-GC_bool GC_page_was_changed(struct hblk * h);
- /* Analogous to GC_page_was_dirty */
-void GC_clean_changing_list(void);
- /* Collect obsolete changing list entries */
-void GC_stubborn_init(void);
-
/* Debugging print routines: */
void GC_print_block_list(void);
void GC_print_hblkfreelist(void);
GC_size_map[low_limit] = granule_sz;
}
-/* Allocate lb bytes for an object of kind k. */
-/* Should not be used to directly to allocate */
-/* objects such as STUBBORN objects that */
-/* require special handling on allocation. */
+/* Allocate lb bytes for an object of kind k.           */
+/* Should not be used directly to allocate objects that */
+/* require special handling on allocation.              */
GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
{
void *op;
int knd)
{
switch(knd) {
-# ifdef STUBBORN_ALLOC
- case STUBBORN:
- return GC_malloc_stubborn(lb);
-# endif
case PTRFREE:
case NORMAL:
return GC_malloc_kind(lb, knd);
}
if (ADD_SLOP(lb) <= sz) {
if (lb >= (sz >> 1)) {
-# ifdef STUBBORN_ALLOC
- if (obj_kind == STUBBORN) GC_change_stubborn(p);
-# endif
if (orig_sz > lb) {
/* Clear unneeded part of object to avoid bogus pointer */
/* tracing. */
- /* Safe for stubborn objects. */
BZERO(((ptr_t)p) + lb, orig_sz - lb);
}
return(p);
/* 0 | */ GC_DS_LENGTH, FALSE /* add length to descr */, FALSE
/*, */ OK_DISCLAIM_INITZ },
# endif
-# ifdef STUBBORN_ALLOC
- { (void **)&GC_sobjfreelist[0], 0,
- /* 0 | */ GC_DS_LENGTH, TRUE /* add length to descr */, TRUE
- /*, */ OK_DISCLAIM_INITZ },
-# endif
};
-# ifdef STUBBORN_ALLOC
-# define GC_N_KINDS_INITIAL_VALUE (STUBBORN+1)
-# else
-# define GC_N_KINDS_INITIAL_VALUE STUBBORN
-# endif
-
GC_INNER unsigned GC_n_kinds = GC_N_KINDS_INITIAL_VALUE;
# ifndef INITIAL_MARK_STACK_SIZE
/* let it grow dynamically. */
# endif
-#if !defined(GC_DISABLE_INCREMENTAL) || defined(STUBBORN_ALLOC)
+#if !defined(GC_DISABLE_INCREMENTAL)
STATIC word GC_n_rescuing_pages = 0;
/* Number of dirty pages we marked from */
/* excludes ptrfree pages, etc. */
# endif
}
# endif
-# ifdef STUBBORN_ALLOC
- GC_read_changed();
-# endif
# ifdef CHECKSUMS
if (GC_incremental) GC_check_dirty();
# endif
-# if !defined(GC_DISABLE_INCREMENTAL) || defined(STUBBORN_ALLOC)
+# if !defined(GC_DISABLE_INCREMENTAL)
GC_n_rescuing_pages = 0;
# endif
if (GC_mark_state == MS_NONE) {
} else {
scan_ptr = GC_push_next_marked_dirty(scan_ptr);
if (scan_ptr == 0) {
-# if !defined(GC_DISABLE_INCREMENTAL) \
- || defined(STUBBORN_ALLOC)
+# if !defined(GC_DISABLE_INCREMENTAL)
GC_COND_LOG_PRINTF("Marked from %lu dirty pages\n",
(unsigned long)GC_n_rescuing_pages);
# endif
/* Some quick shortcuts: */
if ((/* 0 | */ GC_DS_LENGTH) == descr) return;
if (GC_block_empty(hhdr)/* nothing marked */) return;
-# if !defined(GC_DISABLE_INCREMENTAL) || defined(STUBBORN_ALLOC)
+# if !defined(GC_DISABLE_INCREMENTAL)
GC_n_rescuing_pages++;
# endif
GC_objects_are_marked = TRUE;
if ((/* 0 | */ GC_DS_LENGTH) == descr)
return;
-# if !defined(GC_DISABLE_INCREMENTAL) || defined(STUBBORN_ALLOC)
+# if !defined(GC_DISABLE_INCREMENTAL)
GC_n_rescuing_pages++;
# endif
GC_objects_are_marked = TRUE;
if (NULL == h) ABORT("Bad HDR() definition");
# endif
}
-# ifdef STUBBORN_ALLOC
- if (hhdr -> hb_obj_kind == STUBBORN) {
- if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr))
- break;
- } else
-# endif
- /* else */ {
- if (GC_block_was_dirty(h, hhdr)) break;
- }
+ if (GC_block_was_dirty(h, hhdr))
+ break;
h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
hhdr = HDR(h);
}
if (GC_dont_gc || GC_dont_precollect)
GC_with_callee_saves_pushed(callee_saves_pushed_dummy_fn, NULL);
# endif
-# ifdef STUBBORN_ALLOC
- GC_stubborn_init();
-# endif
# ifndef DONT_USE_ATEXIT
if (GC_find_leak) {
/* This is to give us at least one chance to detect leaks. */
void GC_dirty(ptr_t p);
#endif
-GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_stubborn(size_t lb)
+GC_API void * GC_CALL GC_malloc_stubborn(size_t lb)
{
return(GC_malloc(lb));
}
#endif
/* Allocation Statistics. Synchronization is not strictly necessary. */
-volatile AO_t stubborn_count = 0;
volatile AO_t uncollectable_count = 0;
volatile AO_t collectable_count = 0;
volatile AO_t atomic_count = 0;
int *p;
unsigned my_extra = (unsigned)AO_fetch_and_add1(&extra_count) % 5000;
- r = (sexpr) GC_MALLOC_STUBBORN(sizeof(struct SEXPR) + my_extra);
+ r = (sexpr)GC_MALLOC(sizeof(struct SEXPR) + my_extra);
CHECK_OUT_OF_MEMORY(r);
- AO_fetch_and_add1(&stubborn_count);
+ AO_fetch_and_add1(&collectable_count);
for (p = (int *)r;
(word)p < (word)r + my_extra + sizeof(struct SEXPR); p++) {
if (*p) {
/* of 49 integers. Thus, this is thread safe without locks, */
/* assuming acquire/release barriers in a_get/set() and atomic */
/* pointer assignments (otherwise, e.g., check_ints() may see */
- /* an uninitialized object returned by GC_MALLOC_STUBBORN). */
+ /* an uninitialized object returned by GC_MALLOC). */
a_set(reverse(reverse(a_get())));
# if !defined(AT_END) && !defined(THREADS)
/* This is not thread safe, since realloc explicitly deallocates */
GC_printf("Allocated %d uncollectable objects\n",
(int)uncollectable_count);
GC_printf("Allocated %d atomic objects\n", (int)atomic_count);
- GC_printf("Allocated %d stubborn objects\n", (int)stubborn_count);
GC_printf("Reallocated %d objects\n", (int)realloc_count);
GC_printf("Finalized %d/%d objects - ",
finalized_count, finalizable_count);