2009-10-22 Ivan Maidanski <ivmai@mail.ru>
+ * allchblk.c (GC_unmap_old, GC_merge_unmapped, GC_allochblk,
+ GC_freehblk): Use GC_INNER for the function definition.
+ * alloc.c (GC_never_stop_func, GC_should_collect,
+ GC_try_to_collect_inner, GC_collect_a_little_inner,
+ GC_set_fl_marks, GC_add_to_our_memory, GC_add_to_heap,
+ GC_expand_hp_inner, GC_collect_or_expand, GC_allocobj): Ditto.
+ * backgraph.c (GC_build_back_graph, GC_traverse_back_graph):
+ Ditto.
+ * blacklst.c (GC_default_print_heap_obj_proc, GC_bl_init,
+ GC_promote_black_lists, GC_unpromote_black_lists,
+ GC_add_to_black_list_normal, GC_add_to_black_list_stack,
+ GC_is_black_listed): Ditto.
+ * darwin_stop_world.c (GC_push_all_stacks,
+ GC_stop_init, GC_stop_world, GC_start_world): Ditto.
+ * dbg_mlc.c (GC_has_other_debug_info, GC_store_back_pointer,
+ GC_marked_for_finalization, GC_generate_random_backtrace_no_gc,
+ GC_store_debug_info, GC_start_debugging,
+ GC_debug_generic_malloc_inner,
+ GC_debug_generic_malloc_inner_ignore_off_page,
+ GC_debug_malloc_uncollectable, GC_debug_free_inner): Ditto.
+ * dyn_load.c (GC_register_dynamic_libraries,
+ GC_register_main_static_data, GC_init_dyld): Ditto.
+ * finalize.c (GC_push_finalizer_structures, GC_finalize,
+ GC_notify_or_invoke_finalizers, GC_print_finalization_stats):
+ Ditto.
+ * gcj_mlc.c (GC_core_gcj_malloc): Ditto.
+ * headers.c (GC_find_header, GC_header_cache_miss,
+ GC_scratch_alloc, GC_init_headers, GC_install_header,
+ GC_install_counts, GC_remove_header, GC_remove_counts,
+ GC_next_used_block, GC_prev_block): Ditto.
+ * mach_dep.c (GC_with_callee_saves_pushed): Ditto.
+ * malloc.c (GC_collect_or_expand, GC_alloc_large,
+ GC_generic_malloc_inner, GC_generic_malloc_inner_ignore_off_page,
+ GC_core_malloc_atomic, GC_core_malloc, GC_free_inner): Ditto.
+ * mallocx.c (GC_generic_malloc_ignore_off_page): Ditto.
+ * mark.c (GC_collection_in_progress, GC_clear_hdr_marks,
+ GC_set_hdr_marks, GC_set_mark_bit, GC_clear_mark_bit,
+ GC_clear_marks, GC_initiate_gc, GC_mark_some,
+ GC_mark_stack_empty, GC_invalidate_mark_state,
+ GC_signal_mark_stack_overflow, GC_mark_from, GC_help_marker,
+ GC_mark_init, GC_push_all, GC_push_conditional,
+ GC_mark_and_push_stack, GC_push_all_eager, GC_push_all_stack):
+ Ditto.
+ * mark_rts.c (GC_is_static_root, GC_roots_present, GC_approx_sp,
+ GC_exclude_static_roots_inner, GC_push_all_register_frames,
+ GC_push_all_stack_frames, GC_cond_register_dynamic_libraries,
+ GC_push_roots): Ditto.
+ * misc.c (GC_extend_size_map, GC_clear_stack, GC_err_write):
+ Ditto.
+ * new_hblk.c (GC_build_fl, GC_new_hblk): Ditto.
+ * obj_map.c (GC_register_displacement_inner, GC_add_map_entry,
+ GC_initialize_offsets): Ditto.
+ * os_dep.c (GC_get_maps, GC_parse_map_entry, GC_text_mapping,
+ GC_init_linux_data_start, GC_init_netbsd_elf, GC_setpagesize,
+ GC_set_and_save_fault_handler, GC_setup_temporary_fault_handler,
+ GC_reset_fault_handler, GC_get_register_stack_base, GC_init_win32,
+ GC_add_current_malloc_heap, GC_is_heap_base, GC_unmap, GC_remap,
+ GC_unmap_gap, GC_push_all_stacks, GC_gww_dirty_init,
+ GC_dirty_init, GC_read_dirty, GC_page_was_dirty,
+ GC_page_was_ever_dirty, GC_remove_protection,
+ GC_write_fault_handler, GC_mprotect_stop, GC_mprotect_resume,
+ GC_save_callers, GC_print_callers): Ditto.
+ * pthread_stop_world.c (GC_push_all_stacks, GC_stop_world,
+ GC_start_world, GC_stop_init): Ditto.
+ * pthread_support.c (GC_mark_thread_local_free_lists,
+ GC_lookup_thread, GC_reset_finalizer_nested,
+ GC_check_finalizer_nested, GC_segment_is_thread_stack,
+ GC_greatest_stack_base_below, GC_thr_init, GC_init_parallel,
+ GC_do_blocking_inner, GC_lock, GC_acquire_mark_lock,
+ GC_release_mark_lock, GC_wait_for_reclaim, GC_notify_all_builder,
+ GC_wait_marker, GC_notify_all_marker): Ditto.
+ * reclaim.c (GC_print_all_errors, GC_block_empty,
+ GC_reclaim_generic, GC_start_reclaim, GC_continue_reclaim,
+ GC_reclaim_all): Ditto.
+ * thread_local_alloc.c (GC_init_thread_local,
+ GC_destroy_thread_local, GC_mark_thread_local_fls_for): Ditto.
+ * win32_threads.c (GC_reset_finalizer_nested,
+ GC_check_finalizer_nested, GC_do_blocking_inner, GC_stop_world,
+ GC_start_world, GC_push_all_stacks, GC_get_next_stack,
+ GC_acquire_mark_lock, GC_release_mark_lock, GC_wait_for_reclaim,
+ GC_notify_all_builder, GC_wait_marker, GC_notify_all_marker,
+ GC_thr_init, GC_init_parallel, GC_lock,
+ GC_mark_thread_local_free_lists): Ditto.
+ * alloc.c (GC_add_current_malloc_heap, GC_build_back_graph,
+ GC_traverse_back_graph): Use GC_INNER for the function prototype.
+ * darwin_stop_world.c (GC_mprotect_stop, GC_mprotect_resume):
+ Ditto.
+ * dbg_mlc.c (GC_default_print_heap_obj_proc): Ditto.
+ * dyn_load.c (GC_parse_map_entry, GC_get_maps,
+ GC_segment_is_thread_stack, GC_roots_present, GC_is_heap_base,
+ GC_get_next_stack): Ditto.
+ * finalize.c (GC_reset_finalizer_nested,
+ GC_check_finalizer_nested): Ditto.
+ * gcj_mlc.c (GC_start_debugging): Ditto.
+ * include/private/dbg_mlc.h (GC_save_callers, GC_print_callers,
+ GC_has_other_debug_info, GC_store_debug_info): Ditto.
+ * include/private/gc_hdrs.h (GC_header_cache_miss): Ditto.
+ * include/private/gc_locks.h (GC_lock): Ditto.
+ * include/private/gc_pmark.h (GC_signal_mark_stack_overflow,
+ GC_mark_from): Ditto.
+ * include/private/pthread_support.h (GC_lookup_thread,
+ GC_stop_init): Ditto.
+ * include/private/thread_local_alloc.h (GC_init_thread_local,
+ GC_destroy_thread_local, GC_mark_thread_local_fls_for): Ditto.
+ * malloc.c (GC_extend_size_map, GC_text_mapping): Ditto.
+ * mark.c (GC_page_was_ever_dirty): Ditto.
+ * mark_rts.c (GC_mark_thread_local_free_lists): Ditto.
+ * misc.c (GC_register_main_static_data, GC_init_win32,
+ GC_setpagesize, GC_init_linux_data_start,
+ GC_set_and_save_fault_handler, GC_init_dyld, GC_init_netbsd_elf,
+ GC_do_blocking_inner): Ditto.
+ * os_dep.c (GC_greatest_stack_base_below): Ditto.
+ * win32_threads.c (GC_write_fault_handler, GC_gww_dirty_init):
+ Ditto.
+ * include/private/gc_priv.h: Ditto (for most prototypes).
+ * include/private/gc_priv.h (GC_INNER): Update the comment.
+ * doc/README.macros (GC_DLL): Update.
+
+2009-10-22 Ivan Maidanski <ivmai@mail.ru>
+
* alloc.c (GC_collection_in_progress): Move the prototype to
gc_priv.h.
* gc_dlopen.c (GC_collection_in_progress): Ditto.
# endif /* MARK_BIT_PER_GRANULE */
/* Clear mark bits */
- GC_clear_hdr_marks(hhdr);
+ GC_clear_hdr_marks(hhdr);
hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
return(TRUE);
/* Unmap blocks that haven't been recently touched. This is the only */
/* way blocks are ever unmapped. */
-void GC_unmap_old(void)
+GC_INNER void GC_unmap_old(void)
{
struct hblk * h;
hdr * hhdr;
/* Merge all unmapped blocks that are adjacent to other free */
/* blocks. This may involve remapping, since all blocks are either */
/* fully mapped or fully unmapped. */
-void GC_merge_unmapped(void)
+GC_INNER void GC_merge_unmapped(void)
{
struct hblk * h, *next;
hdr * hhdr, *nexthdr;
*
* The client is responsible for clearing the block, if necessary.
*/
-struct hblk *
+GC_INNER struct hblk *
GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
{
word blocks;
* The may_split flag indicates whether it's OK to split larger blocks.
*/
STATIC struct hblk *
-GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n, GC_bool may_split)
+GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n,
+ GC_bool may_split)
{
struct hblk *hbp;
hdr * hhdr; /* Header corr. to hbp */
*
* All mark words are assumed to be cleared.
*/
-void
-GC_freehblk(struct hblk *hbp)
+GC_INNER void GC_freehblk(struct hblk *hbp)
{
struct hblk *next, *prev;
hdr *hhdr, *prevhdr, *nexthdr;
if (size <= 0)
ABORT("Deallocating excessively large block. Too large an allocation?");
/* Probably possible if we try to allocate more than half the address */
- /* space at once. If we dont catch it here, strange things happen */
+ /* space at once. If we don't catch it here, strange things happen */
/* later. */
GC_remove_counts(hbp, (word)size);
hhdr->hb_sz = size;
word GC_free_space_divisor = GC_FREE_SPACE_DIVISOR;
-int GC_CALLBACK GC_never_stop_func(void)
+GC_INNER int GC_CALLBACK GC_never_stop_func(void)
{
return(0);
}
STATIC word GC_collect_at_heapsize = (word)(-1);
/* Have we allocated enough to amortize a collection? */
-GC_bool GC_should_collect(void)
+GC_INNER GC_bool GC_should_collect(void)
{
static word last_min_bytes_allocd;
static word last_gc_no;
* not GC_never_stop_func then abort if stop_func returns TRUE.
* Return TRUE if we successfully completed the collection.
*/
-GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
+GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
{
# ifndef SMALL_CONFIG
CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
STATIC int GC_deficit = 0;/* The number of extra calls to GC_mark_some */
/* that we have made. */
-void GC_collect_a_little_inner(int n)
+GC_INNER void GC_collect_a_little_inner(int n)
{
int i;
IF_CANCEL(int cancel_state;)
}
#if !defined(REDIRECT_MALLOC) && (defined(MSWIN32) || defined(MSWINCE))
- void GC_add_current_malloc_heap(void);
+ GC_INNER void GC_add_current_malloc_heap(void);
#endif
#ifdef MAKE_BACK_GRAPH
- void GC_build_back_graph(void);
+ GC_INNER void GC_build_back_graph(void);
#endif
#ifndef SMALL_CONFIG
}
/* Set all mark bits for the free list whose first entry is q */
-void GC_set_fl_marks(ptr_t q)
+GC_INNER void GC_set_fl_marks(ptr_t q)
{
struct hblk *h, *last_h;
hdr *hhdr;
#endif
#ifdef MAKE_BACK_GRAPH
- void GC_traverse_back_graph(void);
+ GC_INNER void GC_traverse_back_graph(void);
#endif
/* Finish up a collection. Assumes mark bits are consistent, lock is */
#ifdef USE_PROC_FOR_LIBRARIES
/* Add HBLKSIZE aligned, GET_MEM-generated block to GC_our_memory. */
/* Defined to do nothing if USE_PROC_FOR_LIBRARIES not set. */
- void GC_add_to_our_memory(ptr_t p, size_t bytes)
+ GC_INNER void GC_add_to_our_memory(ptr_t p, size_t bytes)
{
if (0 == p) return;
if (GC_n_memory >= MAX_HEAP_SECTS)
* Use the chunk of memory starting at p of size bytes as part of the heap.
* Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
*/
-void GC_add_to_heap(struct hblk *p, size_t bytes)
+GC_INNER void GC_add_to_heap(struct hblk *p, size_t bytes)
{
hdr * phdr;
word endp;
* Tiny values of n are rounded up.
* Returns FALSE on failure.
*/
-GC_bool GC_expand_hp_inner(word n)
+GC_INNER GC_bool GC_expand_hp_inner(word n)
{
word bytes;
struct hblk * space;
/* free blocks available. Should be called until the blocks are */
/* available (setting retry value to TRUE unless this is the first call */
/* in a loop) or until it fails by returning FALSE. */
-GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page,
- GC_bool retry)
+GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
+ GC_bool ignore_off_page,
+ GC_bool retry)
{
GC_bool gc_not_stopped = TRUE;
word blocks_to_get;
* The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
* Assumes we hold the allocator lock.
*/
-ptr_t GC_allocobj(size_t gran, int kind)
+GC_INNER ptr_t GC_allocobj(size_t gran, int kind)
{
void ** flh = &(GC_obj_kinds[kind].ok_freelist[gran]);
GC_bool tried_minor = FALSE;
GC_ASSERT(p == GC_base(p) && q == GC_base(q));
if (!GC_HAS_DEBUG_INFO(q) || !GC_HAS_DEBUG_INFO(p)) {
- /* This is really a misinterpreted free list link, since we saw */
- /* a pointer to a free list. Dont overwrite it! */
+ /* This is really a misinterpreted free list link, since we saw */
+ /* a pointer to a free list. Don't overwrite it! */
return;
}
if (0 == old_back_ptr) {
/* Rebuild the representation of the backward reachability graph. */
/* Does not examine mark bits. Can be called before GC. */
-void GC_build_back_graph(void)
+GC_INNER void GC_build_back_graph(void)
{
GC_apply_to_each_object(add_back_edges);
}
STATIC word GC_max_max_height = 0;
-void GC_traverse_back_graph(void)
+GC_INNER void GC_traverse_back_graph(void)
{
GC_max_height = 0;
GC_apply_to_each_object(update_max_height);
GC_deepest_obj = 0;
}
-#endif /* !MAKE_BACK_GRAPH */
+#endif /* MAKE_BACK_GRAPH */
STATIC void GC_clear_bl(word *);
-void GC_default_print_heap_obj_proc(ptr_t p)
+GC_INNER void GC_default_print_heap_obj_proc(ptr_t p)
{
ptr_t base = GC_base(p);
GC_err_printf("start: %p, appr. length: %ld", base,
}
#endif
-void GC_bl_init(void)
+GC_INNER void GC_bl_init(void)
{
if (!GC_all_interior_pointers) {
GC_old_normal_bl = (word *)
/* Signal the completion of a collection. Turn the incomplete black */
/* lists into new black lists, etc. */
-void GC_promote_black_lists(void)
+GC_INNER void GC_promote_black_lists(void)
{
word * very_old_normal_bl = GC_old_normal_bl;
word * very_old_stack_bl = GC_old_stack_bl;
}
}
-void GC_unpromote_black_lists(void)
+GC_INNER void GC_unpromote_black_lists(void)
{
if (!GC_all_interior_pointers) {
GC_copy_bl(GC_old_normal_bl, GC_incomplete_normal_bl);
/* the plausible heap bounds. */
/* Add it to the normal incomplete black list if appropriate. */
#ifdef PRINT_BLACK_LIST
- void GC_add_to_black_list_normal(word p, ptr_t source)
+ GC_INNER void GC_add_to_black_list_normal(word p, ptr_t source)
#else
- void GC_add_to_black_list_normal(word p)
+ GC_INNER void GC_add_to_black_list_normal(word p)
#endif
{
if (!(GC_modws_valid_offsets[p & (sizeof(word)-1)])) return;
/* And the same for false pointers from the stack. */
#ifdef PRINT_BLACK_LIST
- void GC_add_to_black_list_stack(word p, ptr_t source)
+ GC_INNER void GC_add_to_black_list_stack(word p, ptr_t source)
#else
- void GC_add_to_black_list_stack(word p)
+ GC_INNER void GC_add_to_black_list_stack(word p)
#endif
{
word index = PHT_HASH((word)p);
* If (h,len) is not black listed, return 0.
* Knows about the structure of the black list hash tables.
*/
-struct hblk * GC_is_black_listed(struct hblk *h, word len)
+GC_INNER struct hblk * GC_is_black_listed(struct hblk *h, word len)
{
word index = PHT_HASH((word)h);
word i;
#ifdef DARWIN_DONT_PARSE_STACK
-void GC_push_all_stacks(void)
+GC_INNER void GC_push_all_stacks(void)
{
int i;
kern_return_t r;
return (unsigned long)frame;
}
-void GC_push_all_stacks(void)
+GC_INNER void GC_push_all_stacks(void)
{
unsigned int i;
task_t my_task;
static struct GC_mach_thread GC_mach_threads[THREAD_TABLE_SZ];
STATIC int GC_mach_threads_count = 0;
-void GC_stop_init(void)
+GC_INNER void GC_stop_init(void)
{
int i;
}
#ifdef MPROTECT_VDB
- void GC_mprotect_stop(void);
- void GC_mprotect_resume(void);
+ GC_INNER void GC_mprotect_stop(void);
+ GC_INNER void GC_mprotect_resume(void);
#endif
/* Caller holds allocation lock. */
-void GC_stop_world(void)
+GC_INNER void GC_stop_world(void)
{
unsigned int i, changes;
task_t my_task = current_task();
/* Caller holds allocation lock, and has held it continuously since */
/* the world stopped. */
-void GC_start_world(void)
+GC_INNER void GC_start_world(void)
{
task_t my_task = current_task();
mach_port_t my_thread = mach_thread_self();
#endif
#include <string.h>
-void GC_default_print_heap_obj_proc(ptr_t p);
+GC_INNER void GC_default_print_heap_obj_proc(ptr_t p);
GC_API void GC_CALL GC_register_finalizer_no_order(void * obj,
GC_finalization_proc fn, void * cd,
/* on free lists may not have debug information set. Thus it's */
/* not always safe to return TRUE, even if the client does */
/* its part. */
- GC_bool GC_has_other_debug_info(ptr_t p)
+ GC_INNER GC_bool GC_has_other_debug_info(ptr_t p)
{
oh * ohdr = (oh *)p;
ptr_t body = (ptr_t)(ohdr + 1);
/* small, and this shouldn't be used in production code. */
/* We assume that dest is the real base pointer. Source will usually */
/* be a pointer to the interior of an object. */
- void GC_store_back_pointer(ptr_t source, ptr_t dest)
+ GC_INNER void GC_store_back_pointer(ptr_t source, ptr_t dest)
{
if (GC_HAS_DEBUG_INFO(dest)) {
((oh *)dest) -> oh_back_ptr = HIDE_BACK_PTR(source);
}
}
- void GC_marked_for_finalization(ptr_t dest)
+ GC_INNER void GC_marked_for_finalization(ptr_t dest)
{
GC_store_back_pointer(MARKED_FOR_FINALIZATION, dest);
}
/* Force a garbage collection and generate a backtrace from a */
/* random heap address. */
- void GC_generate_random_backtrace_no_gc(void)
+ GC_INNER void GC_generate_random_backtrace_no_gc(void)
{
void * current;
current = GC_generate_random_valid_address();
(((word)(p + sizeof(oh) + sz - 1) ^ (word)p) >= HBLKSIZE)
/* Store debugging info into p. Return displaced pointer. */
/* Assumes we don't hold allocation lock. */
-ptr_t GC_store_debug_info(ptr_t p, word sz, const char *string, word integer)
+GC_INNER ptr_t GC_store_debug_info(ptr_t p, word sz, const char *string,
+ word integer)
{
word * result = (word *)((oh *)p + 1);
DCL_LOCK_STATE;
STATIC void GC_do_nothing(void) {}
#endif
-void GC_start_debugging(void)
+GC_INNER void GC_start_debugging(void)
{
# ifndef SHORT_DBG_HDRS
GC_check_heap = GC_check_heap_proc;
* We assume debugging was started in collector initialization,
* and we already hold the GC lock.
*/
- void * GC_debug_generic_malloc_inner(size_t lb, int k)
+ GC_INNER void * GC_debug_generic_malloc_inner(size_t lb, int k)
{
void * result = GC_generic_malloc_inner(lb + DEBUG_BYTES, k);
return (GC_store_debug_info_inner(result, (word)lb, "INTERNAL", (word)0));
}
- void * GC_debug_generic_malloc_inner_ignore_off_page(size_t lb, int k)
+ GC_INNER void * GC_debug_generic_malloc_inner_ignore_off_page(size_t lb,
+ int k)
{
void * result = GC_generic_malloc_inner_ignore_off_page(
lb + DEBUG_BYTES, k);
return copy;
}
-GC_API void * GC_CALL GC_debug_malloc_uncollectable(size_t lb, GC_EXTRA_PARAMS)
+GC_API void * GC_CALL GC_debug_malloc_uncollectable(size_t lb,
+ GC_EXTRA_PARAMS)
{
void * result = GC_malloc_uncollectable(lb + UNCOLLECTABLE_DEBUG_BYTES);
#if defined(THREADS) && defined(DBG_HDRS_ALL)
/* Used internally; we assume it's called correctly. */
- void GC_debug_free_inner(void * p)
+ GC_INNER void GC_debug_free_inner(void * p)
{
ptr_t base = GC_base(p);
GC_ASSERT((ptr_t)p - (ptr_t)base == sizeof(oh));
be taken to properly deal with statically allocated
variables in the main program. Used for MS Windows.
Also used by GCC v4+ (only when the dynamic shared library
- is being built) in conjunction with "-fvisibility=hidden"
- option to hide internally used symbols.
+ is being built) to hide internally used symbols.
GC_NOT_DLL User-settable macro that overrides _DLL, e.g. if runtime
dynamic libraries are used, but the collector is in a static
GC_DLL Build dynamic-link library (or dynamic shared object). For Unix this
causes the exported symbols to have 'default' visibility (ignored unless
- GCC v4+, meaningful only if used together with GCC -fvisibility=hidden
- option).
+ GCC v4+) and the internal ones to have 'hidden' visibility.
DONT_USE_USER32_DLL (Win32 only) Don't use "user32" DLL import library
(containing MessageBox() entry); useful for a static GC library.
GC_PRINT_VERBOSE_STATS Permanently turn on verbose logging (useful for
debugging and profiling on WinCE).
-GC_DONT_EXPAND Dont expand the heap unless explicitly requested or forced to.
+GC_DONT_EXPAND Don't expand the heap unless explicitly requested or forced to.
GC_INITIAL_HEAP_SIZE=<value> Set the desired default initial heap size
in bytes.
# endif
# ifndef USE_PROC_FOR_LIBRARIES
-void GC_register_dynamic_libraries(void)
+GC_INNER void GC_register_dynamic_libraries(void)
{
struct link_map *lm = GC_FirstDLOpenedLinkMap();
#define MAPS_BUF_SIZE (32*1024)
-char *GC_parse_map_entry(char *buf_ptr, ptr_t *start, ptr_t *end,
+GC_INNER char *GC_parse_map_entry(char *buf_ptr, ptr_t *start, ptr_t *end,
char **prot, unsigned int *maj_dev,
char **mapping_name);
-char *GC_get_maps(void); /* from os_dep.c */
+GC_INNER char *GC_get_maps(void); /* from os_dep.c */
/* Sort an array of HeapSects by start address. */
/* Unfortunately at least some versions of */
}
#ifdef THREADS
- GC_bool GC_segment_is_thread_stack(ptr_t lo, ptr_t hi);
+ GC_INNER GC_bool GC_segment_is_thread_stack(ptr_t lo, ptr_t hi);
#endif
STATIC word GC_register_map_entries(char *maps)
return 1;
}
-void GC_register_dynamic_libraries(void)
+GC_INNER void GC_register_dynamic_libraries(void)
{
if (!GC_register_map_entries(GC_get_maps()))
ABORT("Failed to read /proc for library registration.");
}
/* We now take care of the main data segment ourselves: */
-GC_bool GC_register_main_static_data(void)
+GC_INNER GC_bool GC_register_main_static_data(void)
{
return FALSE;
}
}
/* Do we need to separately register the main static data segment? */
-GC_bool GC_register_main_static_data(void)
+GC_INNER GC_bool GC_register_main_static_data(void)
{
return (dl_iterate_phdr == 0);
}
return cachedResult;
}
-void GC_register_dynamic_libraries(void)
+GC_INNER void GC_register_dynamic_libraries(void)
{
struct link_map *lm;
# define IRIX6
#endif
-void * GC_roots_present(ptr_t);
+GC_INNER void * GC_roots_present(ptr_t);
/* The type is a lie, since the real type doesn't make sense here, */
/* and we only test for NULL. */
/* We use /proc to track down all parts of the address space that are */
/* mapped by the process, and throw out regions we know we shouldn't */
/* worry about. This may also work under other SVR4 variants. */
-void GC_register_dynamic_libraries(void)
+GC_INNER void GC_register_dynamic_libraries(void)
{
static int fd = -1;
char buf[30];
/* We traverse the entire address space and register all segments */
/* that could possibly have been written to. */
- GC_bool GC_is_heap_base(ptr_t p);
+ GC_INNER GC_bool GC_is_heap_base(ptr_t p);
# ifdef GC_WIN32_THREADS
- void GC_get_next_stack(char *start, char * limit, char **lo, char **hi);
+ GC_INNER void GC_get_next_stack(char *start, char * limit, char **lo,
+ char **hi);
STATIC void GC_cond_add_roots(char *base, char * limit)
{
# endif
#ifdef DYNAMIC_LOADING
- GC_bool GC_register_main_static_data(void)
+ /* GC_register_main_static_data is not needed unless DYNAMIC_LOADING. */
+ GC_INNER GC_bool GC_register_main_static_data(void)
{
# ifdef MSWINCE
/* Do we need to separately register the main static data segment? */
# define GC_wnt TRUE
# endif
- void GC_register_dynamic_libraries(void)
+ GC_INNER void GC_register_dynamic_libraries(void)
{
MEMORY_BASIC_INFORMATION buf;
size_t result;
#include <loader.h>
-void GC_register_dynamic_libraries(void)
+GC_INNER void GC_register_dynamic_libraries(void)
{
int status;
ldr_process_t mypid;
extern char *sys_errlist[];
extern int sys_nerr;
-void GC_register_dynamic_libraries(void)
+GC_INNER void GC_register_dynamic_libraries(void)
{
int status;
int index = 1; /* Ordinal position in shared library search list */
# pragma alloca
# include <sys/ldr.h>
# include <sys/errno.h>
- void GC_register_dynamic_libraries(void)
+ GC_INNER void GC_register_dynamic_libraries(void)
{
int len;
char *ldibuf;
# endif
}
-void GC_register_dynamic_libraries(void)
+GC_INNER void GC_register_dynamic_libraries(void)
{
/* Currently does nothing. The callbacks are setup by GC_init_dyld()
The dyld library takes it from there. */
This should be called BEFORE any thread in created and WITHOUT the
allocation lock held. */
-void GC_init_dyld(void)
+GC_INNER void GC_init_dyld(void)
{
static GC_bool initialized = FALSE;
}
#define HAVE_REGISTER_MAIN_STATIC_DATA
-GC_bool GC_register_main_static_data(void)
+GC_INNER GC_bool GC_register_main_static_data(void)
{
/* Already done through dyld callbacks */
return FALSE;
# include "th/PCR_ThCtl.h"
# include "mm/PCR_MM.h"
- void GC_register_dynamic_libraries(void)
+ GC_INNER void GC_register_dynamic_libraries(void)
{
/* Add new static data areas of dynamically loaded modules. */
{
#else /* !PCR */
-void GC_register_dynamic_libraries(void) {}
+GC_INNER void GC_register_dynamic_libraries(void) {}
#endif /* !PCR */
#ifndef HAVE_REGISTER_MAIN_STATIC_DATA
/* Do we need to separately register the main static data segment? */
- GC_bool GC_register_main_static_data(void)
+ GC_INNER GC_bool GC_register_main_static_data(void)
{
return TRUE;
}
word GC_fo_entries = 0; /* used also in extra/MacOS.c */
-void GC_push_finalizer_structures(void)
+GC_INNER void GC_push_finalizer_structures(void)
{
GC_push_all((ptr_t)(&dl_head), (ptr_t)(&dl_head) + sizeof(word));
GC_push_all((ptr_t)(&fo_head), (ptr_t)(&fo_head) + sizeof(word));
#ifdef THREADS
/* Defined in pthread_support.c or win32_threads.c. Called with the */
/* allocation lock held. */
- void GC_reset_finalizer_nested(void);
- unsigned *GC_check_finalizer_nested(void);
+ GC_INNER void GC_reset_finalizer_nested(void);
+ GC_INNER unsigned *GC_check_finalizer_nested(void);
#else
/* Global variables to minimize the level of recursion when a client */
/* finalizer allocates memory. */
/* Called with held lock (but the world is running). */
/* Cause disappearing links to disappear and unreachable objects to be */
/* enqueued for finalization. */
-void GC_finalize(void)
+GC_INNER void GC_finalize(void)
{
struct disappearing_link * curr_dl, * prev_dl, * next_dl;
struct finalizable_object * curr_fo, * prev_fo, * next_fo;
static GC_word last_finalizer_notification = 0;
-void GC_notify_or_invoke_finalizers(void)
+GC_INNER void GC_notify_or_invoke_finalizers(void)
{
GC_finalizer_notifier_proc notifier_fn = 0;
# if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
}
#ifndef SMALL_CONFIG
- void GC_print_finalization_stats(void)
+ GC_INNER void GC_print_finalization_stats(void)
{
struct finalizable_object *fo = GC_finalize_now;
unsigned long ready = 0;
/* type structure (vtable in gcj). */
/* This adds a byte at the end of the object if GC_malloc would.*/
#ifdef THREAD_LOCAL_ALLOC
- void * GC_core_gcj_malloc(size_t lb, void * ptr_to_struct_containing_descr)
+ GC_INNER void * GC_core_gcj_malloc(size_t lb,
+ void * ptr_to_struct_containing_descr)
#else
GC_API void * GC_CALL GC_gcj_malloc(size_t lb,
void * ptr_to_struct_containing_descr)
return((void *) op);
}
-void GC_start_debugging(void);
+GC_INNER void GC_start_debugging(void);
/* Similar to GC_gcj_malloc, but add debug info. This is allocated */
/* with GC_gcj_debug_kind. */
/* bottom_index. */
/* Non-macro version of header location routine */
-hdr * GC_find_header(ptr_t h)
+GC_INNER hdr * GC_find_header(ptr_t h)
{
# ifdef HASH_TL
hdr * result;
/* of an object unless both GC_all_interior_pointers is set */
/* and p is in fact a valid object pointer. */
/* Never returns a pointer to a free hblk. */
+GC_INNER hdr *
#ifdef PRINT_BLACK_LIST
- hdr * GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce, ptr_t source)
+ GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce, ptr_t source)
#else
- hdr * GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce)
+ GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce)
#endif
{
hdr *hhdr;
/* GC_scratch_last_end_ptr is end point of last obtained scratch area. */
/* GC_scratch_end_ptr is end point of current scratch area. */
-ptr_t GC_scratch_alloc(size_t bytes)
+GC_INNER ptr_t GC_scratch_alloc(size_t bytes)
{
register ptr_t result = scratch_free_ptr;
word GC_hdr_cache_misses = 0;
#endif
-void GC_init_headers(void)
+GC_INNER void GC_init_headers(void)
{
register unsigned i;
/* Install a header for block h. */
/* The header is uninitialized. */
/* Returns the header or 0 on failure. */
-struct hblkhdr * GC_install_header(struct hblk *h)
+GC_INNER struct hblkhdr * GC_install_header(struct hblk *h)
{
hdr * result;
}
/* Set up forwarding counts for block h of size sz */
-GC_bool GC_install_counts(struct hblk *h, size_t sz/* bytes */)
+GC_INNER GC_bool GC_install_counts(struct hblk *h, size_t sz/* bytes */)
{
struct hblk * hbp;
word i;
}
/* Remove the header for block h */
-void GC_remove_header(struct hblk *h)
+GC_INNER void GC_remove_header(struct hblk *h)
{
- hdr ** ha;
-
+ hdr **ha;
GET_HDR_ADDR(h, ha);
free_hdr(*ha);
*ha = 0;
}
/* Remove forwarding counts for h */
-void GC_remove_counts(struct hblk *h, size_t sz/* bytes */)
+GC_INNER void GC_remove_counts(struct hblk *h, size_t sz/* bytes */)
{
register struct hblk * hbp;
-
for (hbp = h+1; (char *)hbp < (char *)h + sz; hbp += 1) {
SET_HDR(hbp, 0);
}
/* Get the next valid block whose address is at least h */
/* Return 0 if there is none. */
-struct hblk * GC_next_used_block(struct hblk *h)
+GC_INNER struct hblk * GC_next_used_block(struct hblk *h)
{
register bottom_index * bi;
register word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
/* Get the last (highest address) block whose address is */
/* at most h. Return 0 if there is none. */
/* Unlike the above, this may return a free block. */
-struct hblk * GC_prev_block(struct hblk *h)
+GC_INNER struct hblk * GC_prev_block(struct hblk *h)
{
register bottom_index * bi;
register signed_word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
/* to stderr. It requires that we do not hold the lock. */
#if defined(SAVE_CALL_CHAIN)
struct callinfo;
- void GC_save_callers(struct callinfo info[NFRAMES]);
- void GC_print_callers(struct callinfo info[NFRAMES]);
+ GC_INNER void GC_save_callers(struct callinfo info[NFRAMES]);
+ GC_INNER void GC_print_callers(struct callinfo info[NFRAMES]);
# define ADD_CALL_CHAIN(base, ra) GC_save_callers(((oh *)(base)) -> oh_ci)
# define PRINT_CALL_CHAIN(base) GC_print_callers(((oh *)(base)) -> oh_ci)
#elif defined(GC_ADD_CALLER)
struct callinfo;
- void GC_print_callers(struct callinfo info[NFRAMES]);
+ GC_INNER void GC_print_callers(struct callinfo info[NFRAMES]);
# define ADD_CALL_CHAIN(base, ra) ((oh *)(base)) -> oh_ci[0].ci_pc = (ra)
# define PRINT_CALL_CHAIN(base) GC_print_callers(((oh *)(base)) -> oh_ci)
#else
#ifdef SHORT_DBG_HDRS
# define GC_has_other_debug_info(p) TRUE
#else
- GC_bool GC_has_other_debug_info(ptr_t p);
+ GC_INNER GC_bool GC_has_other_debug_info(ptr_t p);
#endif
#if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
/* Store debugging info into p. Return displaced pointer. */
/* Assumes we don't hold allocation lock. */
-ptr_t GC_store_debug_info(ptr_t p, word sz, const char *str, word integer);
+GC_INNER ptr_t GC_store_debug_info(ptr_t p, word sz, const char *str,
+ word integer);
#endif /* _DBG_MLC_H */
# define HCE_HDR(h) ((hce) -> hce_hdr)
#ifdef PRINT_BLACK_LIST
- hdr * GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce, ptr_t source);
+ GC_INNER hdr * GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce,
+ ptr_t source);
# define HEADER_CACHE_MISS(p, hce, source) \
GC_header_cache_miss(p, hce, source)
#else
- hdr * GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce);
+ GC_INNER hdr * GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce);
# define HEADER_CACHE_MISS(p, hce, source) GC_header_cache_miss(p, hce)
#endif
* DCL_LOCK_STATE declares any local variables needed by LOCK and UNLOCK.
*
* Note that I_HOLD_LOCK and I_DONT_HOLD_LOCK are used only positively
- * in assertions, and may return TRUE in the "dont know" case.
+ * in assertions, and may return TRUE in the "don't know" case.
*/
# ifdef THREADS
/* significant wasted time. We thus rely mostly on queued locks. */
# define USE_SPIN_LOCK
GC_EXTERN volatile AO_TS_t GC_allocate_lock;
- void GC_lock(void);
+ GC_INNER void GC_lock(void);
/* Allocation lock holder. Only set if acquired by client through */
/* GC_call_with_alloc_lock. */
# ifdef GC_ASSERTIONS
GC_EXTERN volatile GC_bool GC_collecting;
# define ENTER_GC() GC_collecting = 1;
# define EXIT_GC() GC_collecting = 0;
- void GC_lock(void);
+ GC_INNER void GC_lock(void);
GC_EXTERN unsigned long GC_lock_holder;
# ifdef GC_ASSERTIONS
GC_EXTERN unsigned long GC_mark_lock_holder;
*/
#endif /* PARALLEL_MARK */
-mse * GC_signal_mark_stack_overflow(mse *msp);
+GC_INNER mse * GC_signal_mark_stack_overflow(mse *msp);
/* Push the object obj with corresponding heap block header hhdr onto */
/* the mark stack. */
/* mark stack entry bottom (incl.). Stop after performing */
/* about one page worth of work. Return the new mark stack */
/* top entry. */
-mse * GC_mark_from(mse * top, mse * bottom, mse *limit);
+GC_INNER mse * GC_mark_from(mse * top, mse * bottom, mse *limit);
#define MARK_FROM_MARK_STACK() \
GC_mark_stack_top = GC_mark_from(GC_mark_stack_top, \
#ifndef GC_INNER
/* This tagging macro must be used at the start of every variable */
- /* definition which is declared with GC_EXTERN. */
+ /* definition which is declared with GC_EXTERN. Should also be used */
+ /* for the GC-scope function definitions and prototypes. Must not be */
+ /* used in gcconfig.h. Shouldn't be used for the debugging-only */
+ /* functions. Currently, not used for the functions declared in or */
+ /* called from the "dated" source files (pcr_interface.c, specific.c */
+ /* and in the "extra" folder). */
# if defined(GC_DLL) && defined(__GNUC__) && !defined(MSWIN32) \
&& !defined(MSWINCE)
# if __GNUC__ >= 4
#ifdef SAVE_CALL_CHAIN
/* Fill in the pc and argument information for up to NFRAMES of my */
/* callers. Ignore my frame and my callers frame. */
- void GC_save_callers(struct callinfo info[NFRAMES]);
- void GC_print_callers(struct callinfo info[NFRAMES]);
+ GC_INNER void GC_save_callers(struct callinfo info[NFRAMES]);
+ GC_INNER void GC_print_callers(struct callinfo info[NFRAMES]);
#endif
PCR_waitForever);
# else
# if defined(GC_WIN32_THREADS) || defined(GC_PTHREADS)
- void GC_stop_world(void);
- void GC_start_world(void);
+ GC_INNER void GC_stop_world(void);
+ GC_INNER void GC_start_world(void);
# define STOP_WORLD() GC_stop_world()
# define START_WORLD() GC_start_world()
# else
#ifdef THREADS
/* Process all activation "frames" - scan entire stack except for */
/* frames belonging to the user functions invoked by GC_do_blocking(). */
- void GC_push_all_stack_frames(ptr_t lo, ptr_t hi,
+ GC_INNER void GC_push_all_stack_frames(ptr_t lo, ptr_t hi,
struct GC_activation_frame_s *activation_frame);
#else
GC_EXTERN ptr_t GC_blocked_sp;
#ifdef IA64
/* Similar to GC_push_all_stack_frames() but for IA-64 registers store. */
- void GC_push_all_register_frames(ptr_t bs_lo, ptr_t bs_hi,
+ GC_INNER void GC_push_all_register_frames(ptr_t bs_lo, ptr_t bs_hi,
int eager, struct GC_activation_frame_s *activation_frame);
#endif
/* Important internal collector routines */
-ptr_t GC_approx_sp(void);
+GC_INNER ptr_t GC_approx_sp(void);
-GC_bool GC_should_collect(void);
+GC_INNER GC_bool GC_should_collect(void);
void GC_apply_to_all_blocks(void (*fn)(struct hblk *h, word client_data),
word client_data);
/* Invoke fn(hbp, client_data) for each */
/* allocated heap block. */
-struct hblk * GC_next_used_block(struct hblk * h);
+GC_INNER struct hblk * GC_next_used_block(struct hblk * h);
/* Return first in-use block >= h */
-struct hblk * GC_prev_block(struct hblk * h);
+GC_INNER struct hblk * GC_prev_block(struct hblk * h);
/* Return last block <= h. Returned block */
/* is managed by GC, but may or may not be in */
/* use. */
-void GC_mark_init(void);
-void GC_clear_marks(void);
+GC_INNER void GC_mark_init(void);
+GC_INNER void GC_clear_marks(void);
/* Clear mark bits for all heap objects. */
-void GC_invalidate_mark_state(void);
+GC_INNER void GC_invalidate_mark_state(void);
/* Tell the marker that marked */
/* objects may point to unmarked */
/* ones, and roots may point to */
/* unmarked objects. Reset mark stack. */
-GC_bool GC_mark_stack_empty(void);
-GC_bool GC_mark_some(ptr_t cold_gc_frame);
+GC_INNER GC_bool GC_mark_stack_empty(void);
+GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame);
/* Perform about one pages worth of marking */
/* work of whatever kind is needed. Returns */
/* quickly if no collection is in progress. */
/* Return TRUE if mark phase finished. */
-void GC_initiate_gc(void);
+GC_INNER void GC_initiate_gc(void);
/* initiate collection. */
/* If the mark state is invalid, this */
/* becomes full collection. Otherwise */
/* it's partial. */
-GC_bool GC_collection_in_progress(void);
+GC_INNER GC_bool GC_collection_in_progress(void);
/* Collection is in progress, or was abandoned. */
-void GC_push_all(ptr_t bottom, ptr_t top);
+GC_INNER void GC_push_all(ptr_t bottom, ptr_t top);
/* Push everything in a range */
/* onto mark stack. */
#ifndef SMALL_CONFIG
- void GC_push_conditional(ptr_t b, ptr_t t, GC_bool all);
+ GC_INNER void GC_push_conditional(ptr_t b, ptr_t t, GC_bool all);
#else
# define GC_push_conditional(b, t, all) GC_push_all(b, t)
#endif
/* Do either of the above, depending */
/* on the third arg. */
-void GC_push_all_stack(ptr_t b, ptr_t t);
+GC_INNER void GC_push_all_stack(ptr_t b, ptr_t t);
/* As above, but consider */
/* interior pointers as valid */
-void GC_push_all_eager(ptr_t b, ptr_t t);
+GC_INNER void GC_push_all_eager(ptr_t b, ptr_t t);
/* Same as GC_push_all_stack, but */
/* ensures that stack is scanned */
/* immediately, not just scheduled */
/* stacks are scheduled for scanning in *GC_push_other_roots, which */
/* is thread-package-specific. */
-void GC_push_roots(GC_bool all, ptr_t cold_gc_frame);
+GC_INNER void GC_push_roots(GC_bool all, ptr_t cold_gc_frame);
/* Push all or dirty roots. */
GC_EXTERN void (*GC_push_other_roots)(void);
/* supplied replacement should also call the */
/* original function. */
-void GC_push_finalizer_structures(void);
+GC_INNER void GC_push_finalizer_structures(void);
#ifdef THREADS
void GC_push_thread_structures(void);
#endif
/* A pointer such that we can avoid linking in */
/* the typed allocation support if unused. */
-void GC_with_callee_saves_pushed(void (*fn)(ptr_t, void *), ptr_t arg);
+GC_INNER void GC_with_callee_saves_pushed(void (*fn)(ptr_t, void *),
+ ptr_t arg);
#if defined(SPARC) || defined(IA64)
/* Cause all stacked registers to be saved in memory. Return a */
#endif
#if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
- void GC_mark_and_push_stack(ptr_t p, ptr_t source);
+ GC_INNER void GC_mark_and_push_stack(ptr_t p, ptr_t source);
/* Ditto, omits plausibility test */
#else
- void GC_mark_and_push_stack(ptr_t p);
+ GC_INNER void GC_mark_and_push_stack(ptr_t p);
#endif
-void GC_clear_hdr_marks(hdr * hhdr);
+GC_INNER void GC_clear_hdr_marks(hdr * hhdr);
/* Clear the mark bits in a header */
-void GC_set_hdr_marks(hdr * hhdr);
+GC_INNER void GC_set_hdr_marks(hdr * hhdr);
/* Set the mark bits in a header */
-void GC_set_fl_marks(ptr_t p);
+GC_INNER void GC_set_fl_marks(ptr_t p);
/* Set all mark bits associated with */
/* a free list. */
#ifdef GC_ASSERTIONS
/* set. Abort if not. */
#endif
void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp);
-void GC_exclude_static_roots_inner(void *start, void *finish);
-void GC_register_dynamic_libraries(void);
+GC_INNER void GC_exclude_static_roots_inner(void *start, void *finish);
+GC_INNER void GC_register_dynamic_libraries(void);
/* Add dynamic library data sections to the root set. */
-void GC_cond_register_dynamic_libraries(void);
+GC_INNER void GC_cond_register_dynamic_libraries(void);
/* Remove and reregister dynamic libraries if we're */
/* configured to do that at each GC. */
/* Machine dependent startup routines */
ptr_t GC_get_main_stack_base(void); /* Cold end of stack. */
#ifdef IA64
- ptr_t GC_get_register_stack_base(void);
+ GC_INNER ptr_t GC_get_register_stack_base(void);
/* Cold end of register stack. */
#endif
void GC_register_data_segments(void);
#ifdef THREADS
- void GC_thr_init(void);
- void GC_init_parallel(void);
+ GC_INNER void GC_thr_init(void);
+ GC_INNER void GC_init_parallel(void);
#else
- GC_bool GC_is_static_root(ptr_t p);
+ GC_INNER GC_bool GC_is_static_root(ptr_t p);
/* Is the address p in one of the registered static */
/* root sections? */
#endif
/* Black listing: */
-void GC_bl_init(void);
+GC_INNER void GC_bl_init(void);
# ifdef PRINT_BLACK_LIST
- void GC_add_to_black_list_normal(word p, ptr_t source);
+ GC_INNER void GC_add_to_black_list_normal(word p, ptr_t source);
/* Register bits as a possible future false */
/* reference from the heap or static data */
# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
GC_add_to_black_list_normal((word)(bits), (source)); \
}
# else
- void GC_add_to_black_list_normal(word p);
+ GC_INNER void GC_add_to_black_list_normal(word p);
# define GC_ADD_TO_BLACK_LIST_NORMAL(bits, source) \
if (GC_all_interior_pointers) { \
GC_add_to_black_list_stack((word)(bits)); \
# endif
# ifdef PRINT_BLACK_LIST
- void GC_add_to_black_list_stack(word p, ptr_t source);
+ GC_INNER void GC_add_to_black_list_stack(word p, ptr_t source);
# define GC_ADD_TO_BLACK_LIST_STACK(bits, source) \
GC_add_to_black_list_stack((word)(bits), (source))
# else
- void GC_add_to_black_list_stack(word p);
+ GC_INNER void GC_add_to_black_list_stack(word p);
# define GC_ADD_TO_BLACK_LIST_STACK(bits, source) \
GC_add_to_black_list_stack((word)(bits))
# endif
-struct hblk * GC_is_black_listed(struct hblk * h, word len);
+GC_INNER struct hblk * GC_is_black_listed(struct hblk * h, word len);
/* If there are likely to be false references */
/* to a block starting at h of the indicated */
/* length, then return the next plausible */
/* starting location for h that might avoid */
/* these false references. */
-void GC_promote_black_lists(void);
+GC_INNER void GC_promote_black_lists(void);
/* Declare an end to a black listing phase. */
-void GC_unpromote_black_lists(void);
+GC_INNER void GC_unpromote_black_lists(void);
/* Approximately undo the effect of the above. */
/* This actually loses some information, but */
/* only in a reasonably safe way. */
-ptr_t GC_scratch_alloc(size_t bytes);
+GC_INNER ptr_t GC_scratch_alloc(size_t bytes);
/* GC internal memory allocation for */
/* small objects. Deallocation is not */
/* possible. */
/* Heap block layout maps: */
-GC_bool GC_add_map_entry(size_t sz);
+GC_INNER GC_bool GC_add_map_entry(size_t sz);
/* Add a heap block map for objects of */
/* size sz to obj_map. */
/* Return FALSE on failure. */
-void GC_register_displacement_inner(size_t offset);
+GC_INNER void GC_register_displacement_inner(size_t offset);
/* Version of GC_register_displacement */
/* that assumes lock is already held. */
-void GC_initialize_offsets(void);
+GC_INNER void GC_initialize_offsets(void);
/* Initialize GC_valid_offsets, */
/* depending on current */
/* GC_all_interior_pointers settings. */
/* hblk allocation: */
-void GC_new_hblk(size_t size_in_granules, int kind);
+GC_INNER void GC_new_hblk(size_t size_in_granules, int kind);
/* Allocate a new heap block, and build */
/* a free list in it. */
-ptr_t GC_build_fl(struct hblk *h, size_t words, GC_bool clear, ptr_t list);
+GC_INNER ptr_t GC_build_fl(struct hblk *h, size_t words, GC_bool clear,
+ ptr_t list);
/* Build a free list for objects of */
/* size sz in block h. Append list to */
/* end of the free lists. Possibly */
/* called by GC_new_hblk, but also */
/* called explicitly without GC lock. */
-struct hblk * GC_allochblk(size_t size_in_bytes, int kind, unsigned flags);
+GC_INNER struct hblk * GC_allochblk(size_t size_in_bytes, int kind,
+ unsigned flags);
/* Allocate a heap block, inform */
/* the marker that block is valid */
/* for objects of indicated size. */
-ptr_t GC_alloc_large(size_t lb, int k, unsigned flags);
+GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags);
/* Allocate a large block of size lb bytes. */
/* The block is not cleared. */
/* Flags is 0 or IGNORE_OFF_PAGE. */
/* Does not update GC_bytes_allocd, but does */
/* other accounting. */
-void GC_freehblk(struct hblk * p);
+GC_INNER void GC_freehblk(struct hblk * p);
/* Deallocate a heap block and mark it */
/* as invalid. */
/* Misc GC: */
-GC_bool GC_expand_hp_inner(word n);
-void GC_start_reclaim(int abort_if_found);
+GC_INNER GC_bool GC_expand_hp_inner(word n);
+GC_INNER void GC_start_reclaim(int abort_if_found);
/* Restore unmarked objects to free */
/* lists, or (if abort_if_found is */
/* TRUE) report them. */
/* Sweeping of small object pages is */
/* largely deferred. */
-void GC_continue_reclaim(size_t sz, int kind);
+GC_INNER void GC_continue_reclaim(size_t sz, int kind);
/* Sweep pages of the given size and */
/* kind, as long as possible, and */
/* as long as the corr. free list is */
/* empty. Sz is in granules. */
-GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old);
+GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old);
/* Reclaim all blocks. Abort (in a */
/* consistent state) if f returns TRUE. */
-ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
- GC_bool init, ptr_t list, signed_word *count);
+GC_INNER ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
+ GC_bool init, ptr_t list,
+ signed_word *count);
/* Rebuild free list in hbp with */
/* header hhdr, with objects of size sz */
/* bytes. Add list to the end of the */
/* free list. Add the number of */
/* reclaimed bytes to *count. */
-GC_bool GC_block_empty(hdr * hhdr);
+GC_INNER GC_bool GC_block_empty(hdr * hhdr);
/* Block completely unmarked? */
-int GC_CALLBACK GC_never_stop_func(void);
+GC_INNER int GC_CALLBACK GC_never_stop_func(void);
/* Always returns 0 (FALSE). */
-GC_bool GC_try_to_collect_inner(GC_stop_func f);
+GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func f);
/* Collect; caller must have acquired */
/* lock. Collection is aborted if f */
/* GC_init */
#endif
-void GC_collect_a_little_inner(int n);
+GC_INNER void GC_collect_a_little_inner(int n);
/* Do n units worth of garbage */
/* collection work, if appropriate. */
/* A unit is an amount appropriate for */
/* communicate object layout info */
/* to the collector. */
/* The actual decl is in gc_mark.h. */
-void * GC_generic_malloc_ignore_off_page(size_t b, int k);
+GC_INNER void * GC_generic_malloc_ignore_off_page(size_t b, int k);
/* As above, but pointers past the */
/* first page of the resulting object */
/* are ignored. */
-void * GC_generic_malloc_inner(size_t lb, int k);
+GC_INNER void * GC_generic_malloc_inner(size_t lb, int k);
/* Ditto, but I already hold lock, etc. */
-void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k);
+GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k);
/* Allocate an object, where */
/* the client guarantees that there */
/* will always be a pointer to the */
/* beginning of the object while the */
/* object is live. */
-ptr_t GC_allocobj(size_t sz, int kind);
+GC_INNER ptr_t GC_allocobj(size_t sz, int kind);
/* Make the indicated */
/* free list nonempty, and return its */
/* head. Sz is in granules. */
-void * GC_clear_stack(void *);
+GC_INNER void * GC_clear_stack(void *);
/* in misc.c, behaves like identity. */
/* We make the GC_clear_stack() call a tail one, hoping to get more of */
/* Allocation routines that bypass the thread local cache. */
#ifdef THREAD_LOCAL_ALLOC
- void * GC_core_malloc(size_t);
- void * GC_core_malloc_atomic(size_t);
+ GC_INNER void * GC_core_malloc(size_t);
+ GC_INNER void * GC_core_malloc_atomic(size_t);
# ifdef GC_GCJ_SUPPORT
- void * GC_core_gcj_malloc(size_t, void *);
+ GC_INNER void * GC_core_gcj_malloc(size_t, void *);
# endif
#endif /* THREAD_LOCAL_ALLOC */
-void GC_init_headers(void);
-struct hblkhdr * GC_install_header(struct hblk *h);
+GC_INNER void GC_init_headers(void);
+GC_INNER struct hblkhdr * GC_install_header(struct hblk *h);
/* Install a header for block h. */
/* Return 0 on failure, or the header */
/* otherwise. */
-GC_bool GC_install_counts(struct hblk * h, size_t sz);
+GC_INNER GC_bool GC_install_counts(struct hblk * h, size_t sz);
/* Set up forwarding counts for block */
/* h of size sz. */
/* Return FALSE on failure. */
-void GC_remove_header(struct hblk * h);
+GC_INNER void GC_remove_header(struct hblk * h);
/* Remove the header for block h. */
-void GC_remove_counts(struct hblk * h, size_t sz);
+GC_INNER void GC_remove_counts(struct hblk * h, size_t sz);
/* Remove forwarding counts for h. */
-hdr * GC_find_header(ptr_t h);
+GC_INNER hdr * GC_find_header(ptr_t h);
-void GC_finalize(void);
+GC_INNER void GC_finalize(void);
/* Perform all indicated finalization actions */
/* on unmarked objects. */
/* Unreachable finalizable objects are enqueued */
/* for processing by GC_invoke_finalizers. */
/* Invoked with lock. */
-void GC_notify_or_invoke_finalizers(void);
+GC_INNER void GC_notify_or_invoke_finalizers(void);
/* If GC_finalize_on_demand is not set, invoke */
/* eligible finalizers. Otherwise: */
/* Call *GC_finalizer_notifier if there are */
/* finalizers to be run, and we haven't called */
/* this procedure yet this GC cycle. */
-void GC_add_to_heap(struct hblk *p, size_t bytes);
+GC_INNER void GC_add_to_heap(struct hblk *p, size_t bytes);
/* Add a HBLKSIZE aligned chunk to the heap. */
#ifdef USE_PROC_FOR_LIBRARIES
- void GC_add_to_our_memory(ptr_t p, size_t bytes);
+ GC_INNER void GC_add_to_our_memory(ptr_t p, size_t bytes);
/* Add a chunk to GC_our_memory. */
/* If p == 0, do nothing. */
#else
# define GC_add_to_our_memory(p, bytes)
#endif
-void GC_print_all_errors(void);
+GC_INNER void GC_print_all_errors(void);
/* Print smashed and leaked objects, if any. */
/* Clear the lists of such objects. */
#ifdef KEEP_BACK_PTRS
GC_EXTERN long GC_backtraces;
- void GC_generate_random_backtrace_no_gc(void);
+ GC_INNER void GC_generate_random_backtrace_no_gc(void);
#endif
GC_EXTERN GC_bool GC_print_back_height;
#endif
#ifdef THREADS
- void GC_free_inner(void * p);
+ GC_INNER void GC_free_inner(void * p);
#endif
/* Macros used for collector internal allocation. */
/* These assume the collector lock is held. */
#ifdef DBG_HDRS_ALL
- void * GC_debug_generic_malloc_inner(size_t lb, int k);
- void * GC_debug_generic_malloc_inner_ignore_off_page(size_t lb, int k);
+ GC_INNER void * GC_debug_generic_malloc_inner(size_t lb, int k);
+ GC_INNER void * GC_debug_generic_malloc_inner_ignore_off_page(size_t lb,
+ int k);
# define GC_INTERNAL_MALLOC GC_debug_generic_malloc_inner
# define GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE \
GC_debug_generic_malloc_inner_ignore_off_page
# ifdef THREADS
- void GC_debug_free_inner(void * p);
+ GC_INNER void GC_debug_free_inner(void * p);
# define GC_INTERNAL_FREE GC_debug_free_inner
# else
# define GC_INTERNAL_FREE GC_debug_free
#ifdef USE_MUNMAP
/* Memory unmapping: */
- void GC_unmap_old(void);
- void GC_merge_unmapped(void);
- void GC_unmap(ptr_t start, size_t bytes);
- void GC_remap(ptr_t start, size_t bytes);
- void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2, size_t bytes2);
+ GC_INNER void GC_unmap_old(void);
+ GC_INNER void GC_merge_unmapped(void);
+ GC_INNER void GC_unmap(ptr_t start, size_t bytes);
+ GC_INNER void GC_remap(ptr_t start, size_t bytes);
+ GC_INNER void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2,
+ size_t bytes2);
#endif
/* Virtual dirty bit implementation: */
/* Each implementation exports the following: */
-void GC_read_dirty(void);
+GC_INNER void GC_read_dirty(void);
/* Retrieve dirty bits. */
-GC_bool GC_page_was_dirty(struct hblk *h);
+GC_INNER GC_bool GC_page_was_dirty(struct hblk *h);
/* Read retrieved dirty bits. */
-void GC_remove_protection(struct hblk *h, word nblocks,
+GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
GC_bool pointerfree);
/* h is about to be written or allocated. Ensure */
/* that it's not write protected by the virtual */
/* dirty bit implementation. */
-void GC_dirty_init(void);
+GC_INNER void GC_dirty_init(void);
/* Slow/general mark bit manipulation: */
GC_API_PRIV GC_bool GC_is_marked(ptr_t p);
-void GC_clear_mark_bit(ptr_t p);
-void GC_set_mark_bit(ptr_t p);
+GC_INNER void GC_clear_mark_bit(ptr_t p);
+GC_INNER void GC_set_mark_bit(ptr_t p);
/* Stubborn objects: */
void GC_read_changed(void); /* Analogous to GC_read_dirty */
void GC_print_hblkfreelist(void);
void GC_print_heap_sects(void);
void GC_print_static_roots(void);
-void GC_print_finalization_stats(void);
+GC_INNER void GC_print_finalization_stats(void);
/* void GC_dump(void); - declared in gc.h */
#ifdef KEEP_BACK_PTRS
- void GC_store_back_pointer(ptr_t source, ptr_t dest);
- void GC_marked_for_finalization(ptr_t dest);
+ GC_INNER void GC_store_back_pointer(ptr_t source, ptr_t dest);
+ GC_INNER void GC_marked_for_finalization(ptr_t dest);
# define GC_STORE_BACK_PTR(source, dest) GC_store_back_pointer(source, dest)
# define GC_MARKED_FOR_FINALIZATION(dest) GC_marked_for_finalization(dest)
#else
/* newlines, don't ... */
#if defined(LINUX) && !defined(SMALL_CONFIG)
- void GC_err_write(const char *buf, size_t len);
+ GC_INNER void GC_err_write(const char *buf, size_t len);
/* Write buf to stderr, don't buffer, don't add */
/* newlines, don't ... */
#endif
/* GC_notify_all_builder() is called when GC_fl_builder_count */
/* reaches 0. */
- void GC_acquire_mark_lock(void);
- void GC_release_mark_lock(void);
- void GC_notify_all_builder(void);
- void GC_wait_for_reclaim(void);
+ GC_INNER void GC_acquire_mark_lock(void);
+ GC_INNER void GC_release_mark_lock(void);
+ GC_INNER void GC_notify_all_builder(void);
+ GC_INNER void GC_wait_for_reclaim(void);
GC_EXTERN word GC_fl_builder_count; /* Protected by mark lock. */
- void GC_notify_all_marker(void);
- void GC_wait_marker(void);
+ GC_INNER void GC_notify_all_marker(void);
+ GC_INNER void GC_wait_marker(void);
GC_EXTERN word GC_mark_no; /* Protected by mark lock. */
- void GC_help_marker(word my_mark_no);
+ GC_INNER void GC_help_marker(word my_mark_no);
/* Try to help out parallel marker for mark cycle */
/* my_mark_no. Returns if the mark cycle finishes or */
/* was already done, or there was nothing to do for */
/* Set up a handler for address faults which will longjmp to */
/* GC_jmp_buf; */
-void GC_setup_temporary_fault_handler(void);
+GC_INNER void GC_setup_temporary_fault_handler(void);
/* Undo the effect of GC_setup_temporary_fault_handler. */
-void GC_reset_fault_handler(void);
+GC_INNER void GC_reset_fault_handler(void);
# endif /* Need to handle address faults. */
GC_EXTERN GC_bool GC_thr_initialized;
-GC_thread GC_lookup_thread(pthread_t id);
+GC_INNER GC_thread GC_lookup_thread(pthread_t id);
-void GC_stop_init(void);
+GC_INNER void GC_stop_init(void);
GC_EXTERN GC_bool GC_in_thread_creation;
/* We may currently be in thread creation or destruction. */
#include "gc_inline.h"
-
# if defined(USE_HPUX_TLS)
# error USE_HPUX_TLS macro was replaced by USE_COMPILER_TLS
# endif
/* Each thread structure must be initialized. */
/* This call must be made from the new thread. */
/* Caller holds allocation lock. */
-void GC_init_thread_local(GC_tlfs p);
+GC_INNER void GC_init_thread_local(GC_tlfs p);
/* Called when a thread is unregistered, or exits. */
/* We hold the allocator lock. */
-void GC_destroy_thread_local(GC_tlfs p);
+GC_INNER void GC_destroy_thread_local(GC_tlfs p);
/* The thread support layer must arrange to mark thread-local */
/* free lists explicitly, since the link field is often */
/* invisible to the marker. It knows how to find all threads; */
/* we take care of an individual thread freelist structure. */
-void GC_mark_thread_local_fls_for(GC_tlfs p);
+GC_INNER void GC_mark_thread_local_fls_for(GC_tlfs p);
extern
#if defined(USE_COMPILER_TLS)
/* Ensure that either registers are pushed, or callee-save registers */
/* are somewhere on the stack, and then call fn(arg, ctxt). */
/* ctxt is either a pointer to a ucontext_t we generated, or NULL. */
-void GC_with_callee_saves_pushed(void (*fn)(ptr_t, void *), ptr_t arg)
+GC_INNER void GC_with_callee_saves_pushed(void (*fn)(ptr_t, void *),
+ ptr_t arg)
{
word dummy;
void * context = 0;
# include <errno.h>
#endif
-void GC_extend_size_map(size_t); /* in misc.c. */
+GC_INNER void GC_extend_size_map(size_t); /* in misc.c */
/* Allocate reclaim list for kind: */
/* Return TRUE on success */
return(TRUE);
}
-GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page,
- GC_bool retry); /* from alloc.c */
+GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
+ GC_bool ignore_off_page,
+ GC_bool retry); /* from alloc.c */
/* Allocate a large block of size lb bytes. */
/* The block is not cleared. */
/* Flags is 0 or IGNORE_OFF_PAGE. */
/* We hold the allocation lock. */
/* EXTRA_BYTES were already added to lb. */
-ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
+GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
{
struct hblk * h;
word n_blocks;
/* require special handling on allocation. */
/* First a version that assumes we already */
/* hold lock: */
-void * GC_generic_malloc_inner(size_t lb, int k)
+GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
{
void *op;
/* Allocate a composite object of size n bytes. The caller guarantees */
/* that pointers past the first page are not relevant. Caller holds */
/* allocation lock. */
-void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
+GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
{
word lb_adjusted;
void * op;
/* Allocate lb bytes of atomic (pointerfree) data */
#ifdef THREAD_LOCAL_ALLOC
- void * GC_core_malloc_atomic(size_t lb)
+ GC_INNER void * GC_core_malloc_atomic(size_t lb)
#else
GC_API void * GC_CALL GC_malloc_atomic(size_t lb)
#endif
/* Allocate lb bytes of composite (pointerful) data */
#ifdef THREAD_LOCAL_ALLOC
- void * GC_core_malloc(size_t lb)
+ GC_INNER void * GC_core_malloc(size_t lb)
#else
GC_API void * GC_CALL GC_malloc(size_t lb)
#endif
STATIC ptr_t GC_libpthread_end = 0;
STATIC ptr_t GC_libld_start = 0;
STATIC ptr_t GC_libld_end = 0;
- GC_bool GC_text_mapping(char *nm, ptr_t *startp, ptr_t *endp);
+ GC_INNER GC_bool GC_text_mapping(char *nm, ptr_t *startp, ptr_t *endp);
/* From os_dep.c */
STATIC void GC_init_lib_bounds(void)
/* Only used for internally allocated objects, so we can take some */
/* shortcuts. */
#ifdef THREADS
- void GC_free_inner(void * p)
+ GC_INNER void GC_free_inner(void * p)
{
struct hblk *h;
hdr *hhdr;
/* Allocate memory such that only pointers to near the */
/* beginning of the object are considered. */
/* We avoid holding allocation lock while we clear memory. */
-void * GC_generic_malloc_ignore_off_page(size_t lb, int k)
+GC_INNER void * GC_generic_malloc_ignore_off_page(size_t lb, int k)
{
void *result;
size_t lg;
/* Is a collection in progress? Note that this can return true in the */
/* nonincremental case, if a collection has been abandoned and the */
/* mark state is now MS_INVALID. */
-GC_bool GC_collection_in_progress(void)
+GC_INNER GC_bool GC_collection_in_progress(void)
{
return(GC_mark_state != MS_NONE);
}
/* clear all mark bits in the header */
-void GC_clear_hdr_marks(hdr *hhdr)
+GC_INNER void GC_clear_hdr_marks(hdr *hhdr)
{
size_t last_bit = FINAL_MARK_BIT(hhdr -> hb_sz);
}
/* Set all mark bits in the header. Used for uncollectable blocks. */
-void GC_set_hdr_marks(hdr *hhdr)
+GC_INNER void GC_set_hdr_marks(hdr *hhdr)
{
unsigned i;
size_t sz = hhdr -> hb_sz;
}
/* Slow but general routines for setting/clearing/asking about mark bits */
-void GC_set_mark_bit(ptr_t p)
+GC_INNER void GC_set_mark_bit(ptr_t p)
{
struct hblk *h = HBLKPTR(p);
hdr * hhdr = HDR(h);
}
}
-void GC_clear_mark_bit(ptr_t p)
+GC_INNER void GC_clear_mark_bit(ptr_t p)
{
struct hblk *h = HBLKPTR(p);
hdr * hhdr = HDR(h);
* the marker invariant, and sets GC_mark_state to reflect this.
* (This implicitly starts marking to reestablish the invariant.)
*/
-void GC_clear_marks(void)
+GC_INNER void GC_clear_marks(void)
{
GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
GC_objects_are_marked = FALSE;
/* Initiate a garbage collection. Initiates a full collection if the */
/* mark state is invalid. */
-void GC_initiate_gc(void)
+GC_INNER void GC_initiate_gc(void)
{
if (GC_dirty_maintained) GC_read_dirty();
# ifdef STUBBORN_ALLOC
/* allocator lock long before we get here. */
STATIC GC_bool GC_mark_some_inner(ptr_t cold_gc_frame)
#else
- GC_bool GC_mark_some(ptr_t cold_gc_frame)
+ GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame)
#endif
{
switch(GC_mark_state) {
/* unexpected thread start? */
#endif
- GC_bool GC_mark_some(ptr_t cold_gc_frame)
+ GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame)
{
GC_bool ret_val;
}
#endif /* WRAP_MARK_SOME */
-GC_bool GC_mark_stack_empty(void)
+GC_INNER GC_bool GC_mark_stack_empty(void)
{
return(GC_mark_stack_top < GC_mark_stack);
}
-void GC_invalidate_mark_state(void)
+GC_INNER void GC_invalidate_mark_state(void)
{
GC_mark_state = MS_INVALID;
GC_mark_stack_top = GC_mark_stack-1;
}
-mse * GC_signal_mark_stack_overflow(mse *msp)
+GC_INNER mse * GC_signal_mark_stack_overflow(mse *msp)
{
GC_mark_state = MS_INVALID;
GC_mark_stack_too_small = TRUE;
* encoding, we optionally maintain a cache for the block address to
* header mapping, we prefetch when an object is "grayed", etc.
*/
-mse * GC_mark_from(mse *mark_stack_top, mse *mark_stack, mse *mark_stack_limit)
+GC_INNER mse * GC_mark_from(mse *mark_stack_top, mse *mark_stack,
+ mse *mark_stack_limit)
{
signed_word credit = HBLKSIZE; /* Remaining credit for marking work */
ptr_t current_p; /* Pointer to current candidate ptr. */
/* Try to help out the marker, if it's running. */
/* We do not hold the GC lock, but the requestor does. */
-void GC_help_marker(word my_mark_no)
+GC_INNER void GC_help_marker(word my_mark_no)
{
mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
unsigned my_id;
GC_mark_stack_top = GC_mark_stack-1;
}
-void GC_mark_init(void)
+GC_INNER void GC_mark_init(void)
{
alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
}
* Should only be used if there is no possibility of mark stack
* overflow.
*/
-void GC_push_all(ptr_t bottom, ptr_t top)
+GC_INNER void GC_push_all(ptr_t bottom, ptr_t top)
{
register word length;
}
# ifdef PROC_VDB
- GC_bool GC_page_was_ever_dirty(struct hblk *h);
+ GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk *h);
/* Could the page contain valid heap pointers? */
# endif
- void GC_push_conditional(ptr_t bottom, ptr_t top, GC_bool all)
+ GC_INNER void GC_push_conditional(ptr_t bottom, ptr_t top, GC_bool all)
{
if (all) {
if (GC_dirty_maintained) {
/* Mark bits are NOT atomically updated. Thus this must be the */
/* only thread setting them. */
# if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
- void GC_mark_and_push_stack(ptr_t p, ptr_t source)
+ GC_INNER void GC_mark_and_push_stack(ptr_t p, ptr_t source)
# else
- void GC_mark_and_push_stack(ptr_t p)
+ GC_INNER void GC_mark_and_push_stack(ptr_t p)
# define source ((ptr_t)0)
# endif
{
* and scans the entire region immediately, in case the contents
* change.
*/
-void GC_push_all_eager(ptr_t bottom, ptr_t top)
+GC_INNER void GC_push_all_eager(ptr_t bottom, ptr_t top)
{
word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
# undef GC_least_plausible_heap_addr
}
-void GC_push_all_stack(ptr_t bottom, ptr_t top)
+GC_INNER void GC_push_all_stack(ptr_t bottom, ptr_t top)
{
# if defined(THREADS) && defined(MPROTECT_VDB)
GC_push_all_eager(bottom, top);
#ifndef THREADS
/* Primarily for debugging support: */
/* Is the address p in one of the registered static root sections? */
- GC_bool GC_is_static_root(ptr_t p)
+ GC_INNER GC_bool GC_is_static_root(ptr_t p)
{
static int last_root_set = MAX_ROOT_SETS;
int i;
/* Is a range starting at b already in the table? If so return a */
/* pointer to it, else NIL. */
- struct roots * GC_roots_present(ptr_t b)
+ GC_INNER struct roots * GC_roots_present(ptr_t b)
{
int h = rt_hash(b);
struct roots *p = GC_root_index[h];
}
#endif /* MSWIN32 || MSWINCE */
-ptr_t GC_approx_sp(void)
+GC_INNER ptr_t GC_approx_sp(void)
{
volatile word sp;
sp = (word)&sp;
/* Should only be called when the lock is held. The range boundaries */
/* should be properly aligned and valid. */
-void GC_exclude_static_roots_inner(void *start, void *finish)
+GC_INNER void GC_exclude_static_roots_inner(void *start, void *finish)
{
struct exclusion * next;
size_t next_index, i;
#ifdef IA64
/* Similar to GC_push_all_stack_frames() but for IA-64 registers store. */
- void GC_push_all_register_frames(ptr_t bs_lo, ptr_t bs_hi,
+ GC_INNER void GC_push_all_register_frames(ptr_t bs_lo, ptr_t bs_hi,
int eager, struct GC_activation_frame_s *activation_frame)
{
while (activation_frame != NULL) {
#ifdef THREADS
-void GC_push_all_stack_frames(ptr_t lo, ptr_t hi,
+GC_INNER void GC_push_all_stack_frames(ptr_t lo, ptr_t hi,
struct GC_activation_frame_s *activation_frame)
{
while (activation_frame != NULL) {
}
#ifdef THREAD_LOCAL_ALLOC
- void GC_mark_thread_local_free_lists(void);
+ GC_INNER void GC_mark_thread_local_free_lists(void);
#endif
-void GC_cond_register_dynamic_libraries(void)
+GC_INNER void GC_cond_register_dynamic_libraries(void)
{
# if defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
|| defined(PCR)
* A zero value indicates that it's OK to miss some
* register values.
*/
-void GC_push_roots(GC_bool all, ptr_t cold_gc_frame)
+GC_INNER void GC_push_roots(GC_bool all, ptr_t cold_gc_frame)
{
int i;
unsigned kind;
#ifdef DYNAMIC_LOADING
/* We need to register the main data segment. Returns TRUE unless */
/* this is done implicitly as part of dynamic library registration. */
- GC_bool GC_register_main_static_data(void);
+ GC_INNER GC_bool GC_register_main_static_data(void);
# define GC_REGISTER_MAIN_STATIC_DATA() GC_register_main_static_data()
#else
/* Don't unnecessarily call GC_register_main_static_data() in case */
/* We assume the ith entry is currently 0. */
/* Note that a filled in section of the array ending at n always */
/* has length at least n/4. */
-void GC_extend_size_map(size_t i)
+GC_INNER void GC_extend_size_map(size_t i)
{
size_t orig_granule_sz = ROUNDED_UP_GRANULES(i);
size_t granule_sz = orig_granule_sz;
/* Clear some of the inaccessible part of the stack. Returns its */
/* argument, so it can be used in a tail call position, hence clearing */
/* another frame. */
-void * GC_clear_stack(void *arg)
+GC_INNER void * GC_clear_stack(void *arg)
{
ptr_t sp = GC_approx_sp(); /* Hotter than actual sp */
# ifdef THREADS
#endif
#ifdef MSWIN32
- void GC_init_win32(void);
+ GC_INNER void GC_init_win32(void);
#endif
-void GC_setpagesize(void);
+GC_INNER void GC_setpagesize(void);
STATIC void GC_exit_check(void)
{
}
#ifdef SEARCH_FOR_DATA_START
- void GC_init_linux_data_start(void);
+ GC_INNER void GC_init_linux_data_start(void);
#endif
#ifdef UNIX_LIKE
- void GC_set_and_save_fault_handler(void (*handler)(int));
+ GC_INNER void GC_set_and_save_fault_handler(void (*handler)(int));
static void looping_handler(int sig)
{
#endif
#if defined(DYNAMIC_LOADING) && defined(DARWIN)
- void GC_init_dyld(void);
+ GC_INNER void GC_init_dyld(void);
#endif
#if defined(NETBSD) && defined(__ELF__)
- void GC_init_netbsd_elf(void);
+ GC_INNER void GC_init_netbsd_elf(void);
#endif
#if !defined(OS2) && !defined(MACOS) && !defined(MSWIN32) && !defined(MSWINCE)
}
#if defined(LINUX) && !defined(SMALL_CONFIG)
- void GC_err_write(const char *buf, size_t len)
+ GC_INNER void GC_err_write(const char *buf, size_t len)
{
if (WRITE(GC_stderr, buf, len) < 0) ABORT("write to stderr failed");
}
#ifdef THREADS
/* Defined in pthread_support.c or win32_threads.c. */
- void GC_do_blocking_inner(ptr_t data, void * context);
+ GC_INNER void GC_do_blocking_inner(ptr_t data, void * context);
#else
/* This could be called without the main GC lock, if we ensure that */
/* there is no concurrent collection which might reclaim objects that */
/* we have not yet allocated. */
-ptr_t GC_build_fl(struct hblk *h, size_t sz, GC_bool clear, ptr_t list)
+GC_INNER ptr_t GC_build_fl(struct hblk *h, size_t sz, GC_bool clear,
+ ptr_t list)
{
word *p, *prev;
word *last_object; /* points to last object in new hblk */
/* Handle small objects sizes more efficiently. For larger objects */
/* the difference is less significant. */
# ifndef SMALL_CONFIG
- switch (sz) {
+ switch (sz) {
case 2: if (clear) {
return GC_build_fl_clear2(h, list);
} else {
}
default:
break;
- }
+ }
# endif /* !SMALL_CONFIG */
/* Clear the page if necessary. */
* Set all mark bits if objects are uncollectable.
* Will fail to do anything if we are out of memory.
*/
-void GC_new_hblk(size_t gran, int kind)
+GC_INNER void GC_new_hblk(size_t gran, int kind)
{
struct hblk *h; /* the new heap block */
GC_bool clear = GC_obj_kinds[kind].ok_init;
UNLOCK();
}
-void GC_register_displacement_inner(size_t offset)
+GC_INNER void GC_register_displacement_inner(size_t offset)
{
if (offset >= VALID_OFFSET_SZ) {
ABORT("Bad argument to GC_register_displacement");
/* Add a heap block map for objects of size granules to obj_map. */
/* Return FALSE on failure. */
/* A size of 0 granules is used for large objects. */
- GC_bool GC_add_map_entry(size_t granules)
+ GC_INNER GC_bool GC_add_map_entry(size_t granules)
{
unsigned displ;
short * new_map;
}
#endif
-void GC_initialize_offsets(void)
+GC_INNER void GC_initialize_offsets(void)
{
static GC_bool offsets_initialized = FALSE;
* This code could be simplified if we could determine its size
* ahead of time.
*/
-char * GC_get_maps(void)
+GC_INNER char * GC_get_maps(void)
{
int f;
int result;
* *prot and *mapping_name are assigned pointers into the original
* buffer.
*/
-char *GC_parse_map_entry(char *buf_ptr, ptr_t *start, ptr_t *end,
+GC_INNER char *GC_parse_map_entry(char *buf_ptr, ptr_t *start, ptr_t *end,
char **prot, unsigned int *maj_dev,
char **mapping_name)
{
#if defined(REDIRECT_MALLOC)
/* Find the text(code) mapping for the library whose name, after */
/* stripping the directory part, starts with nm. */
-GC_bool GC_text_mapping(char *nm, ptr_t *startp, ptr_t *endp)
+GC_INNER GC_bool GC_text_mapping(char *nm, ptr_t *startp, ptr_t *endp)
{
size_t nm_len = strlen(nm);
char *prot;
ptr_t GC_find_limit(ptr_t, GC_bool);
- void GC_init_linux_data_start(void)
+ GC_INNER void GC_init_linux_data_start(void)
{
# if defined(LINUX) || defined(HURD)
extern char **environ;
- void GC_init_netbsd_elf(void)
+ GC_INNER void GC_init_netbsd_elf(void)
{
/* This may need to be environ, without the underscore, for */
/* some versions. */
GC_INNER GC_bool GC_dont_query_stack_min = FALSE;
# endif
- void GC_setpagesize(void)
+ GC_INNER void GC_setpagesize(void)
{
GetSystemInfo(&GC_sysinfo);
GC_page_size = GC_sysinfo.dwPageSize;
}
# else
- void GC_setpagesize(void)
+ GC_INNER void GC_setpagesize(void)
{
# if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP)
GC_page_size = GETPAGESIZE();
static GC_fault_handler_t old_segv_handler, old_bus_handler;
# endif
- void GC_set_and_save_fault_handler(GC_fault_handler_t h)
+ GC_INNER void GC_set_and_save_fault_handler(GC_fault_handler_t h)
{
# if defined(SUNOS5SIGS) || defined(IRIX5) \
|| defined(OSF1) || defined(HURD) || defined(NETBSD)
LONGJMP(GC_jmp_buf, 1);
}
- void GC_setup_temporary_fault_handler(void)
+ GC_INNER void GC_setup_temporary_fault_handler(void)
{
/* Handler is process-wide, so this should only happen in */
/* one thread at a time. */
GC_set_and_save_fault_handler(GC_fault_handler);
}
- void GC_reset_fault_handler(void)
+ GC_INNER void GC_reset_fault_handler(void)
{
# if defined(SUNOS5SIGS) || defined(IRIX5) \
|| defined(OSF1) || defined(HURD) || defined(NETBSD)
#include <sys/param.h>
#include <sys/pstat.h>
- ptr_t GC_get_register_stack_base(void)
+ GC_INNER ptr_t GC_get_register_stack_base(void)
{
struct pst_vm_status vm_status;
extern ptr_t __libc_ia64_register_backing_store_base;
# endif
- ptr_t GC_get_register_stack_base(void)
+ GC_INNER ptr_t GC_get_register_stack_base(void)
{
ptr_t result;
/* extern int pthread_getattr_np(pthread_t, pthread_attr_t *); */
#ifdef IA64
- ptr_t GC_greatest_stack_base_below(ptr_t bound);
+ GC_INNER ptr_t GC_greatest_stack_base_below(ptr_t bound);
/* From pthread_support.c */
#endif
GC_INNER GC_bool GC_wnt = FALSE;
/* This is a Windows NT derivative, i.e. NT, W2K, XP or later. */
- void GC_init_win32(void)
+ GC_INNER void GC_init_win32(void)
{
/* Set GC_wnt. If we're running under win32s, assume that no DLLs */
/* will be loaded. I doubt anyone still runs win32s, but... */
STATIC size_t GC_max_root_size = 100000; /* Appr. largest root size. */
- void GC_add_current_malloc_heap(void)
+ GC_INNER void GC_add_current_malloc_heap(void)
{
struct GC_malloc_heap_list *new_l =
malloc(sizeof(struct GC_malloc_heap_list));
/* Is p the start of either the malloc heap, or of one of our */
/* heap sections? */
- GC_bool GC_is_heap_base(ptr_t p)
+ GC_INNER GC_bool GC_is_heap_base(ptr_t p)
{
unsigned i;
# ifndef REDIRECT_MALLOC
/* We assume that GC_remap is called on exactly the same range */
/* as a previous call to GC_unmap. It is safe to consistently */
/* round the endpoints in both places. */
-void GC_unmap(ptr_t start, size_t bytes)
+GC_INNER void GC_unmap(ptr_t start, size_t bytes)
{
ptr_t start_addr = GC_unmap_start(start, bytes);
ptr_t end_addr = GC_unmap_end(start, bytes);
# endif
}
-void GC_remap(ptr_t start, size_t bytes)
+GC_INNER void GC_remap(ptr_t start, size_t bytes)
{
ptr_t start_addr = GC_unmap_start(start, bytes);
ptr_t end_addr = GC_unmap_end(start, bytes);
/* be merged. Unmap the whole block. This typically requires */
/* that we unmap a small section in the middle that was not previously */
/* unmapped due to alignment constraints. */
-void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2, size_t bytes2)
+GC_INNER void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2,
+ size_t bytes2)
{
ptr_t start1_addr = GC_unmap_start(start1, bytes1);
ptr_t end1_addr = GC_unmap_end(start1, bytes1);
# if defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
-void GC_push_all_stacks(void);
+GC_INNER void GC_push_all_stacks(void);
STATIC void GC_default_push_other_roots(void)
{
static PVOID gww_buf[GC_GWW_BUF_LEN];
# ifdef MPROTECT_VDB
- GC_bool GC_gww_dirty_init(void)
+ GC_INNER GC_bool GC_gww_dirty_init(void)
{
detect_GetWriteWatch();
return GC_GWW_AVAILABLE();
}
# else
- void GC_dirty_init(void)
+ GC_INNER void GC_dirty_init(void)
{
detect_GetWriteWatch();
GC_dirty_maintained = GC_GWW_AVAILABLE();
# ifdef MPROTECT_VDB
STATIC void GC_gww_read_dirty(void)
# else
- void GC_read_dirty(void)
+ GC_INNER void GC_read_dirty(void)
# endif
{
word i;
# ifdef MPROTECT_VDB
STATIC GC_bool GC_gww_page_was_dirty(struct hblk * h)
# else
- GC_bool GC_page_was_dirty(struct hblk * h)
+ GC_INNER GC_bool GC_page_was_dirty(struct hblk * h)
# endif
{
return HDR(h) == 0 ||
# ifdef MPROTECT_VDB
STATIC GC_bool GC_gww_page_was_ever_dirty(struct hblk * h)
# else
- GC_bool GC_page_was_ever_dirty(struct hblk * h)
+ GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk * h)
# endif
{
return HDR(h) == 0 ||
# ifndef MPROTECT_VDB
/*ARGSUSED*/
- void GC_remove_protection(struct hblk *h, word nblocks,
+ GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
GC_bool is_ptrfree) {}
# endif
/* written. */
/* Initialize virtual dirty bit implementation. */
-void GC_dirty_init(void)
+GC_INNER void GC_dirty_init(void)
{
if (GC_print_stats == VERBOSE)
GC_log_printf("Initializing DEFAULT_VDB...\n");
/* Retrieve system dirty bits for heap to a local buffer. */
/* Restore the systems notion of which pages are dirty. */
-void GC_read_dirty(void) {}
+GC_INNER void GC_read_dirty(void) {}
/* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
/* If the actual page size is different, this returns TRUE if any */
/* of the pages overlapping h are dirty. This routine may err on the */
/* side of labeling pages as dirty (and this implementation does). */
/*ARGSUSED*/
-GC_bool GC_page_was_dirty(struct hblk *h)
+GC_INNER GC_bool GC_page_was_dirty(struct hblk *h)
{
return(TRUE);
}
#if 0
/* Could any valid GC heap pointer ever have been written to this page? */
/*ARGSUSED*/
-GC_bool GC_page_was_ever_dirty(struct hblk *h)
+GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk *h)
{
return(TRUE);
}
/* pointer-free system call buffers in the heap are */
/* not protected. */
/*ARGSUSED*/
-void GC_remove_protection(struct hblk *h, word nblocks,
+GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
GC_bool is_ptrfree) {}
# endif /* DEFAULT_VDB */
# ifdef MANUAL_VDB
/* Initialize virtual dirty bit implementation. */
-void GC_dirty_init(void)
+GC_INNER void GC_dirty_init(void)
{
if (GC_print_stats == VERBOSE)
GC_log_printf("Initializing MANUAL_VDB...\n");
/* Retrieve system dirty bits for heap to a local buffer. */
/* Restore the systems notion of which pages are dirty. */
-void GC_read_dirty(void)
+GC_INNER void GC_read_dirty(void)
{
BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
(sizeof GC_dirty_pages));
/* If the actual page size is different, this returns TRUE if any */
/* of the pages overlapping h are dirty. This routine may err on the */
/* side of labeling pages as dirty (and this implementation does). */
-GC_bool GC_page_was_dirty(struct hblk *h)
+GC_INNER GC_bool GC_page_was_dirty(struct hblk *h)
{
register word index;
}
/*ARGSUSED*/
-void GC_remove_protection(struct hblk *h, word nblocks,
+GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
GC_bool is_ptrfree) {}
# endif /* MANUAL_VDB */
# endif
# if defined(MSWIN32) || defined(MSWINCE)
- LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
+ GC_INNER LONG WINAPI GC_write_fault_handler(
+ struct _EXCEPTION_POINTERS *exc_info)
# else
# include <ucontext.h>
/*ARGSUSED*/
* starting at h are no longer protected. If is_ptrfree is false,
* also ensure that they will subsequently appear to be dirty.
*/
-void GC_remove_protection(struct hblk *h, word nblocks, GC_bool is_ptrfree)
+GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
+ GC_bool is_ptrfree)
{
struct hblk * h_trunc; /* Truncated to page boundary */
struct hblk * h_end; /* Page boundary following block end */
}
#if !defined(DARWIN)
- void GC_dirty_init(void)
+ GC_INNER void GC_dirty_init(void)
{
# if !defined(MSWIN32) && !defined(MSWINCE)
struct sigaction act, oldact;
/* We assume that either the world is stopped or its OK to lose dirty */
/* bits while this is happenning (as in GC_enable_incremental). */
-void GC_read_dirty(void)
+GC_INNER void GC_read_dirty(void)
{
# if defined(GWW_VDB)
if (GC_GWW_AVAILABLE()) {
GC_protect_heap();
}
-GC_bool GC_page_was_dirty(struct hblk *h)
+GC_INNER GC_bool GC_page_was_dirty(struct hblk *h)
{
register word index;
#endif
/*ARGSUSED*/
-GC_bool GC_page_was_ever_dirty(struct hblk *h)
+GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk *h)
{
# if defined(GWW_VDB)
if (GC_GWW_AVAILABLE())
STATIC int GC_proc_fd = 0;
-void GC_dirty_init(void)
+GC_INNER void GC_dirty_init(void)
{
int fd;
char buf[30];
/* Ignore write hints. They don't help us here. */
/*ARGSUSED*/
-void GC_remove_protection(struct hblk *h, word nblocks,
+GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
GC_bool is_ptrfree) {}
# define READ(fd,buf,nbytes) read(fd, buf, nbytes)
-void GC_read_dirty(void)
+GC_INNER void GC_read_dirty(void)
{
unsigned long ps, np;
int nmaps;
#undef READ
-GC_bool GC_page_was_dirty(struct hblk *h)
+GC_INNER GC_bool GC_page_was_dirty(struct hblk *h)
{
register word index = PHT_HASH(h);
return get_pht_entry_from_index(GC_grungy_pages, index);
}
-GC_bool GC_page_was_ever_dirty(struct hblk *h)
+GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk *h)
{
register word index = PHT_HASH(h);
return get_pht_entry_from_index(GC_written_pages, index);
/* Address corresponding to GC_grungy_bits[0] */
/* HBLKSIZE aligned. */
-void GC_dirty_init(void)
+GC_INNER void GC_dirty_init(void)
{
GC_dirty_maintained = TRUE;
/* For the time being, we assume the heap generally grows up */
}
}
-void GC_read_dirty(void)
+GC_INNER void GC_read_dirty(void)
{
/* lazily enable dirty bits on newly added heap sects */
{
}
}
-GC_bool GC_page_was_dirty(struct hblk *h)
+GC_INNER GC_bool GC_page_was_dirty(struct hblk *h)
{
if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
return(TRUE);
}
/*ARGSUSED*/
-void GC_remove_protection(struct hblk *h, word nblocks, GC_bool is_ptrfree)
+GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
+ GC_bool is_ptrfree)
{
PCR_VD_WriteProtectDisable(h, nblocks*HBLKSIZE);
PCR_VD_WriteProtectEnable(h, nblocks*HBLKSIZE);
ABORT("mach_msg failed in GC_mprotect_thread_reply");
}
-void GC_mprotect_stop(void)
+GC_INNER void GC_mprotect_stop(void)
{
GC_mprotect_thread_notify(ID_STOP);
}
-void GC_mprotect_resume(void)
+GC_INNER void GC_mprotect_resume(void)
{
GC_mprotect_thread_notify(ID_RESUME);
}
}
#endif /* BROKEN_EXCEPTION_HANDLING */
-void GC_dirty_init(void)
+GC_INNER void GC_dirty_init(void)
{
kern_return_t r;
mach_port_t me;
GC_in_save_callers = FALSE;
#endif
-void GC_save_callers(struct callinfo info[NFRAMES])
+GC_INNER void GC_save_callers(struct callinfo info[NFRAMES])
{
void * tmp_info[NFRAMES + 1];
int npcs, i;
# define BIAS 0
#endif
-void GC_save_callers(struct callinfo info[NFRAMES])
+GC_INNER void GC_save_callers(struct callinfo info[NFRAMES])
{
struct frame *frame;
struct frame *fp;
#ifdef NEED_CALLINFO
/* Print info to stderr. We do NOT hold the allocation lock */
-void GC_print_callers(struct callinfo info[NFRAMES])
+GC_INNER void GC_print_callers(struct callinfo info[NFRAMES])
{
int i;
static int reentry_count = 0;
# endif
/* We hold allocation lock. Should do exactly the right thing if the */
/* world is stopped. Should not fail if it isn't. */
-void GC_push_all_stacks(void)
+GC_INNER void GC_push_all_stacks(void)
{
GC_bool found_me = FALSE;
size_t nthreads = 0;
return n_live_threads;
}
-void GC_stop_world(void)
+GC_INNER void GC_stop_world(void)
{
int i;
# ifndef GC_OPENBSD_THREADS
/* Caller holds allocation lock, and has held it continuously since */
/* the world stopped. */
-void GC_start_world(void)
+GC_INNER void GC_start_world(void)
{
pthread_t my_thread = pthread_self();
register int i;
# endif
}
-void GC_stop_init(void)
+GC_INNER void GC_stop_init(void)
{
# ifndef GC_OPENBSD_THREADS
struct sigaction act;
/* list links wouldn't otherwise be found. We also set them in the */
/* normal free lists, since that involves touching less memory than if */
/* we scanned them normally. */
-void GC_mark_thread_local_free_lists(void)
+GC_INNER void GC_mark_thread_local_free_lists(void)
{
int i;
GC_thread p;
/* updates. */
/* If there is more than one thread with the given id we */
/* return the most recent one. */
-GC_thread GC_lookup_thread(pthread_t id)
+GC_INNER GC_thread GC_lookup_thread(pthread_t id)
{
int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
register GC_thread p = GC_threads[hv];
}
/* Called by GC_finalize() (in case of an allocation failure observed). */
-void GC_reset_finalizer_nested(void)
+GC_INNER void GC_reset_finalizer_nested(void)
{
GC_thread me = GC_lookup_thread(pthread_self());
me->finalizer_nested = 0;
/* collector (to minimize the risk of a deep finalizers recursion), */
/* otherwise returns a pointer to the thread-local finalizer_nested. */
/* Called by GC_notify_or_invoke_finalizers() only (the lock is held). */
-unsigned *GC_check_finalizer_nested(void)
+GC_INNER unsigned *GC_check_finalizer_nested(void)
{
GC_thread me = GC_lookup_thread(pthread_self());
unsigned nesting_level = me->finalizer_nested;
#endif /* HANDLE_FORK */
#ifdef USE_PROC_FOR_LIBRARIES
- GC_bool GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
+ GC_INNER GC_bool GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
{
int i;
GC_thread p;
/* Find the largest stack_base smaller than bound. May be used */
/* to find the boundary between a register stack and adjacent */
/* immediately preceding memory stack. */
- ptr_t GC_greatest_stack_base_below(ptr_t bound)
+ GC_INNER ptr_t GC_greatest_stack_base_below(ptr_t bound)
{
int i;
GC_thread p;
#endif
/* We hold the allocation lock. */
-void GC_thr_init(void)
+GC_INNER void GC_thr_init(void)
{
# ifndef GC_DARWIN_THREADS
int dummy;
/* Called without allocation lock. */
/* Must be called before a second thread is created. */
/* Did we say it's called without the allocation lock? */
-void GC_init_parallel(void)
+GC_INNER void GC_init_parallel(void)
{
if (parallel_initialized) return;
parallel_initialized = TRUE;
/* length of time. */
/*ARGSUSED*/
-void GC_do_blocking_inner(ptr_t data, void * context)
+GC_INNER void GC_do_blocking_inner(ptr_t data, void * context)
{
struct blocking_data * d = (struct blocking_data *) data;
GC_thread me;
GC_INNER volatile AO_TS_t GC_allocate_lock = AO_TS_INITIALIZER;
-void GC_lock(void)
+GC_INNER void GC_lock(void)
{
# define low_spin_max 30 /* spin cycles if we suspect uniprocessor */
# define high_spin_max SPIN_MAX /* spin cycles for multiprocessor */
}
#else /* !USE_SPINLOCK */
-void GC_lock(void)
+GC_INNER void GC_lock(void)
{
#ifndef NO_PTHREAD_TRYLOCK
if (1 == GC_nprocs || GC_collecting) {
static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;
-void GC_acquire_mark_lock(void)
+GC_INNER void GC_acquire_mark_lock(void)
{
/*
if (pthread_mutex_lock(&mark_mutex) != 0) {
# endif
}
-void GC_release_mark_lock(void)
+GC_INNER void GC_release_mark_lock(void)
{
GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
# ifdef GC_ASSERTIONS
# endif
}
-void GC_wait_for_reclaim(void)
+GC_INNER void GC_wait_for_reclaim(void)
{
GC_acquire_mark_lock();
while (GC_fl_builder_count > 0) {
GC_release_mark_lock();
}
-void GC_notify_all_builder(void)
+GC_INNER void GC_notify_all_builder(void)
{
GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
if (pthread_cond_broadcast(&builder_cv) != 0) {
static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;
-void GC_wait_marker(void)
+GC_INNER void GC_wait_marker(void)
{
GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
ASSERT_CANCEL_DISABLED();
# endif
}
-void GC_notify_all_marker(void)
+GC_INNER void GC_notify_all_marker(void)
{
if (pthread_cond_broadcast(&mark_cv) != 0) {
ABORT("pthread_cond_broadcast failed");
}
}
-static GC_bool printing_errors = FALSE;
-/* Print all objects on the list after printing any smashed objs. */
+/* Print all objects on the list after printing any smashed objects. */
/* Clear both lists. */
-void GC_print_all_errors (void)
+GC_INNER void GC_print_all_errors(void)
{
+ static GC_bool printing_errors = FALSE;
unsigned i;
LOCK();
*
*/
-
-/*
- * Test whether a block is completely empty, i.e. contains no marked
- * objects. This does not require the block to be in physical
- * memory.
- */
-
-GC_bool GC_block_empty(hdr *hhdr)
+/* Test whether a block is completely empty, i.e. contains no marked */
+/* objects. This does not require the block to be in physical memory. */
+GC_INNER GC_bool GC_block_empty(hdr *hhdr)
{
return (hhdr -> hb_n_marks == 0);
}
* Also called directly from GC_malloc_many.
* Sz is now in bytes.
*/
-ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
- GC_bool init, ptr_t list, signed_word *count)
+GC_INNER ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
+ GC_bool init, ptr_t list,
+ signed_word *count)
{
ptr_t result;
}
}
-#endif /* NO_DEBUGGING */
+#endif /* !NO_DEBUGGING */
/*
* Clear all obj_link pointers in the list of free objects *flp.
* Perform GC_reclaim_block on the entire heap, after first clearing
* small object free lists (if we are not just looking for leaks).
*/
-void GC_start_reclaim(GC_bool report_if_found)
+GC_INNER void GC_start_reclaim(GC_bool report_if_found)
{
unsigned kind;
* appropriate free list is nonempty, or there are no more blocks to
* sweep.
*/
-void GC_continue_reclaim(size_t sz /* granules */, int kind)
+GC_INNER void GC_continue_reclaim(size_t sz /* granules */, int kind)
{
hdr * hhdr;
struct hblk * hbp;
* recently reclaimed, and discard the rest.
* Stop_func may be 0.
*/
-GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old)
+GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old)
{
word sz;
unsigned kind;
/* Each thread structure must be initialized. */
/* This call must be made from the new thread. */
-void GC_init_thread_local(GC_tlfs p)
+GC_INNER void GC_init_thread_local(GC_tlfs p)
{
int i;
}
/* We hold the allocator lock. */
-void GC_destroy_thread_local(GC_tlfs p)
+GC_INNER void GC_destroy_thread_local(GC_tlfs p)
{
/* We currently only do this from the thread itself or from */
/* the fork handler for a child process. */
/* free lists explicitly, since the link field is often */
/* invisible to the marker. It knows how to find all threads; */
/* we take care of an individual thread freelist structure. */
-void GC_mark_thread_local_fls_for(GC_tlfs p)
+GC_INNER void GC_mark_thread_local_fls_for(GC_tlfs p)
{
ptr_t q;
int j;
}
#ifdef MPROTECT_VDB
- LONG WINAPI GC_write_fault_handler(
+ GC_INNER LONG WINAPI GC_write_fault_handler(
struct _EXCEPTION_POINTERS *exc_info);
#endif
#if defined(GWW_VDB) && defined(MPROTECT_VDB)
- GC_bool GC_gww_dirty_init(void);
+ GC_INNER GC_bool GC_gww_dirty_init(void);
/* Defined in os_dep.c. Returns TRUE if GetWriteWatch is available. */
/* may be called repeatedly. */
#endif
/* Called by GC_finalize() (in case of an allocation failure observed). */
/* GC_reset_finalizer_nested() is the same as in pthread_support.c. */
-void GC_reset_finalizer_nested(void)
+GC_INNER void GC_reset_finalizer_nested(void)
{
GC_thread me = GC_lookup_thread_inner(GetCurrentThreadId());
me->finalizer_nested = 0;
/* otherwise returns a pointer to the thread-local finalizer_nested. */
/* Called by GC_notify_or_invoke_finalizers() only (the lock is held). */
/* GC_check_finalizer_nested() is the same as in pthread_support.c. */
-unsigned *GC_check_finalizer_nested(void)
+GC_INNER unsigned *GC_check_finalizer_nested(void)
{
GC_thread me = GC_lookup_thread_inner(GetCurrentThreadId());
unsigned nesting_level = me->finalizer_nested;
/* GC_do_blocking_inner() is nearly the same as in pthread_support.c */
/*ARGSUSED*/
-void GC_do_blocking_inner(ptr_t data, void * context)
+GC_INNER void GC_do_blocking_inner(ptr_t data, void * context)
{
struct blocking_data * d = (struct blocking_data *) data;
DWORD t = GetCurrentThreadId();
# endif
}
-void GC_stop_world(void)
+GC_INNER void GC_stop_world(void)
{
DWORD thread_id = GetCurrentThreadId();
# endif
}
-void GC_start_world(void)
+GC_INNER void GC_start_world(void)
{
DWORD thread_id = GetCurrentThreadId();
int i;
} /* thread looks live */
}
-void GC_push_all_stacks(void)
+GC_INNER void GC_push_all_stacks(void)
{
DWORD me = GetCurrentThreadId();
GC_bool found_me = FALSE;
/* Return stack bounds in *lo and *hi. If no such stack */
/* is found, both *hi and *lo will be set to an address */
/* higher than limit. */
-void GC_get_next_stack(char *start, char *limit,
+GC_INNER void GC_get_next_stack(char *start, char *limit,
char **lo, char **hi)
{
int i;
AO_t GC_block_count = 0;
# endif
- void GC_acquire_mark_lock(void)
+ GC_INNER void GC_acquire_mark_lock(void)
{
if (pthread_mutex_lock(&mark_mutex) != 0) {
ABORT("pthread_mutex_lock failed");
# endif
}
- void GC_release_mark_lock(void)
+ GC_INNER void GC_release_mark_lock(void)
{
GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
# ifdef GC_ASSERTIONS
# endif
}
- void GC_wait_for_reclaim(void)
+ GC_INNER void GC_wait_for_reclaim(void)
{
GC_acquire_mark_lock();
while (GC_fl_builder_count > 0) {
GC_release_mark_lock();
}
- void GC_notify_all_builder(void)
+ GC_INNER void GC_notify_all_builder(void)
{
GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
if (pthread_cond_broadcast(&builder_cv) != 0) {
static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;
- void GC_wait_marker(void)
+ GC_INNER void GC_wait_marker(void)
{
GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
# ifdef GC_ASSERTIONS
# endif
}
- void GC_notify_all_marker(void)
+ GC_INNER void GC_notify_all_marker(void)
{
if (pthread_cond_broadcast(&mark_cv) != 0) {
ABORT("pthread_cond_broadcast failed");
AO_t GC_unlocked_count = 0;
# endif
- void GC_acquire_mark_lock(void)
+ GC_INNER void GC_acquire_mark_lock(void)
{
# ifdef DONT_USE_SIGNALANDWAIT
if (InterlockedExchange(&GC_mark_mutex_state, 1 /* locked */) != 0)
# endif
}
- void GC_release_mark_lock(void)
+ GC_INNER void GC_release_mark_lock(void)
{
GC_ASSERT(GC_mark_lock_holder == (unsigned long)GetCurrentThreadId());
# ifdef GC_ASSERTIONS
/* mark_mutex and the checked condition (GC_fl_builder_count == 0) */
/* is the only one for which broadcasting on builder_cv is performed. */
- void GC_wait_for_reclaim(void)
+ GC_INNER void GC_wait_for_reclaim(void)
{
GC_ASSERT(builder_cv != 0);
for (;;) {
GC_release_mark_lock();
}
- void GC_notify_all_builder(void)
+ GC_INNER void GC_notify_all_builder(void)
{
GC_ASSERT(GC_mark_lock_holder == (unsigned long)GetCurrentThreadId());
GC_ASSERT(builder_cv != 0);
/* mark_cv is used (for waiting) by a non-helper thread. */
- void GC_wait_marker(void)
+ GC_INNER void GC_wait_marker(void)
{
HANDLE event = mark_cv;
DWORD id = GetCurrentThreadId();
GC_acquire_mark_lock();
}
- void GC_notify_all_marker(void)
+ GC_INNER void GC_notify_all_marker(void)
{
DWORD id = GetCurrentThreadId();
int i = (int)GC_markers - 1;
static SignalObjectAndWait_type signalObjectAndWait_func = 0;
# endif
- void GC_wait_marker(void)
+ GC_INNER void GC_wait_marker(void)
{
/* Here we assume that GC_wait_marker() is always called */
/* from a while(check_cond) loop. */
}
}
- void GC_notify_all_marker(void)
+ GC_INNER void GC_notify_all_marker(void)
{
GC_ASSERT(mark_cv != 0);
if (PulseEvent(mark_cv) == FALSE)
#endif /* GC_WINMAIN_REDIRECT */
/* Called by GC_init() - we hold the allocation lock. */
-void GC_thr_init(void)
+GC_INNER void GC_thr_init(void)
{
struct GC_stack_base sb;
# ifdef GC_ASSERTIONS
/* may require allocation. */
/* Called without allocation lock. */
/* Must be called before a second thread is created. */
-void GC_init_parallel(void)
+GC_INNER void GC_init_parallel(void)
{
if (parallel_initialized) return;
parallel_initialized = TRUE;
/* holding the allocation lock for an */
/* extended period. */
- void GC_lock(void)
+ GC_INNER void GC_lock(void)
{
pthread_mutex_lock(&GC_allocate_ml);
}
/* list links wouldn't otherwise be found. We also set them in the */
/* normal free lists, since that involves touching less memory than if */
/* we scanned them normally. */
- void GC_mark_thread_local_free_lists(void)
+ GC_INNER void GC_mark_thread_local_free_lists(void)
{
int i;
GC_thread p;