2009-09-10 Ivan Maidanski <ivmai@mail.ru>
+ (diff115)
+
+ * finalize.c (GC_general_register_disappearing_link,
+ GC_register_finalizer_inner): Remove unnecessary "ifdef THREADS"
+ guard for LOCK/UNLOCK().
+ * finalize.c (GC_general_register_disappearing_link,
+ GC_register_finalizer_inner): Get GC_oom_fn value before releasing
+ the lock (to prevent data races).
+ * gcj_mlc.c (GC_gcj_malloc, GC_debug_gcj_malloc,
+ GC_gcj_malloc_ignore_off_page): Ditto.
+ * mallocx.c (GC_generic_malloc_ignore_off_page): Ditto.
+ * include/gc_inline.h (GC_FAST_MALLOC_GRANS): Use GC_get_oom_fn()
+ instead of GC_oom_fn (to prevent data races).
+ * malloc.c (GC_generic_malloc): Ditto.
+ * mallocx.c (GC_memalign): Ditto.
+ * pthread_support.c (pthread_create): Ditto.
+ * gcj_mlc.c (maybe_finalize): Acquire the lock before setting the
+ last_finalized_no value to prevent data races.
+ * include/gc.h (GC_gc_no, GC_get_gc_no, GC_oom_fn, GC_set_oom_fn,
+ GC_set_find_leak, GC_set_finalize_on_demand,
+ GC_set_java_finalization, GC_set_finalizer_notifier,
+ GC_set_dont_expand, GC_set_full_freq, GC_set_non_gc_bytes,
+ GC_set_no_dls, GC_set_free_space_divisor, GC_set_max_retries,
+ GC_set_dont_precollect, GC_set_time_limit, GC_warn_proc): Refine
+ the comment.
+ * misc.c (GC_set_oom_fn): Ditto.
+ * include/gc.h (GC_general_register_disappearing_link): Refine the
+ comment (replace the word "soft" with "weak").
+ * misc.c (GC_oom_fn, GC_get_gc_no, GC_get_parallel,
+ GC_set_finalizer_notifier, GC_set_find_leak): Add the comment.
+ * misc.c (GC_set_oom_fn, GC_get_oom_fn, GC_set_finalizer_notifier,
+ GC_get_finalizer_notifier): Use LOCK/UNLOCK to prevent data races.
+
+2009-09-10 Ivan Maidanski <ivmai@mail.ru>
(diff114a, diff114b, diff114c)
* dbg_mlc.c: Guard include <errno.h> with ifndef MSWINCE; include
if (((word)link & (ALIGNMENT-1)) || link == NULL)
ABORT("Bad arg to GC_general_register_disappearing_link");
-# ifdef THREADS
- LOCK();
-# endif
+ LOCK();
GC_ASSERT(obj != NULL && GC_base(obj) == obj);
if (log_dl_table_size == -1
|| GC_dl_entries > ((word)1 << log_dl_table_size)) {
for (curr_dl = dl_head[index]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
curr_dl -> dl_hidden_obj = HIDE_POINTER(obj);
-# ifdef THREADS
- UNLOCK();
-# endif
+ UNLOCK();
return(1);
}
}
new_dl = (struct disappearing_link *)
GC_INTERNAL_MALLOC(sizeof(struct disappearing_link),NORMAL);
if (0 == new_dl) {
-# ifdef THREADS
- UNLOCK();
-# endif
+ GC_oom_func oom_fn = GC_oom_fn;
+ UNLOCK();
new_dl = (struct disappearing_link *)
- GC_oom_fn(sizeof(struct disappearing_link));
+ (*oom_fn)(sizeof(struct disappearing_link));
if (0 == new_dl) {
return(2);
}
/* It's not likely we'll make it here, but ... */
-# ifdef THREADS
- LOCK();
-# endif
+ LOCK();
}
new_dl -> dl_hidden_obj = HIDE_POINTER(obj);
new_dl -> dl_hidden_link = HIDE_POINTER(link);
dl_set_next(new_dl, dl_head[index]);
dl_head[index] = new_dl;
GC_dl_entries++;
-# ifdef THREADS
- UNLOCK();
-# endif
+ UNLOCK();
return(0);
}
size_t index;
struct finalizable_object *new_fo;
hdr *hhdr;
+ GC_oom_func oom_fn;
DCL_LOCK_STATE;
-# ifdef THREADS
- LOCK();
-# endif
+ LOCK();
if (log_fo_table_size == -1
|| GC_fo_entries > ((word)1 << log_fo_table_size)) {
GC_grow_table((struct hash_chain_entry ***)(&fo_head),
fo_set_next(prev_fo, curr_fo);
}
}
-# ifdef THREADS
- UNLOCK();
-# endif
+ UNLOCK();
return;
}
prev_fo = curr_fo;
if (ofn) *ofn = 0;
if (ocd) *ocd = 0;
if (fn == 0) {
-# ifdef THREADS
- UNLOCK();
-# endif
+ UNLOCK();
return;
}
GET_HDR(base, hhdr);
if (0 == hhdr) {
/* We won't collect it, hence finalizer wouldn't be run. */
-# ifdef THREADS
- UNLOCK();
-# endif
+ UNLOCK();
return;
}
new_fo = (struct finalizable_object *)
GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
if (EXPECT(0 == new_fo, FALSE)) {
-# ifdef THREADS
- UNLOCK();
-# endif
+ oom_fn = GC_oom_fn;
+ UNLOCK();
new_fo = (struct finalizable_object *)
- GC_oom_fn(sizeof(struct finalizable_object));
+ (*oom_fn)(sizeof(struct finalizable_object));
if (0 == new_fo) {
return;
}
/* It's not likely we'll make it here, but ... */
-# ifdef THREADS
- LOCK();
-# endif
+ LOCK();
}
GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
new_fo -> fo_hidden_base = (word)HIDE_POINTER(base);
fo_set_next(new_fo, fo_head[index]);
GC_fo_entries++;
fo_head[index] = new_fo;
-# ifdef THREADS
- UNLOCK();
-# endif
+ UNLOCK();
}
GC_API void GC_CALL GC_register_finalizer(void * obj,
#endif
/* Public read-only variables */
-/* Getter procedures are supplied in some cases and preferred for new */
-/* code. */
+/* The supplied getter functions are preferred for new code. */
GC_API GC_word GC_gc_no;/* Counter incremented per collection. */
/* Includes empty GCs at startup. */
GC_API GC_word GC_CALL GC_get_gc_no(void);
+ /* GC_get_gc_no() uses no synchronization, so */
+ /* it requires GC_call_with_alloc_lock() to */
+ /* avoid data races on multiprocessors. */
GC_API int GC_parallel; /* GC is parallelized for performance on */
/* multiprocessors. Currently set only */
/* Public R/W variables */
+/* The supplied setter and getter functions are preferred for new code. */
typedef void * (GC_CALLBACK * GC_oom_func)(size_t /* bytes_requested */);
GC_API GC_oom_func GC_oom_fn;
/* When there is insufficient memory to satisfy */
/* an allocation request, we return */
- /* (*GC_oom_fn)(). By default this just */
- /* returns 0. */
+ /* (*GC_oom_fn)(size). By default this just */
+ /* returns NULL. */
/* If it returns, it must return 0 or a valid */
/* pointer to a previously allocated heap */
- /* object. */
+ /* object. GC_oom_fn must not be 0. */
+ /* Both the supplied setter and the getter */
+ /* acquire the GC lock (to avoid data races). */
GC_API void GC_CALL GC_set_oom_fn(GC_oom_func);
GC_API GC_oom_func GC_CALL GC_get_oom_fn(void);
/* report inaccessible memory that was not */
/* deallocated with GC_free. Initial value */
/* is determined by FIND_LEAK macro. */
+ /* The setter and getter are unsynchronized, so */
+ /* GC_call_with_alloc_lock() is required to */
+ /* avoid data races (if the value is modified */
+ /* after the GC is put to multi-threaded mode). */
GC_API void GC_CALL GC_set_find_leak(int);
GC_API int GC_CALL GC_get_find_leak(void);
/* call. The default is determined by whether */
/* the FINALIZE_ON_DEMAND macro is defined */
/* when the collector is built. */
+ /* The setter and getter are unsynchronized, so */
+ /* GC_call_with_alloc_lock() is required to */
+ /* avoid data races (if the value is modified */
+ /* after the GC is put to multi-threaded mode). */
GC_API void GC_CALL GC_set_finalize_on_demand(int);
GC_API int GC_CALL GC_get_finalize_on_demand(void);
/* determined by JAVA_FINALIZATION macro. */
/* Enables register_finalizer_unreachable to */
/* work correctly. */
+ /* The setter and getter are unsynchronized, so */
+ /* GC_call_with_alloc_lock() is required to */
+ /* avoid data races (if the value is modified */
+ /* after the GC is put to multi-threaded mode). */
GC_API void GC_CALL GC_set_java_finalization(int);
GC_API int GC_CALL GC_get_java_finalization(void);
/* GC_finalize_on_demand is set. */
/* Typically this will notify a finalization */
/* thread, which will call GC_invoke_finalizers */
- /* in response. */
+ /* in response. May be 0 (means no notifier). */
+ /* Both the supplied setter and the getter */
+ /* acquire the GC lock (to avoid data races). */
GC_API void GC_CALL GC_set_finalizer_notifier(GC_finalizer_notifier_proc);
GC_API GC_finalizer_notifier_proc GC_CALL GC_get_finalizer_notifier(void);
GC_API int GC_dont_expand;
/* Dont expand heap unless explicitly requested */
/* or forced to. */
+ /* The setter and getter are unsynchronized, so */
+ /* GC_call_with_alloc_lock() is required to */
+ /* avoid data races (if the value is modified */
+ /* after the GC is put to multi-threaded mode). */
GC_API void GC_CALL GC_set_dont_expand(int);
GC_API int GC_CALL GC_get_dont_expand(void);
/* blocks. Values in the tens are now */
/* perfectly reasonable, unlike for */
/* earlier GC versions. */
+ /* The setter and getter are unsynchronized, so */
+ /* GC_call_with_alloc_lock() is required to */
+ /* avoid data races (if the value is modified */
+ /* after the GC is put to multi-threaded mode). */
GC_API void GC_CALL GC_set_full_freq(int);
GC_API int GC_CALL GC_get_full_freq(void);
/* Used only to control scheduling of collections. */
/* Updated by GC_malloc_uncollectable and GC_free. */
/* Wizards only. */
+ /* The setter and getter are unsynchronized, so */
+ /* GC_call_with_alloc_lock() is required to */
+ /* avoid data races (if the value is modified */
+ /* after the GC is put to multi-threaded mode). */
GC_API void GC_CALL GC_set_non_gc_bytes(GC_word);
GC_API GC_word GC_CALL GC_get_non_gc_bytes(void);
/* In Microsoft Windows environments, this will */
/* usually also prevent registration of the */
/* main data segment as part of the root set. */
+ /* The setter and getter are unsynchronized, so */
+ /* GC_call_with_alloc_lock() is required to */
+ /* avoid data races (if the value is modified */
+ /* after the GC is put to multi-threaded mode). */
GC_API void GC_CALL GC_set_no_dls(int);
GC_API int GC_CALL GC_get_no_dls(void);
/* but more collection time. Decreasing it */
/* will appreciably decrease collection time */
/* at the expense of space. */
+ /* The setter and getter are unsynchronized, so */
+ /* GC_call_with_alloc_lock() is required to */
+ /* avoid data races (if the value is modified */
+ /* after the GC is put to multi-threaded mode). */
GC_API void GC_CALL GC_set_free_space_divisor(GC_word);
GC_API GC_word GC_CALL GC_get_free_space_divisor(void);
/* The maximum number of GCs attempted before */
/* reporting out of memory after heap */
/* expansion fails. Initially 0. */
+ /* The setter and getter are unsynchronized, so */
+ /* GC_call_with_alloc_lock() is required to */
+ /* avoid data races (if the value is modified */
+ /* after the GC is put to multi-threaded mode). */
GC_API void GC_CALL GC_set_max_retries(GC_word);
GC_API GC_word GC_CALL GC_get_max_retries(void);
/* before the first collection. */
/* Interferes with blacklisting. */
/* Wizards only. */
+ /* The setter and getter are unsynchronized, so */
+ /* GC_call_with_alloc_lock() is required to */
+ /* avoid data races (if the value is modified */
+ /* after the GC is put to multi-threaded mode). */
GC_API void GC_CALL GC_set_dont_precollect(int);
GC_API int GC_CALL GC_get_dont_precollect(void);
/* Setting GC_time_limit to this value */
/* will disable the "pause time exceeded"*/
/* tests. */
+ /* The setter and getter are unsynchronized, so */
+ /* GC_call_with_alloc_lock() is required to */
+ /* avoid data races (if the value is modified */
+ /* after the GC is put to multi-threaded mode). */
GC_API void GC_CALL GC_set_time_limit(unsigned long);
GC_API unsigned long GC_CALL GC_get_time_limit(void);
/* can be used to implement weak pointers easily and */
/* safely. Typically link will point to a location */
/* holding a disguised pointer to obj. (A pointer */
- /* inside an "atomic" object is effectively */
- /* disguised.) In this way soft */
- /* pointers are broken before any object */
- /* reachable from them are finalized. Each link */
- /* May be registered only once, i.e. with one obj */
- /* value. This was added after a long email discussion */
- /* with John Ellis. */
- /* Obj must be a pointer to the first word of an object */
- /* we allocated. It is unsafe to explicitly deallocate */
- /* the object containing link. Explicitly deallocating */
- /* obj may or may not cause link to eventually be */
- /* cleared. */
- /* This can be used to implement certain types of */
- /* weak pointers. Note however that this generally */
+ /* inside an "atomic" object is effectively disguised.) */
+ /* In this way, weak pointers are broken before any */
+ /* object reachable from them gets finalized. */
+ /* Each link may be registered only with one obj value, */
+ /* i.e. all objects but the last one (link registered */
+ /* with) are ignored. This was added after a long */
+ /* email discussion with John Ellis. */
+ /* link must be non-NULL (and be properly aligned). */
+ /* obj must be a pointer to the first word of an object */
+ /* allocated by GC_malloc or friends. It is unsafe to */
+ /* explicitly deallocate the object containing link. */
+ /* Explicit deallocation of obj may or may not cause */
+ /* link to eventually be cleared. */
+ /* This function can be used to implement certain types */
+ /* of weak pointers. Note, however, this generally */
/* requires that the allocation lock is held (see */
/* GC_call_with_alloc_lock() below) when the disguised */
/* pointer is accessed. Otherwise a strong pointer */
#endif
/* GC_set_warn_proc can be used to redirect or filter warning messages. */
-/* p may not be a NULL pointer. */
+/* p may not be a NULL pointer. Both the setter and the getter acquire */
+/* the GC lock (to avoid data races). */
typedef void (GC_CALLBACK * GC_warn_proc) (char *msg, GC_word arg);
GC_API void GC_CALL GC_set_warn_proc(GC_warn_proc p);
/* GC_get_warn_proc returns the current warn_proc. */