/* finalizers can only be called from some kind of "safe state" and */
/* getting into that safe state is expensive.) */
GC_API int GC_CALL GC_should_invoke_finalizers(void)
+ GC_ATTR_NO_SANITIZE_THREAD
{
+  /* TSan: the unsynchronized read of finalize_now is a benign hint;  */
+  /* callers re-check the queue under lock before acting on it.       */
return GC_fnlz_roots.finalize_now != NULL;
}
word bytes_freed_before = 0; /* initialized to prevent warning. */
DCL_LOCK_STATE;
- while (GC_fnlz_roots.finalize_now != NULL) {
+ while (GC_should_invoke_finalizers()) {
struct finalizable_object * curr_fo;
# ifdef THREADS
# if defined(THREADS) && !defined(KEEP_BACK_PTRS) \
&& !defined(MAKE_BACK_GRAPH)
/* Quick check (while unlocked) for an empty finalization queue. */
- if (NULL == GC_fnlz_roots.finalize_now) return;
+ if (!GC_should_invoke_finalizers())
+ return;
# endif
LOCK();
# define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
# define I_HOLD_LOCK() (!GC_need_to_lock \
|| GC_lock_holder == GetCurrentThreadId())
-# define I_DONT_HOLD_LOCK() (!GC_need_to_lock \
+# ifdef THREAD_SANITIZER
+# define I_DONT_HOLD_LOCK() TRUE /* Conservatively say yes */
+# else
+# define I_DONT_HOLD_LOCK() (!GC_need_to_lock \
|| GC_lock_holder != GetCurrentThreadId())
+# endif
# define UNCOND_LOCK() \
{ GC_ASSERT(I_DONT_HOLD_LOCK()); \
EnterCriticalSection(&GC_allocate_ml); \
# define I_HOLD_LOCK() \
(!GC_need_to_lock \
|| GC_lock_holder == NUMERIC_THREAD_ID(pthread_self()))
-# ifndef NUMERIC_THREAD_ID_UNIQUE
-# define I_DONT_HOLD_LOCK() 1 /* Conservatively say yes */
+# if !defined(NUMERIC_THREAD_ID_UNIQUE) || defined(THREAD_SANITIZER)
+# define I_DONT_HOLD_LOCK() TRUE /* Conservatively say yes */
# else
# define I_DONT_HOLD_LOCK() \
(!GC_need_to_lock \
# endif
# undef GC_ALWAYS_MULTITHREADED
GC_EXTERN GC_bool GC_need_to_lock;
+# ifdef THREAD_SANITIZER
+ /* To work around a TSan false positive (e.g., when */
+ /* GC_pthread_create is called from multiple threads in */
+ /* parallel), do not set GC_need_to_lock if it is already set. */
+# define set_need_to_lock() \
+ (void)(*(GC_bool volatile *)&GC_need_to_lock \
+ ? FALSE \
+ : (GC_need_to_lock = TRUE))
+# else
# define set_need_to_lock() (void)(GC_need_to_lock = TRUE)
/* We are multi-threaded now. */
+# endif
# endif
# else /* !THREADS */
# endif
#endif /* !GC_ATTR_NO_SANITIZE_MEMORY */
+#ifndef GC_ATTR_NO_SANITIZE_THREAD
+  /* Marks a function whose memory accesses should not be instrumented */
+  /* by ThreadSanitizer (Clang no_sanitize attribute); expands to      */
+  /* nothing when not building with TSan.                              */
+# ifdef THREAD_SANITIZER
+# define GC_ATTR_NO_SANITIZE_THREAD __attribute__((no_sanitize("thread")))
+# else
+# define GC_ATTR_NO_SANITIZE_THREAD /* empty */
+# endif
+#endif /* !GC_ATTR_NO_SANITIZE_THREAD */
+
#ifndef GC_ATTR_UNUSED
# if GC_GNUC_PREREQ(3, 4)
# define GC_ATTR_UNUSED __attribute__((__unused__))
# if __has_feature(memory_sanitizer) && !defined(MEMORY_SANITIZER)
# define MEMORY_SANITIZER
# endif
+  /* Clang reports __has_feature(thread_sanitizer) when compiling */
+  /* with -fsanitize=thread; mirror it into THREAD_SANITIZER.     */
+# if __has_feature(thread_sanitizer) && !defined(THREAD_SANITIZER)
+# define THREAD_SANITIZER
+# endif
#endif
#if defined(SPARC)
/* Single argument version, robust against whole program analysis. */
volatile word GC_noop_sink;
-GC_API void GC_CALL GC_noop1(word x)
+GC_API void GC_CALL GC_noop1(word x) GC_ATTR_NO_SANITIZE_THREAD
{
+  /* TSan: concurrent stores to the global sink are deliberate; the */
+  /* store exists only to defeat optimization (see comment above).  */
GC_noop_sink = x;
}
}
#endif
+#ifdef THREADS
+ /* Used to occasionally clear a bigger chunk. */
+ /* TODO: Should be more random than it is ... */
+ /* Returns a value in 0..12.  The unsynchronized update of the */
+ /* static counter is deliberate (the result is only a hint),   */
+ /* hence the no-sanitize attribute.                            */
+ static unsigned next_random_no(void) GC_ATTR_NO_SANITIZE_THREAD
+ {
+ static unsigned random_no = 0;
+ return ++random_no % 13;
+ }
+#endif /* THREADS */
+
/* Clear some of the inaccessible part of the stack. Returns its */
/* argument, so it can be used in a tail call position, hence clearing */
/* another frame. */
ptr_t sp = GC_approx_sp(); /* Hotter than actual sp */
# ifdef THREADS
word volatile dummy[SMALL_CLEAR_SIZE];
- static unsigned random_no = 0;
- /* Should be more random than it is ... */
- /* Used to occasionally clear a bigger */
- /* chunk. */
# endif
# define SLOP 400
/* thus more junk remains accessible, thus the heap gets */
/* larger ... */
# ifdef THREADS
- if (++random_no % 13 == 0) {
+ if (next_random_no() == 0) {
ptr_t limit = sp;
MAKE_HOTTER(limit, BIG_CLEAR_SIZE*sizeof(word));
GC_INNER volatile AO_TS_t GC_fault_handler_lock = AO_TS_INITIALIZER;
static void async_set_pht_entry_from_index(volatile page_hash_table db,
size_t index)
+ GC_ATTR_NO_SANITIZE_THREAD
{
while (AO_test_and_set_acquire(&GC_fault_handler_lock) == AO_TS_SET) {
/* empty */
errno = old_errno;
}
+/* Record the stop count observed by the suspended thread.  Factored   */
+/* into a separate helper so that the store can be excluded from TSan  */
+/* instrumentation.                                                    */
+static void update_last_stop_count(GC_thread me, AO_t my_stop_count)
+ GC_ATTR_NO_SANITIZE_THREAD
+{
+ me -> stop_info.last_stop_count = my_stop_count;
+}
+
STATIC void GC_suspend_handler_inner(ptr_t dummy GC_ATTR_UNUSED,
void * context GC_ATTR_UNUSED)
{
/* thread has been stopped. Note that sem_post() is */
/* the only async-signal-safe primitive in LinuxThreads. */
sem_post(&GC_suspend_ack_sem);
- me -> stop_info.last_stop_count = my_stop_count;
+ update_last_stop_count(me, my_stop_count);
/* Wait until that thread tells us to restart by sending */
/* this thread a GC_sig_thr_restart signal (should be masked */
}
#endif
-#define SPIN_MAX 128 /* Maximum number of calls to GC_pause before */
+#ifndef SPIN_MAX
+# define SPIN_MAX 128 /* Maximum number of calls to GC_pause before */
/* give up. */
+#endif
+/* (The guard allows SPIN_MAX to be overridden from the build.) */
-GC_INNER volatile GC_bool GC_collecting = 0;
+GC_INNER volatile GC_bool GC_collecting = FALSE;
/* A hint that we're in the collector and */
/* holding the allocation lock for an */
/* extended period. */
#endif /* !USE_SPIN_LOCK || ... */
+#if defined(THREAD_SANITIZER) \
+ && (defined(USE_SPIN_LOCK) || !defined(NO_PTHREAD_TRYLOCK))
+ /* GC_collecting is a hint; a potential data race between */
+ /* GC_lock() and ENTER/EXIT_GC() is OK to ignore. */
+ static GC_bool is_collecting(void) GC_ATTR_NO_SANITIZE_THREAD
+ {
+ return GC_collecting;
+ }
+#else
+# define is_collecting() GC_collecting /* plain (uninstrumented) read */
+#endif
+
#if defined(USE_SPIN_LOCK)
/* Reasonably fast spin locks. Basically the same implementation */
GC_INNER volatile AO_TS_t GC_allocate_lock = AO_TS_INITIALIZER;
+# define low_spin_max 30 /* spin cycles if we suspect uniprocessor */
+# define high_spin_max SPIN_MAX /* spin cycles for multiprocessor */
+ static unsigned spin_max = low_spin_max;
+ static unsigned last_spins = 0;
+
+ /* A potential data race between threads invoking GC_lock, which reads */
+ /* and updates spin_max and last_spins, could be ignored because these */
+ /* variables are hints only. (Atomic getters and setters are avoided */
+ /* here for performance reasons.) */
+
+ /* Spinning paid off: remember how long we spun and use the longer */
+ /* (multiprocessor) spin limit next time.                           */
+ static void set_last_spins_and_high_spin_max(unsigned new_last_spins)
+ GC_ATTR_NO_SANITIZE_THREAD
+ {
+ last_spins = new_last_spins;
+ spin_max = high_spin_max;
+ }
+
+ /* Spinning did not pay off: fall back to the short (uniprocessor) */
+ /* spin limit.                                                     */
+ static void reset_spin_max(void) GC_ATTR_NO_SANITIZE_THREAD
+ {
+ spin_max = low_spin_max;
+ }
+
GC_INNER void GC_lock(void)
{
-# define low_spin_max 30 /* spin cycles if we suspect uniprocessor */
-# define high_spin_max SPIN_MAX /* spin cycles for multiprocessor */
- static unsigned spin_max = low_spin_max;
unsigned my_spin_max;
- static unsigned last_spins = 0;
unsigned my_last_spins;
unsigned i;
my_spin_max = spin_max;
my_last_spins = last_spins;
for (i = 0; i < my_spin_max; i++) {
- if (GC_collecting || GC_nprocs == 1) goto yield;
+ if (is_collecting() || GC_nprocs == 1)
+ goto yield;
if (i < my_last_spins/2) {
GC_pause();
continue;
* against the other process with which we were contending.
* Thus it makes sense to spin longer the next time.
*/
- last_spins = i;
- spin_max = high_spin_max;
+ set_last_spins_and_high_spin_max(i);
return;
}
}
/* We are probably being scheduled against the other process. Sleep. */
- spin_max = low_spin_max;
+ reset_spin_max();
yield:
for (i = 0;; ++i) {
if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
}
#else /* !USE_SPIN_LOCK */
+
GC_INNER void GC_lock(void)
{
#ifndef NO_PTHREAD_TRYLOCK
- if (1 == GC_nprocs || GC_collecting) {
+ if (1 == GC_nprocs || is_collecting()) {
pthread_mutex_lock(&GC_allocate_ml);
} else {
GC_generic_lock(&GC_allocate_ml);