From b850e1c9bb460d54d7a5e1131fe78f3474700251 Mon Sep 17 00:00:00 2001
From: Ivan Maidanski
Date: Fri, 20 Oct 2017 00:25:52 +0300
Subject: [PATCH] Workaround Thread Sanitizer (TSan) false positive warnings
 (partially)

This patch covers only data race false positive warnings reported in
async_set_pht_entry_from_index, GC_clear_stack, GC_invoke_finalizers,
GC_lock, GC_noop1, GC_notify_or_invoke_finalizers, GC_pthread_create,
GC_suspend_handler_inner, I_DONT_HOLD_LOCK.

* finalize.c (GC_should_invoke_finalizers): Add
GC_ATTR_NO_SANITIZE_THREAD.
* mark.c (GC_noop1): Likewise.
* os_dep.c [MPROTECT_VDB && THREADS && AO_HAVE_test_and_set_acquire]
(async_set_pht_entry_from_index): Likewise.
* finalize.c (GC_invoke_finalizers, GC_notify_or_invoke_finalizers):
Call GC_should_invoke_finalizers() instead of
GC_fnlz_roots.finalize_now!=NULL.
* include/private/gc_locks.h [(GC_WIN32_THREADS && !USE_PTHREAD_LOCKS
|| GC_PTHREADS) && GC_ASSERTIONS && THREAD_SANITIZER]
(I_DONT_HOLD_LOCK): Define to TRUE; add comment.
* include/private/gc_locks.h [!GC_ALWAYS_MULTITHREADED
&& THREAD_SANITIZER] (set_need_to_lock): Do not set GC_need_to_lock if
already set; add comment.
* include/private/gc_priv.h [!GC_ATTR_NO_SANITIZE_THREAD]
(GC_ATTR_NO_SANITIZE_THREAD): New macro.
* include/private/gcconfig.h [__has_feature
&& __has_feature(thread_sanitizer)] (THREAD_SANITIZER): Define.
* misc.c [THREADS] (next_random_no): New static function (with
GC_ATTR_NO_SANITIZE_THREAD).
* pthread_stop_world.c [!GC_OPENBSD_UTHREADS && !NACL]
(update_last_stop_count): Likewise.
* pthread_support.c [THREAD_SANITIZER && (USE_SPIN_LOCK
|| !NO_PTHREAD_TRYLOCK)] (is_collecting): Likewise.
* pthread_support.c [USE_SPIN_LOCK] (set_last_spins_and_high_spin_max,
reset_spin_max): Likewise.
* misc.c [THREADS] (GC_clear_stack): Remove random_no static variable;
use next_random_no() instead of ++random_no%13.
* pthread_stop_world.c [!GC_OPENBSD_UTHREADS && !NACL]
(GC_suspend_handler_inner): Call update_last_stop_count() instead of
me->stop_info.last_stop_count=my_stop_count.
* pthread_support.c [USE_SPIN_LOCK || !NO_PTHREAD_TRYLOCK] (SPIN_MAX):
Define only if not yet.
* pthread_support.c [USE_SPIN_LOCK || !NO_PTHREAD_TRYLOCK]
(GC_collecting): Initialize to FALSE instead of 0.
* pthread_support.c [!(THREAD_SANITIZER && (USE_SPIN_LOCK
|| !NO_PTHREAD_TRYLOCK))] (is_collecting): Define as a macro.
* pthread_support.c [USE_SPIN_LOCK] (low_spin_max, high_spin_max,
spin_max, last_spins): Move definition out of GC_lock().
* pthread_support.c (GC_lock): Use is_collecting(),
set_last_spins_and_high_spin_max() and reset_spin_max().
---
 finalize.c                 |  6 ++++--
 include/private/gc_locks.h | 20 ++++++++++++++---
 include/private/gc_priv.h  |  8 +++++++
 include/private/gcconfig.h |  3 +++
 mark.c                     |  2 +-
 misc.c                     | 16 +++++++++-----
 os_dep.c                   |  1 +
 pthread_stop_world.c       |  8 ++++++-
 pthread_support.c          | 54 ++++++++++++++++++++++++++++++++++++----------
 9 files changed, 95 insertions(+), 23 deletions(-)

diff --git a/finalize.c b/finalize.c
index 88ce2eb..0c39b54 100644
--- a/finalize.c
+++ b/finalize.c
@@ -1183,6 +1183,7 @@ GC_INNER void GC_finalize(void)
 /* finalizers can only be called from some kind of "safe state" and  */
 /* getting into that safe state is expensive.)                       */
 GC_API int GC_CALL GC_should_invoke_finalizers(void)
+                                GC_ATTR_NO_SANITIZE_THREAD
 {
     return GC_fnlz_roots.finalize_now != NULL;
 }
@@ -1195,7 +1196,7 @@ GC_API int GC_CALL GC_invoke_finalizers(void)
     word bytes_freed_before = 0; /* initialized to prevent warning. */
     DCL_LOCK_STATE;
 
-    while (GC_fnlz_roots.finalize_now != NULL) {
+    while (GC_should_invoke_finalizers()) {
         struct finalizable_object * curr_fo;
 
 #       ifdef THREADS
@@ -1244,7 +1245,8 @@ GC_INNER void GC_notify_or_invoke_finalizers(void)
 #   if defined(THREADS) && !defined(KEEP_BACK_PTRS) \
        && !defined(MAKE_BACK_GRAPH)
       /* Quick check (while unlocked) for an empty finalization queue. */
-      if (NULL == GC_fnlz_roots.finalize_now) return;
+      if (!GC_should_invoke_finalizers())
+        return;
 #   endif
     LOCK();
 
diff --git a/include/private/gc_locks.h b/include/private/gc_locks.h
index c961869..e417a6b 100644
--- a/include/private/gc_locks.h
+++ b/include/private/gc_locks.h
@@ -66,8 +66,12 @@
 #    define UNSET_LOCK_HOLDER() GC_lock_holder = NO_THREAD
 #    define I_HOLD_LOCK() (!GC_need_to_lock \
                            || GC_lock_holder == GetCurrentThreadId())
-#    define I_DONT_HOLD_LOCK() (!GC_need_to_lock \
+#    ifdef THREAD_SANITIZER
+#      define I_DONT_HOLD_LOCK() TRUE /* Conservatively say yes */
+#    else
+#      define I_DONT_HOLD_LOCK() (!GC_need_to_lock \
                            || GC_lock_holder != GetCurrentThreadId())
+#    endif
 #    define UNCOND_LOCK() \
                 { GC_ASSERT(I_DONT_HOLD_LOCK()); \
                   EnterCriticalSection(&GC_allocate_ml); \
@@ -178,8 +182,8 @@
 #    define I_HOLD_LOCK() \
                 (!GC_need_to_lock \
                  || GC_lock_holder == NUMERIC_THREAD_ID(pthread_self()))
-#    ifndef NUMERIC_THREAD_ID_UNIQUE
-#      define I_DONT_HOLD_LOCK() 1 /* Conservatively say yes */
+#    if !defined(NUMERIC_THREAD_ID_UNIQUE) || defined(THREAD_SANITIZER)
+#      define I_DONT_HOLD_LOCK() TRUE /* Conservatively say yes */
 #    else
 #      define I_DONT_HOLD_LOCK() \
                 (!GC_need_to_lock \
@@ -203,8 +207,18 @@
 #    endif
 #    undef GC_ALWAYS_MULTITHREADED
     GC_EXTERN GC_bool GC_need_to_lock;
+#    ifdef THREAD_SANITIZER
+      /* To workaround TSan false positive (e.g., when                */
+      /* GC_pthread_create is called from multiple threads in         */
+      /* parallel), do not set GC_need_to_lock if it is already set.  */
+#      define set_need_to_lock() \
+                (void)(*(GC_bool volatile *)&GC_need_to_lock \
+                        ? FALSE \
+                        : (GC_need_to_lock = TRUE))
+#    else
 #      define set_need_to_lock() (void)(GC_need_to_lock = TRUE)
                                 /* We are multi-threaded now. */
+#    endif
 #  endif
 
 #else /* !THREADS */
diff --git a/include/private/gc_priv.h b/include/private/gc_priv.h
index 0b13b48..77db24b 100644
--- a/include/private/gc_priv.h
+++ b/include/private/gc_priv.h
@@ -170,6 +170,14 @@ typedef char * ptr_t;    /* A generic pointer to which we can add        */
 #  endif
 #endif /* !GC_ATTR_NO_SANITIZE_MEMORY */
 
+#ifndef GC_ATTR_NO_SANITIZE_THREAD
+# ifdef THREAD_SANITIZER
+#   define GC_ATTR_NO_SANITIZE_THREAD __attribute__((no_sanitize("thread")))
+# else
+#   define GC_ATTR_NO_SANITIZE_THREAD /* empty */
+# endif
+#endif /* !GC_ATTR_NO_SANITIZE_THREAD */
+
 #ifndef GC_ATTR_UNUSED
 # if GC_GNUC_PREREQ(3, 4)
 #   define GC_ATTR_UNUSED __attribute__((__unused__))
diff --git a/include/private/gcconfig.h b/include/private/gcconfig.h
index faa3f09..4b75146 100644
--- a/include/private/gcconfig.h
+++ b/include/private/gcconfig.h
@@ -3127,6 +3127,9 @@
 # if __has_feature(memory_sanitizer) && !defined(MEMORY_SANITIZER)
 #   define MEMORY_SANITIZER
 # endif
+# if __has_feature(thread_sanitizer) && !defined(THREAD_SANITIZER)
+#   define THREAD_SANITIZER
+# endif
 #endif
 
 #if defined(SPARC)
diff --git a/mark.c b/mark.c
index c0d1def..f538c56 100644
--- a/mark.c
+++ b/mark.c
@@ -50,7 +50,7 @@ void GC_noop6(word arg1 GC_ATTR_UNUSED, word arg2 GC_ATTR_UNUSED,
 
 /* Single argument version, robust against whole program analysis. */
 volatile word GC_noop_sink;
-GC_API void GC_CALL GC_noop1(word x)
+GC_API void GC_CALL GC_noop1(word x) GC_ATTR_NO_SANITIZE_THREAD
 {
     GC_noop_sink = x;
 }
diff --git a/misc.c b/misc.c
index 399613c..c4f576d 100644
--- a/misc.c
+++ b/misc.c
@@ -344,6 +344,16 @@ GC_INNER void GC_extend_size_map(size_t i)
 }
 #endif
 
+#ifdef THREADS
+  /* Used to occasionally clear a bigger chunk. */
+  /* TODO: Should be more random than it is ... */
+  static unsigned next_random_no(void) GC_ATTR_NO_SANITIZE_THREAD
+  {
+    static unsigned random_no = 0;
+    return ++random_no % 13;
+  }
+#endif /* THREADS */
+
 /* Clear some of the inaccessible part of the stack.  Returns its      */
 /* argument, so it can be used in a tail call position, hence clearing */
 /* another frame.                                                      */
@@ -353,10 +363,6 @@ GC_API void * GC_CALL GC_clear_stack(void *arg)
     ptr_t sp = GC_approx_sp();  /* Hotter than actual sp */
 #   ifdef THREADS
         word volatile dummy[SMALL_CLEAR_SIZE];
-        static unsigned random_no = 0;
-                        /* Should be more random than it is ... */
-                        /* Used to occasionally clear a bigger */
-                        /* chunk. */
 #   endif
 
 #   define SLOP 400
@@ -375,7 +381,7 @@ GC_API void * GC_CALL GC_clear_stack(void *arg)
     /* thus more junk remains accessible, thus the heap gets */
     /* larger ... */
 #   ifdef THREADS
-      if (++random_no % 13 == 0) {
+      if (next_random_no() == 0) {
         ptr_t limit = sp;
 
         MAKE_HOTTER(limit, BIG_CLEAR_SIZE*sizeof(word));
diff --git a/os_dep.c b/os_dep.c
index 2ca6f0a..690a4e5 100644
--- a/os_dep.c
+++ b/os_dep.c
@@ -3109,6 +3109,7 @@ GC_API GC_push_other_roots_proc GC_CALL GC_get_push_other_roots(void)
   GC_INNER volatile AO_TS_t GC_fault_handler_lock = AO_TS_INITIALIZER;
   static void async_set_pht_entry_from_index(volatile page_hash_table db,
                                              size_t index)
+                                                GC_ATTR_NO_SANITIZE_THREAD
   {
     while (AO_test_and_set_acquire(&GC_fault_handler_lock) == AO_TS_SET) {
       /* empty */
diff --git a/pthread_stop_world.c b/pthread_stop_world.c
index be08b8f..4834a05 100644
--- a/pthread_stop_world.c
+++ b/pthread_stop_world.c
@@ -240,6 +240,12 @@ STATIC void GC_suspend_handler_inner(ptr_t dummy, void *context);
   errno = old_errno;
 }
 
+static void update_last_stop_count(GC_thread me, AO_t my_stop_count)
+                                        GC_ATTR_NO_SANITIZE_THREAD
+{
+  me -> stop_info.last_stop_count = my_stop_count;
+}
+
 STATIC void GC_suspend_handler_inner(ptr_t dummy GC_ATTR_UNUSED,
                                      void * context GC_ATTR_UNUSED)
 {
@@ -308,7 +314,7 @@ STATIC void GC_suspend_handler_inner(ptr_t dummy GC_ATTR_UNUSED,
   /* thread has been stopped.  Note that sem_post() is */
   /* the only async-signal-safe primitive in LinuxThreads. */
   sem_post(&GC_suspend_ack_sem);
-  me -> stop_info.last_stop_count = my_stop_count;
+  update_last_stop_count(me, my_stop_count);
 
   /* Wait until that thread tells us to restart by sending */
   /* this thread a GC_sig_thr_restart signal (should be masked */
diff --git a/pthread_support.c b/pthread_support.c
index 9da8014..659d603 100644
--- a/pthread_support.c
+++ b/pthread_support.c
@@ -1884,10 +1884,12 @@ STATIC void GC_pause(void)
     }
 #endif
 
-#define SPIN_MAX 128    /* Maximum number of calls to GC_pause before */
+#ifndef SPIN_MAX
+# define SPIN_MAX 128   /* Maximum number of calls to GC_pause before */
                         /* give up.                                   */
+#endif
 
-GC_INNER volatile GC_bool GC_collecting = 0;
+GC_INNER volatile GC_bool GC_collecting = FALSE;
                         /* A hint that we're in the collector and     */
                         /* holding the allocation lock for an         */
                         /* extended period.                           */
@@ -1955,6 +1957,18 @@ STATIC void GC_generic_lock(pthread_mutex_t * lock)
 
 #endif /* !USE_SPIN_LOCK || ... */
 
+#if defined(THREAD_SANITIZER) \
+    && (defined(USE_SPIN_LOCK) || !defined(NO_PTHREAD_TRYLOCK))
+  /* GC_collecting is a hint, a potential data race between   */
+  /* GC_lock() and ENTER/EXIT_GC() is OK to ignore.           */
+  static GC_bool is_collecting(void) GC_ATTR_NO_SANITIZE_THREAD
+  {
+    return GC_collecting;
+  }
+#else
+# define is_collecting() GC_collecting
+#endif
+
 #if defined(USE_SPIN_LOCK)
 
 /* Reasonably fast spin locks.  Basically the same implementation */
@@ -1963,13 +1977,30 @@ STATIC void GC_generic_lock(pthread_mutex_t * lock)
 
 GC_INNER volatile AO_TS_t GC_allocate_lock = AO_TS_INITIALIZER;
 
+# define low_spin_max 30 /* spin cycles if we suspect uniprocessor */
+# define high_spin_max SPIN_MAX /* spin cycles for multiprocessor */
+  static unsigned spin_max = low_spin_max;
+  static unsigned last_spins = 0;
+
+  /* A potential data race between threads invoking GC_lock which reads */
+  /* and updates spin_max and last_spins could be ignored because these */
+  /* variables are hints only.  (Atomic getters and setters are avoided */
+  /* here for performance reasons.)                                     */
+  static void set_last_spins_and_high_spin_max(unsigned new_last_spins)
+                                        GC_ATTR_NO_SANITIZE_THREAD
+  {
+    last_spins = new_last_spins;
+    spin_max = high_spin_max;
+  }
+
+  static void reset_spin_max(void) GC_ATTR_NO_SANITIZE_THREAD
+  {
+    spin_max = low_spin_max;
+  }
+
 GC_INNER void GC_lock(void)
 {
-# define low_spin_max 30 /* spin cycles if we suspect uniprocessor */
-# define high_spin_max SPIN_MAX /* spin cycles for multiprocessor */
-    static unsigned spin_max = low_spin_max;
     unsigned my_spin_max;
-    static unsigned last_spins = 0;
     unsigned my_last_spins;
     unsigned i;
 
@@ -1979,7 +2010,8 @@ GC_INNER void GC_lock(void)
     my_spin_max = spin_max;
     my_last_spins = last_spins;
     for (i = 0; i < my_spin_max; i++) {
-        if (GC_collecting || GC_nprocs == 1) goto yield;
+        if (is_collecting() || GC_nprocs == 1)
+          goto yield;
         if (i < my_last_spins/2) {
             GC_pause();
             continue;
@@ -1991,13 +2023,12 @@ GC_INNER void GC_lock(void)
              * against the other process with which we were contending.
              * Thus it makes sense to spin longer the next time.
              */
-            last_spins = i;
-            spin_max = high_spin_max;
+            set_last_spins_and_high_spin_max(i);
             return;
         }
     }
     /* We are probably being scheduled against the other process.  Sleep. */
-    spin_max = low_spin_max;
+    reset_spin_max();
 yield:
     for (i = 0;; ++i) {
         if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
@@ -2026,10 +2057,11 @@ yield:
 }
 
 #else  /* !USE_SPIN_LOCK */
+
 GC_INNER void GC_lock(void)
 {
 #ifndef NO_PTHREAD_TRYLOCK
-    if (1 == GC_nprocs || GC_collecting) {
+    if (1 == GC_nprocs || is_collecting()) {
         pthread_mutex_lock(&GC_allocate_ml);
     } else {
         GC_generic_lock(&GC_allocate_ml);
-- 
2.7.4
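
For reference, below is a minimal standalone sketch (not part of the patch
above) of the pattern the patch applies: a benign, intentionally
unsynchronized hint counter whose accessor carries
GC_ATTR_NO_SANITIZE_THREAD, so TSan skips instrumenting it and the race is
not reported.  The attribute/macro plumbing mirrors the gcconfig.h and
gc_priv.h hunks; the hint_counter, next_hint and worker names and the
pthread test harness are hypothetical and do not exist in the library.

/* Standalone illustration only; all names here are hypothetical.      */
#include <pthread.h>
#include <stdio.h>

/* Mirror of the patch's attribute plumbing: define the attribute only */
/* when compiling with ThreadSanitizer support, otherwise keep it      */
/* empty so the code stays portable.                                   */
#if defined(__has_feature)
# if __has_feature(thread_sanitizer)
#   define GC_ATTR_NO_SANITIZE_THREAD __attribute__((no_sanitize("thread")))
# endif
#endif
#ifndef GC_ATTR_NO_SANITIZE_THREAD
# define GC_ATTR_NO_SANITIZE_THREAD /* empty */
#endif

/* A hint-only counter (analogous to random_no in GC_clear_stack):     */
/* races on it are benign, so its accessor is excluded from TSan       */
/* instrumentation instead of being made atomic.                       */
static unsigned hint_counter = 0;

/* The attribute is placed before the declarator here purely for       */
/* portability of this sketch; the patch places it after the           */
/* parameter list.                                                     */
static GC_ATTR_NO_SANITIZE_THREAD unsigned next_hint(void)
{
  return ++hint_counter % 13;   /* occasionally returns 0 */
}

static void *worker(void *arg)
{
  int i;

  (void)arg;
  for (i = 0; i < 100000; ++i) {
    (void)next_hint();          /* racy on purpose, but harmless */
  }
  return NULL;
}

int main(void)
{
  pthread_t t1, t2;

  if (pthread_create(&t1, NULL, worker, NULL) != 0
      || pthread_create(&t2, NULL, worker, NULL) != 0)
    return 1;
  (void)pthread_join(t1, NULL);
  (void)pthread_join(t2, NULL);
  printf("last hint: %u\n", next_hint());
  return 0;
}

Built with something like cc -fsanitize=thread -pthread, the unannotated
version of next_hint should be flagged by TSan, while the annotated one is
not; silencing a benign race rather than adding synchronization on a hot
path is the same trade-off the patch makes for GC_collecting, spin_max,
last_spins and random_no.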