From 8f5746e507098ad505af194d385d756675e26a90 Mon Sep 17 00:00:00 2001
From: Ivan Maidanski <ivmai@mail.ru>
Date: Fri, 3 Nov 2017 20:08:09 +0300
Subject: [PATCH] Fix data race in a list referenced by A.aa (gctest)

* tests/test.c [!AO_HAVE_load] (AO_load): Remove macro.
* tests/test.c [!AO_HAVE_store] (AO_store): Likewise.
* tests/test.c [!AO_HAVE_load_acquire] (AO_load_acquire): New static
function (which uses FINALIZER_LOCK/FINALIZER_UNLOCK).
* tests/test.c [!AO_HAVE_store_release] (AO_store_release): New macro
(which uses FINALIZER_LOCK/FINALIZER_UNLOCK).
* tests/test.c [!AO_HAVE_fetch_and_add1] (AO_fetch_and_add1): Add
comment.
* tests/test.c (a_set): Use AO_store_release() instead of AO_store().
* tests/test.c (a_get): Use AO_load_acquire() instead of AO_load().
* tests/test.c (reverse_test_inner): Improve comment about thread
safety of a_set(reverse(reverse(a_get()))).
---
 tests/test.c | 30 +++++++++++++++++++++---------
 1 file changed, 21 insertions(+), 9 deletions(-)

diff --git a/tests/test.c b/tests/test.c
index c9b7323..3a208f8 100644
--- a/tests/test.c
+++ b/tests/test.c
@@ -175,14 +175,24 @@
 /* AO_t not defined.       */
 # define AO_t GC_word
 #endif
-#ifndef AO_HAVE_load
-# define AO_load(p) (*(p))
+#ifndef AO_HAVE_load_acquire
+  static AO_t AO_load_acquire(const volatile AO_t *addr)
+  {
+    AO_t result;
+
+    FINALIZER_LOCK();
+    result = *addr;
+    FINALIZER_UNLOCK();
+    return result;
+  }
 #endif
-#ifndef AO_HAVE_store
-# define AO_store(p, v) (void)(*(p) = (v))
+#ifndef AO_HAVE_store_release
+# define AO_store_release(p, v) \
+                (void)(FINALIZER_LOCK(), *(p) = (v), FINALIZER_UNLOCK(), 0)
 #endif
 #ifndef AO_HAVE_fetch_and_add1
 # define AO_fetch_and_add1(p) ((*(p))++)
+                /* This is used only to update counters.       */
 #endif
 
 /* Allocation Statistics. Synchronization is not strictly necessary.   */
@@ -641,8 +651,8 @@
 
 volatile struct { char dummy; AO_t aa; } A;
 
-#define a_set(p) AO_store(&A.aa, (AO_t)(p))
-#define a_get() (sexpr)AO_load(&A.aa)
+#define a_set(p) AO_store_release(&A.aa, (AO_t)(p))
+#define a_get() (sexpr)AO_load_acquire(&A.aa)
 
 /*
  * Repeatedly reverse lists built out of very different sized cons cells.
@@ -737,9 +747,11 @@ void *GC_CALLBACK reverse_test_inner(void *data)
          && (NTHREADS > 0)
         if (i % 10 == 0) fork_a_thread();
 #     endif
-        /* This maintains the invariant that a always points to a list of */
-        /* 49 integers.  Thus this is thread safe without locks,          */
-        /* assuming atomic pointer assignments.                           */
+        /* This maintains the invariant that a always points to a list    */
+        /* of 49 integers.  Thus, this is thread safe without locks,      */
+        /* assuming acquire/release barriers in a_get/set() and atomic    */
+        /* pointer assignments (otherwise, e.g., check_ints() may see     */
+        /* an uninitialized object returned by GC_MALLOC_STUBBORN).       */
         a_set(reverse(reverse(a_get())));
 #     if !defined(AT_END) && !defined(THREADS)
         /* This is not thread safe, since realloc explicitly deallocates */
-- 
2.7.4
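
A minimal standalone sketch of the lock-based fallback pattern this patch
introduces, for readers without the libatomic_ops headers at hand.  The names
demo_lock, word, shared_cell, load_acquire and store_release below are
illustrative stand-ins for FINALIZER_LOCK/FINALIZER_UNLOCK, AO_t and A.aa,
not code taken from test.c; pthreads is assumed only for the sketch.

/* Sketch: when no native acquire/release atomics are available, a mutex */
/* around a plain load/store provides the same ordering guarantees.      */
#include <pthread.h>
#include <stdio.h>

typedef unsigned long word;    /* illustrative stand-in for AO_t/GC_word */

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile word shared_cell;      /* illustrative stand-in for A.aa */

/* Analogue of the patch's AO_load_acquire fallback: taking the lock     */
/* acts as the acquire barrier, so this thread observes every write      */
/* performed before the matching release in store_release().             */
static word load_acquire(const volatile word *addr)
{
  word result;

  pthread_mutex_lock(&demo_lock);
  result = *addr;
  pthread_mutex_unlock(&demo_lock);
  return result;
}

/* Analogue of the patch's AO_store_release fallback: releasing the lock */
/* guarantees that all initialization preceding the store is visible to  */
/* any thread that subsequently performs load_acquire().                 */
static void store_release(volatile word *addr, word value)
{
  pthread_mutex_lock(&demo_lock);
  *addr = value;
  pthread_mutex_unlock(&demo_lock);
}

int main(void)
{
  store_release(&shared_cell, 42);              /* publish, like a_set() */
  printf("%lu\n", (unsigned long)load_acquire(&shared_cell));
                                                /* consume, like a_get() */
  return 0;
}

This mirrors why the patch matters: without the release barrier in a_set(),
another thread's a_get() could observe the new list pointer before the cons
cells it points to are fully initialized, which is exactly the race the
improved comment in reverse_test_inner() describes.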