From: Dmitry Vyukov
Date: Fri, 23 Nov 2012 15:51:45 +0000 (+0000)
Subject: tsan: add failure memory order to atomic compare exchange functions
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=195eda99225b58091971dd034e7c7650de68f4a7;p=platform%2Fupstream%2Fllvm.git

tsan: add failure memory order to atomic compare exchange functions

llvm-svn: 168518
---

diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
index 89b0e15..01d5bfa 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -198,7 +198,8 @@ static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
 
 template<typename T>
 static bool AtomicCAS(ThreadState *thr, uptr pc,
-    volatile T *a, T *c, T v, morder mo) {
+    volatile T *a, T *c, T v, morder mo, morder fmo) {
+  (void)fmo;
   if (IsReleaseOrder(mo))
     Release(thr, pc, (uptr)a);
   T cc = *c;
@@ -213,8 +214,8 @@ static bool AtomicCAS(ThreadState *thr, uptr pc,
 
 template<typename T>
 static T AtomicCAS(ThreadState *thr, uptr pc,
-    volatile T *a, T c, T v, morder mo) {
-  AtomicCAS(thr, pc, a, &c, v, mo);
+    volatile T *a, T c, T v, morder mo, morder fmo) {
+  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
   return c;
 }
 
@@ -351,62 +352,62 @@ a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
 }
 
 int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
-    morder mo) {
-  SCOPED_ATOMIC(CAS, a, c, v, mo);
+    morder mo, morder fmo) {
+  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
 }
 
 void __tsan_atomic_thread_fence(morder mo) {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.h b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.h
index b8d81af..2c1ce2e 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.h
@@ -106,35 +106,43 @@ __tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
 
 int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
-    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo);
+    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
-    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo);
+    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
-    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo);
+    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
-    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo);
+    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 
 int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
-    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo);
+    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
-    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo);
+    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
-    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo);
+    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
-    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo);
+    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
 
 __tsan_atomic8 __tsan_atomic8_compare_exchange_val(
     volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
-    __tsan_memory_order mo);
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
 __tsan_atomic16 __tsan_atomic16_compare_exchange_val(
     volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
-    __tsan_memory_order mo);
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
 __tsan_atomic32 __tsan_atomic32_compare_exchange_val(
     volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
-    __tsan_memory_order mo);
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
 __tsan_atomic64 __tsan_atomic64_compare_exchange_val(
     volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
-    __tsan_memory_order mo);
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
 
 void __tsan_atomic_thread_fence(__tsan_memory_order mo);
 void __tsan_atomic_signal_fence(__tsan_memory_order mo);
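
For illustration (not part of this change): the extra parameter mirrors the two-order form of the C++11 compare-exchange API, where the success order applies to the read-modify-write when the exchange happens and the failure order applies to the plain load when it does not. The (void)fmo in AtomicCAS indicates the runtime only threads the failure order through for now rather than acting on it. Below is a minimal sketch, in plain C++11, of the kind of user code that, when built with -fsanitize=thread, is expected to end up in the two-order __tsan_atomic32_compare_exchange_strong entry point declared above:

  // Example only: ordinary C++11, no tsan-specific code.
  // Success order (acq_rel) covers the case where the CAS succeeds and the
  // store is performed; failure order (acquire) covers the load-only case,
  // which is the distinction the new fmo/fail_mo parameter carries to the
  // runtime.
  #include <atomic>
  #include <cstdio>

  int main() {
    std::atomic<int> x(0);
    int expected = 0;
    bool ok = x.compare_exchange_strong(expected, 1,
                                        std::memory_order_acq_rel,
                                        std::memory_order_acquire);
    std::printf("ok=%d expected=%d x=%d\n", (int)ok, expected, x.load());
    return 0;
  }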