Update the TSan-specific atomics implementation to match the current API.
author yurys@chromium.org <yurys@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Mon, 24 Jun 2013 09:35:30 +0000 (09:35 +0000)
committer yurys@chromium.org <yurys@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Mon, 24 Jun 2013 09:35:30 +0000 (09:35 +0000)
BUG=128314

Review URL: https://codereview.chromium.org/17591005

Patch from Alexander Potapenko <glider@chromium.org>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15279 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
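
In brief: the refreshed __tsan_* interface adds a second memory order to the
compare-exchange functions (the order used when the exchange fails), adds
128-bit atomics gated on __TSAN_HAS_INT128, adds the fetch_nand and
value-returning compare_exchange_val families, and splits
__tsan_atomic_thread_fence from the new __tsan_atomic_signal_fence. A minimal
before/after sketch of the compare-and-swap change, using v8's
NoBarrier_CompareAndSwap wrapper as the example (the _Old/_New suffixes are
illustrative only, not names from this patch):

  // Old interface: a single memory order covers both outcomes.
  inline Atomic32 NoBarrier_CompareAndSwap_Old(volatile Atomic32* ptr,
                                               Atomic32 old_value,
                                               Atomic32 new_value) {
    Atomic32 cmp = old_value;
    __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
        __tsan_memory_order_relaxed);
    return cmp;  // value observed; equals old_value iff the swap happened
  }

  // New interface: a failed CAS performs only a load, so it takes its own
  // memory order as the final argument.
  inline Atomic32 NoBarrier_CompareAndSwap_New(volatile Atomic32* ptr,
                                               Atomic32 old_value,
                                               Atomic32 new_value) {
    Atomic32 cmp = old_value;
    __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
        __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
    return cmp;
  }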

diff --git a/src/atomicops_internals_tsan.h b/src/atomicops_internals_tsan.h
index 6559336..e52c26c 100644
--- a/src/atomicops_internals_tsan.h
+++ b/src/atomicops_internals_tsan.h
@@ -62,97 +62,162 @@ typedef short __tsan_atomic16;  // NOLINT
 typedef int   __tsan_atomic32;
 typedef long  __tsan_atomic64;  // NOLINT
 
+#if defined(__SIZEOF_INT128__) \
+    || (__clang_major__ * 100 + __clang_minor__ >= 302)
+typedef __int128 __tsan_atomic128;
+#define __TSAN_HAS_INT128 1
+#else
+typedef char     __tsan_atomic128;
+#define __TSAN_HAS_INT128 0
+#endif
+
 typedef enum {
-  __tsan_memory_order_relaxed = (1 << 0) + 100500,
-  __tsan_memory_order_consume = (1 << 1) + 100500,
-  __tsan_memory_order_acquire = (1 << 2) + 100500,
-  __tsan_memory_order_release = (1 << 3) + 100500,
-  __tsan_memory_order_acq_rel = (1 << 4) + 100500,
-  __tsan_memory_order_seq_cst = (1 << 5) + 100500,
+  __tsan_memory_order_relaxed,
+  __tsan_memory_order_consume,
+  __tsan_memory_order_acquire,
+  __tsan_memory_order_release,
+  __tsan_memory_order_acq_rel,
+  __tsan_memory_order_seq_cst,
 } __tsan_memory_order;
 
-__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
+    __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
     __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
     __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
     __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
     __tsan_memory_order mo);
 
-void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
+void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
     __tsan_memory_order mo);
-void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
+void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
     __tsan_memory_order mo);
-void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
+void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
     __tsan_memory_order mo);
-void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
+void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
     __tsan_memory_order mo);
+void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
+    __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
-__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
     __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
-__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
     __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
-__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
     __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
-__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
     __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
-__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
+__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
     __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
+__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
     __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
+__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
     __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
+__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
-int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
-    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
-int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
-    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
-int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
-    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
-int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
-    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
-
-int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
-    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
-int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
-    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
-int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
-    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
-int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
-    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
+    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+    __tsan_memory_order fail_mo);
+
+__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
+    volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
+    volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
+    volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
+    volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
+    volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
+    __tsan_memory_order mo, __tsan_memory_order fail_mo);
 
 void __tsan_atomic_thread_fence(__tsan_memory_order mo);
+void __tsan_atomic_signal_fence(__tsan_memory_order mo);
 
 #ifdef __cplusplus
 }  // extern "C"
@@ -160,166 +225,166 @@ void __tsan_atomic_thread_fence(__tsan_memory_order mo);
 
 #endif  // #ifndef TSAN_INTERFACE_ATOMIC_H
 
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
   Atomic32 cmp = old_value;
   __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
-                                          __tsan_memory_order_relaxed);
+      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
   return cmp;
 }
 
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                          Atomic32 new_value) {
   return __tsan_atomic32_exchange(ptr, new_value,
-                                  __tsan_memory_order_relaxed);
+      __tsan_memory_order_relaxed);
 }
 
-inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
                                        Atomic32 new_value) {
   return __tsan_atomic32_exchange(ptr, new_value,
-                                  __tsan_memory_order_acquire);
+      __tsan_memory_order_acquire);
 }
 
-inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
+inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
                                        Atomic32 new_value) {
   return __tsan_atomic32_exchange(ptr, new_value,
-                                  __tsan_memory_order_release);
+      __tsan_memory_order_release);
 }
 
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
                                           Atomic32 increment) {
   return increment + __tsan_atomic32_fetch_add(ptr, increment,
-                                               __tsan_memory_order_relaxed);
+      __tsan_memory_order_relaxed);
 }
 
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
                                         Atomic32 increment) {
   return increment + __tsan_atomic32_fetch_add(ptr, increment,
-                                               __tsan_memory_order_acq_rel);
+      __tsan_memory_order_acq_rel);
 }
 
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   Atomic32 cmp = old_value;
   __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
-                                          __tsan_memory_order_acquire);
+      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
   return cmp;
 }
 
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   Atomic32 cmp = old_value;
   __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
-                                          __tsan_memory_order_release);
+      __tsan_memory_order_release, __tsan_memory_order_relaxed);
   return cmp;
 }
 
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
   __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
 }
 
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
   __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
   __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
 }
 
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
   __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
 }
 
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
   return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
 }
 
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
   return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
 }
 
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
   __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
   return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
 }
 
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                          Atomic64 old_value,
                                          Atomic64 new_value) {
   Atomic64 cmp = old_value;
   __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
-                                          __tsan_memory_order_relaxed);
+      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
   return cmp;
 }
 
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                          Atomic64 new_value) {
   return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
 }
 
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
                                        Atomic64 new_value) {
   return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
 }
 
-inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
+inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
                                        Atomic64 new_value) {
   return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
 }
 
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
                                           Atomic64 increment) {
   return increment + __tsan_atomic64_fetch_add(ptr, increment,
-                                               __tsan_memory_order_relaxed);
+      __tsan_memory_order_relaxed);
 }
 
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
                                         Atomic64 increment) {
   return increment + __tsan_atomic64_fetch_add(ptr, increment,
-                                               __tsan_memory_order_acq_rel);
+      __tsan_memory_order_acq_rel);
 }
 
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
   __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
 }
 
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
   __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
   __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
 }
 
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
   __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
 }
 
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
   return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
 }
 
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
   return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
 }
 
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
   __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
   return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
 }
 
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 cmp = old_value;
   __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
-                                          __tsan_memory_order_acquire);
+      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
   return cmp;
 }
 
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 cmp = old_value;
   __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
-                                          __tsan_memory_order_release);
+      __tsan_memory_order_release, __tsan_memory_order_relaxed);
   return cmp;
 }
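
A note on the failure orders chosen above: as in C11/C++11, a failed
compare-exchange performs only a load, so its memory order may not be release
or acq_rel. That is why Acquire_CompareAndSwap can keep acquire on both
paths, while Release_CompareAndSwap downgrades the failure path to relaxed.
For illustration, a small client of these wrappers (SpinLock is a sketch for
this note, not code from v8; it assumes the wrappers live in v8::internal as
declared by src/atomicops.h):

  #include "src/atomicops.h"

  using v8::internal::Atomic32;
  using v8::internal::Acquire_CompareAndSwap;
  using v8::internal::Release_Store;

  class SpinLock {
   public:
    SpinLock() : state_(0) {}
    void Lock() {
      // Acquire on the successful CAS makes the previous holder's
      // critical-section writes visible to this thread.
      while (Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
        // Spin until the lock is observed free.
      }
    }
    void Unlock() {
      // Release store publishes this thread's writes to the next acquirer.
      Release_Store(&state_, 0);
    }
   private:
    volatile Atomic32 state_;
  };

Under TSan, each wrapper call maps onto one of the __tsan_* functions above,
so the tool observes the intended acquire/release pairing rather than raw
memory operations.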