tsan: add atomic nand operation
author    Dmitry Vyukov <dvyukov@google.com>  Mon, 26 Nov 2012 09:42:56 +0000
committer Dmitry Vyukov <dvyukov@google.com>  Mon, 26 Nov 2012 09:42:56 +0000
llvm-svn: 168584

compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
compiler-rt/lib/tsan/rtl/tsan_interface_atomic.h
compiler-rt/lib/tsan/rtl/tsan_stat.cc
compiler-rt/lib/tsan/rtl/tsan_stat.h

diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc
index 01d5bfa..2fb5aea 100644
@@ -197,6 +197,24 @@ static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
 }
 
 template<typename T>
+static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
+    morder mo) {
+  if (IsReleaseOrder(mo))
+    Release(thr, pc, (uptr)a);
+  T cmp = *a;
+  for (;;) {
+    T xch = ~(cmp & v);
+    T cur = __sync_val_compare_and_swap(a, cmp, xch);
+    if (cmp == cur)
+      break;
+    cmp = cur;
+  }
+  if (IsAcquireOrder(mo))
+    Acquire(thr, pc, (uptr)a);
+  return cmp;
+}
+
+template<typename T>
 static bool AtomicCAS(ThreadState *thr, uptr pc,
     volatile T *a, T *c, T v, morder mo, morder fmo) {
   (void)fmo;
@@ -351,6 +369,22 @@ a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
   SCOPED_ATOMIC(FetchXor, a, v, mo);
 }
 
+a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
+  SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
+  SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
+  SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
+  SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
 int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
     morder mo, morder fmo) {
   SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
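
The FetchNand helper cannot lean on a single __sync builtin (GCC 4.4 changed __sync_fetch_and_nand from ~old & v to ~(old & v)), so it emulates the read-modify-write with a compare-and-swap loop. A minimal standalone sketch of the same technique on std::atomic, illustrative only and independent of the tsan runtime:

  #include <atomic>
  #include <cassert>
  #include <cstdint>

  // Sketch: fetch_nand via a CAS loop. Stores ~(old & v) and returns old,
  // matching the C11/C++11 __atomic_fetch_nand semantics.
  template <typename T>
  T FetchNandSketch(std::atomic<T> *a, T v) {
    T cmp = a->load(std::memory_order_relaxed);
    // On failure, compare_exchange_weak reloads cmp with the current value,
    // so the desired value ~(cmp & v) is recomputed on every retry.
    while (!a->compare_exchange_weak(cmp, static_cast<T>(~(cmp & v)))) {
    }
    return cmp;  // previous value, like every fetch_* operation
  }

  int main() {
    std::atomic<uint8_t> x(0xF0);
    uint8_t old = FetchNandSketch(&x, static_cast<uint8_t>(0xCC));
    assert(old == 0xF0);
    assert(x.load() == static_cast<uint8_t>(~(0xF0 & 0xCC)));  // 0x3F
    return 0;
  }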
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.h b/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.h
index 2c1ce2e..996b0f7 100644
@@ -105,6 +105,15 @@ __tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
 __tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
     __tsan_atomic64 v, __tsan_memory_order mo);
 
+__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
+    __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
+    __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
+    __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
+    __tsan_atomic64 v, __tsan_memory_order mo);
+
 int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
     __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
     __tsan_memory_order fail_mo);
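
These declarations mirror the existing fetch_add/and/or/xor entry points; under -fsanitize=thread the compiler emits calls to them when it instruments __atomic_fetch_nand. Calling one directly, as sketched here, is purely illustrative (clear_bits and its parameter names are invented, and the header is assumed to be on the include path):

  #include "tsan_interface_atomic.h"

  // Hypothetical direct use of the new entry point: atomically replaces
  // *flags with ~(*flags & mask) and returns the previous value.
  __tsan_atomic32 clear_bits(volatile __tsan_atomic32 *flags,
                             __tsan_atomic32 mask) {
    return __tsan_atomic32_fetch_nand(flags, mask,
                                      __tsan_memory_order_acq_rel);
  }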
diff --git a/compiler-rt/lib/tsan/rtl/tsan_stat.cc b/compiler-rt/lib/tsan/rtl/tsan_stat.cc
index cb294bf..c155ae6 100644
@@ -77,6 +77,11 @@ void StatOutput(u64 *stat) {
   name[StatAtomicStore]                  = "            store                 ";
   name[StatAtomicExchange]               = "            exchange              ";
   name[StatAtomicFetchAdd]               = "            fetch_add             ";
+  name[StatAtomicFetchSub]               = "            fetch_sub             ";
+  name[StatAtomicFetchAnd]               = "            fetch_and             ";
+  name[StatAtomicFetchOr]                = "            fetch_or              ";
+  name[StatAtomicFetchXor]               = "            fetch_xor             ";
+  name[StatAtomicFetchNand]              = "            fetch_nand            ";
   name[StatAtomicCAS]                    = "            compare_exchange      ";
   name[StatAtomicFence]                  = "            fence                 ";
   name[StatAtomicRelaxed]                = "  Including relaxed               ";
diff --git a/compiler-rt/lib/tsan/rtl/tsan_stat.h b/compiler-rt/lib/tsan/rtl/tsan_stat.h
index cf493e6..58bfd3e 100644
@@ -77,6 +77,7 @@ enum StatType {
   StatAtomicFetchAnd,
   StatAtomicFetchOr,
   StatAtomicFetchXor,
+  StatAtomicFetchNand,
   StatAtomicCAS,
   StatAtomicFence,
   StatAtomicRelaxed,
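
The new enumerator pairs with the name-table entry in tsan_stat.cc above: StatOutput() indexes name[] by StatType, so each counter needs a matching string (the patch also fills in the previously missing fetch_sub/and/or/xor names). A schematic of the pairing, not the runtime's actual code:

  // Schematic only: the counter enum and the printable-name table must be
  // kept in sync, since the report printer indexes one by the other.
  enum StatType { StatAtomicFetchXor, StatAtomicFetchNand, StatCnt };

  static const char *name[StatCnt] = {};
  void InitNames() {
    name[StatAtomicFetchXor]  = "            fetch_xor             ";
    name[StatAtomicFetchNand] = "            fetch_nand            ";
  }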