Atomic ops: Sync with Chromium and add unit test.
author jarin@chromium.org <jarin@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Mon, 10 Mar 2014 06:43:21 +0000 (06:43 +0000)
committer jarin@chromium.org <jarin@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Mon, 10 Mar 2014 06:43:21 +0000 (06:43 +0000)
R=jarin@chromium.org

Review URL: https://codereview.chromium.org/129813008

Patch from Cosmin Truta <ctruta@gmail.com>.

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@19738 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

src/atomicops.h
src/atomicops_internals_arm_gcc.h
src/atomicops_internals_atomicword_compat.h [new file with mode: 0644]
src/atomicops_internals_mac.h [moved from src/atomicops_internals_x86_macosx.h with 63% similarity]
src/atomicops_internals_tsan.h
src/atomicops_internals_x86_msvc.h
test/cctest/cctest.gyp
test/cctest/test-atomicops.cc [new file with mode: 0644]

index d7d4df6..aa2b22f 100644 (file)
 #include "../include/v8.h"
 #include "globals.h"
 
+#if defined(_WIN32) && defined(V8_HOST_ARCH_64_BIT)
+// windows.h #defines this (only on x64). This causes problems because the
+// public API also uses MemoryBarrier as the public name for this fence. So, on
+// x64, undef it, and call its documented
+// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
+// implementation directly.
+#undef MemoryBarrier
+#endif
+
 namespace v8 {
 namespace internal {
 
@@ -58,9 +67,7 @@ typedef int32_t Atomic32;
 #ifdef V8_HOST_ARCH_64_BIT
 // We need to be able to go between Atomic64 and AtomicWord implicitly.  This
 // means Atomic64 and AtomicWord should be the same type on 64-bit.
-#if defined(__ILP32__) || defined(__APPLE__)
-// MacOS is an exception to the implicit conversion rule above,
-// because it uses long for intptr_t.
+#if defined(__ILP32__)
 typedef int64_t Atomic64;
 #else
 typedef intptr_t Atomic64;
@@ -69,11 +76,7 @@ typedef intptr_t Atomic64;
 
 // Use AtomicWord for a machine-sized pointer.  It will use the Atomic32 or
 // Atomic64 routines below, depending on your architecture.
-#if defined(__OpenBSD__) && defined(__i386__)
-typedef Atomic32 AtomicWord;
-#else
 typedef intptr_t AtomicWord;
-#endif
 
 // Atomically execute:
 //      result = *ptr;
@@ -155,18 +158,24 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
 #include "atomicops_internals_tsan.h"
 #elif defined(_MSC_VER) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
 #include "atomicops_internals_x86_msvc.h"
-#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
-#include "atomicops_internals_x86_macosx.h"
-#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
-#include "atomicops_internals_x86_gcc.h"
+#elif defined(__APPLE__)
+#include "atomicops_internals_mac.h"
 #elif defined(__GNUC__) && V8_HOST_ARCH_A64
 #include "atomicops_internals_a64_gcc.h"
 #elif defined(__GNUC__) && V8_HOST_ARCH_ARM
 #include "atomicops_internals_arm_gcc.h"
+#elif defined(__GNUC__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+#include "atomicops_internals_x86_gcc.h"
 #elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
 #include "atomicops_internals_mips_gcc.h"
 #else
 #error "Atomic operations are not supported on your platform"
 #endif
 
+// On some platforms we need additional declarations to make
+// AtomicWord compatible with our other Atomic* types.
+#if defined(__APPLE__) || defined(__OpenBSD__)
+#include "atomicops_internals_atomicword_compat.h"
+#endif
+
 #endif  // V8_ATOMICOPS_H_
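
A minimal usage sketch of the interface declared above (illustrative only,
not part of this patch; assumes the code lives inside namespace v8::internal):

    // Publish a value from one thread and consume it from another using
    // the acquire/release primitives declared in atomicops.h.
    Atomic32 g_flag = 0;
    Atomic32 g_data = 0;

    void Producer() {
      NoBarrier_Store(&g_data, 42);
      Release_Store(&g_flag, 1);  // prior stores visible before flag == 1
    }

    void Consumer() {
      if (Acquire_Load(&g_flag) == 1) {
        Atomic32 data = NoBarrier_Load(&g_data);  // guaranteed to be 42
      }
    }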
index 6c30256..918920d 100644 (file)
 #ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
 #define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
 
+#if defined(__QNXNTO__)
+#include <sys/cpuinline.h>
+#endif
+
 namespace v8 {
 namespace internal {
 
-// 0xffff0fc0 is the hard coded address of a function provided by
-// the kernel which implements an atomic compare-exchange. On older
-// ARM architecture revisions (pre-v6) this may be implemented using
-// a syscall. This address is stable, and in active use (hard coded)
-// by at least glibc-2.7 and the Android C library.
-typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
-                                           Atomic32 new_value,
-                                           volatile Atomic32* ptr);
-LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
-    (LinuxKernelCmpxchgFunc) 0xffff0fc0;
+// Memory barriers on ARM are funky, but the kernel is here to help:
+//
+// * ARMv5 didn't support SMP; there is no memory barrier instruction at
+//   all on this architecture, or when targeting its machine code.
+//
+// * Some ARMv6 CPUs support SMP. A full memory barrier can be produced by
+//   writing a random value to a very specific coprocessor register.
+//
+// * On ARMv7, the "dmb" instruction is used to perform a full memory
+//   barrier (though writing to the co-processor will still work).
+//   However, on single-core devices (e.g. Nexus One or Nexus S),
+//   this instruction will take up to 200 ns, which is huge, even though
+//   it's completely unneeded on these devices.
+//
+// * There is no easy way to determine at runtime if the device is
+//   single or multi-core. However, the kernel provides a useful helper
+//   function at a fixed memory address (0xffff0fa0), which will always
+//   perform a memory barrier in the most efficient way, i.e. on
+//   single-core devices, this is an empty function that exits immediately.
+//   On multi-core devices, it implements a full memory barrier.
+//
+// * This source could be compiled to ARMv5 machine code that runs on a
+//   multi-core ARMv6 or ARMv7 device. In this case, memory barriers
+//   are needed for correct execution. Always call the kernel helper, even
+//   when targeting ARMv5TE.
+//
 
-typedef void (*LinuxKernelMemoryBarrierFunc)(void);
-LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
-    (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
+inline void MemoryBarrier() {
+#if defined(__linux__) || defined(__ANDROID__)
+  // Note: This is a function call, which is also an implicit compiler barrier.
+  typedef void (*KernelMemoryBarrierFunc)();
+  ((KernelMemoryBarrierFunc)0xffff0fa0)();
+#elif defined(__QNXNTO__)
+  __cpu_membarrier();
+#else
+#error MemoryBarrier() is not implemented on this platform.
+#endif
+}
 
+// An ARM toolchain would only define one of these depending on which
+// variant of the target architecture is being used. This tests against
+// any known ARMv6 or ARMv7 variant, where it is possible to directly
+// use ldrex/strex instructions to implement fast atomic operations.
+#if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
+    defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
+    defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
+    defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
+    defined(__ARM_ARCH_6KZ__) || defined(__ARM_ARCH_6T2__)
 
 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
-  Atomic32 prev_value = *ptr;
+  Atomic32 prev_value;
+  int reloop;
   do {
-    if (!pLinuxKernelCmpxchg(old_value, new_value,
-                             const_cast<Atomic32*>(ptr))) {
-      return old_value;
-    }
-    prev_value = *ptr;
-  } while (prev_value == old_value);
+    // The following is equivalent to:
+    //
+    //   prev_value = LDREX(ptr)
+    //   reloop = 0
+    //   if (prev_value != old_value)
+    //      reloop = STREX(ptr, new_value)
+    __asm__ __volatile__("    ldrex %0, [%3]\n"
+                         "    mov %1, #0\n"
+                         "    cmp %0, %4\n"
+#ifdef __thumb2__
+                         "    it eq\n"
+#endif
+                         "    strexeq %1, %5, [%3]\n"
+                         : "=&r"(prev_value), "=&r"(reloop), "+m"(*ptr)
+                         : "r"(ptr), "r"(old_value), "r"(new_value)
+                         : "cc", "memory");
+  } while (reloop != 0);
   return prev_value;
 }
 
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 result = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return result;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 value;
+  int reloop;
+  do {
+    // Equivalent to:
+    //
+    //  value = LDREX(ptr)
+    //  value += increment
+    //  reloop = STREX(ptr, value)
+    //
+    __asm__ __volatile__("    ldrex %0, [%3]\n"
+                         "    add %0, %0, %4\n"
+                         "    strex %1, %0, [%3]\n"
+                         : "=&r"(value), "=&r"(reloop), "+m"(*ptr)
+                         : "r"(ptr), "r"(increment)
+                         : "cc", "memory");
+  } while (reloop);
+  return value;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  // TODO(digit): Investigate if it's possible to implement this with
+  // a single MemoryBarrier() operation between the LDREX and STREX.
+  // See http://crbug.com/246514
+  MemoryBarrier();
+  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+  return result;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 old_value;
+  int reloop;
+  do {
+    // old_value = LDREX(ptr)
+    // reloop = STREX(ptr, new_value)
+    __asm__ __volatile__("   ldrex %0, [%3]\n"
+                         "   strex %1, %4, [%3]\n"
+                         : "=&r"(old_value), "=&r"(reloop), "+m"(*ptr)
+                         : "r"(ptr), "r"(new_value)
+                         : "cc", "memory");
+  } while (reloop != 0);
+  return old_value;
+}
+
+// This tests against any known ARMv5 variant.
+#elif defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) || \
+      defined(__ARM_ARCH_5TE__) || defined(__ARM_ARCH_5TEJ__)
+
+// The kernel also provides a helper function to perform an atomic
+// compare-and-swap operation at the hard-wired address 0xffff0fc0.
+// On ARMv5, this is implemented by a special code path that the kernel
+// detects and treats specially when thread pre-emption happens.
+// On ARMv6 and higher, it uses LDREX/STREX instructions instead.
+//
+// Note that this always perform a full memory barrier, there is no
+// need to add calls MemoryBarrier() before or after it. It also
+// returns 0 on success, and 1 on exit.
+//
+// Available and reliable since Linux 2.6.24. Both Android and ChromeOS
+// use newer kernel revisions, so this should not be a concern.
+namespace {
+
+inline int LinuxKernelCmpxchg(Atomic32 old_value,
+                              Atomic32 new_value,
+                              volatile Atomic32* ptr) {
+  typedef int (*KernelCmpxchgFunc)(Atomic32, Atomic32, volatile Atomic32*);
+  return ((KernelCmpxchgFunc)0xffff0fc0)(old_value, new_value, ptr);
+}
+
+}  // namespace
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev_value;
+  for (;;) {
+    prev_value = *ptr;
+    if (prev_value != old_value)
+      return prev_value;
+    if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
+      return old_value;
+  }
+}
+
 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                          Atomic32 new_value) {
   Atomic32 old_value;
   do {
     old_value = *ptr;
-  } while (pLinuxKernelCmpxchg(old_value, new_value,
-                               const_cast<Atomic32*>(ptr)));
+  } while (LinuxKernelCmpxchg(old_value, new_value, ptr));
   return old_value;
 }
 
@@ -86,8 +237,7 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
     // Atomic exchange the old value with an incremented one.
     Atomic32 old_value = *ptr;
     Atomic32 new_value = old_value + increment;
-    if (pLinuxKernelCmpxchg(old_value, new_value,
-                            const_cast<Atomic32*>(ptr)) == 0) {
+    if (!LinuxKernelCmpxchg(old_value, new_value, ptr)) {
       // The exchange took place as expected.
       return new_value;
     }
@@ -98,23 +248,46 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  Atomic32 prev_value;
+  for (;;) {
+    prev_value = *ptr;
+    if (prev_value != old_value) {
+      // Always ensure acquire semantics.
+      MemoryBarrier();
+      return prev_value;
+    }
+    if (!LinuxKernelCmpxchg(old_value, new_value, ptr))
+      return old_value;
+  }
 }
 
 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  // This could be implemented as:
+  //    MemoryBarrier();
+  //    return NoBarrier_CompareAndSwap();
+  //
+  // But that would use 3 barriers per successful CAS. For better
+  // performance, use Acquire_CompareAndSwap(). Its implementation
+  // guarantees that:
+  // - A successful swap uses only 2 barriers (in the kernel helper).
+  // - An early return due to (prev_value != old_value) performs
+  //   a memory barrier with no store, which is equivalent to the
+  //   generic implementation above.
+  return Acquire_CompareAndSwap(ptr, old_value, new_value);
 }
 
+#else
+#  error "Your CPU's ARM architecture is not supported yet"
+#endif
+
+// NOTE: Atomicity of the following load and store operations is only
+// guaranteed when |ptr| values are 32-bit aligned.
+
 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
 }
 
-inline void MemoryBarrier() {
-  pLinuxKernelMemoryBarrier();
-}
-
 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
   MemoryBarrier();
@@ -125,9 +298,7 @@ inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
 }
 
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
-  return *ptr;
-}
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
 
 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
   Atomic32 value = *ptr;
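
As a usage note for the acquire/release operations implemented above, a simple
test-and-set spinlock can be layered on them. A sketch (the SpinLock class is
illustrative, not part of this patch):

    // Lock() gets acquire semantics from Acquire_CompareAndSwap, so reads
    // in the critical section cannot float above it; Unlock() uses
    // Release_Store so writes cannot sink below it.
    class SpinLock {
     public:
      SpinLock() : state_(0) {}
      void Lock() {
        // Acquire_CompareAndSwap returns the previous value; 0 means the
        // swap 0 -> 1 succeeded and we own the lock.
        while (Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
          // Spin until the lock is released.
        }
      }
      void Unlock() { Release_Store(&state_, 0); }
     private:
      Atomic32 state_;
    };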
diff --git a/src/atomicops_internals_atomicword_compat.h b/src/atomicops_internals_atomicword_compat.h
new file mode 100644 (file)
index 0000000..5934f70
--- /dev/null
@@ -0,0 +1,122 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+#define V8_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
+
+// AtomicWord is a synonym for intptr_t, and Atomic32 is a synonym for int32,
+// which in turn means int. On some LP32 platforms, intptr_t is an int, but
+// on others, it's a long. When AtomicWord and Atomic32 are based on different
+// fundamental types, their pointers are incompatible.
+//
+// This file defines function overloads to allow both AtomicWord and Atomic32
+// data to be used with this interface.
+//
+// On LP64 platforms, AtomicWord and Atomic64 are both always long,
+// so this problem doesn't occur.
+
+#if !defined(V8_HOST_ARCH_64_BIT)
+
+namespace v8 {
+namespace internal {
+
+inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
+                                           AtomicWord old_value,
+                                           AtomicWord new_value) {
+  return NoBarrier_CompareAndSwap(
+      reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
+                                           AtomicWord new_value) {
+  return NoBarrier_AtomicExchange(
+      reinterpret_cast<volatile Atomic32*>(ptr), new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
+                                            AtomicWord increment) {
+  return NoBarrier_AtomicIncrement(
+      reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
+                                          AtomicWord increment) {
+  return Barrier_AtomicIncrement(
+      reinterpret_cast<volatile Atomic32*>(ptr), increment);
+}
+
+inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
+                                         AtomicWord old_value,
+                                         AtomicWord new_value) {
+  return v8::internal::Acquire_CompareAndSwap(
+      reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
+                                         AtomicWord old_value,
+                                         AtomicWord new_value) {
+  return v8::internal::Release_CompareAndSwap(
+      reinterpret_cast<volatile Atomic32*>(ptr), old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
+  NoBarrier_Store(
+      reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
+  return v8::internal::Acquire_Store(
+      reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
+  return v8::internal::Release_Store(
+      reinterpret_cast<volatile Atomic32*>(ptr), value);
+}
+
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
+  return NoBarrier_Load(
+      reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
+  return v8::internal::Acquire_Load(
+      reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
+  return v8::internal::Release_Load(
+      reinterpret_cast<volatile const Atomic32*>(ptr));
+}
+
+} }  // namespace v8::internal
+
+#endif  // !defined(V8_HOST_ARCH_64_BIT)
+
+#endif  // V8_ATOMICOPS_INTERNALS_ATOMICWORD_COMPAT_H_
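
To see why the overloads above are needed, consider a 32-bit target where
intptr_t is long (illustrative sketch, not part of this patch):

    // AtomicWord is long here, so &counter has type volatile long*, which
    // does not implicitly convert to volatile Atomic32* (volatile int*),
    // even though both point at 32-bit storage. The overload above bridges
    // the gap with a reinterpret_cast.
    AtomicWord counter = 0;
    NoBarrier_AtomicIncrement(&counter, 1);  // picks the AtomicWord overload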
similarity index 63%
rename from src/atomicops_internals_x86_macosx.h
rename to src/atomicops_internals_mac.h
index bfb02b3..4bd0c09 100644 (file)
@@ -27,8 +27,8 @@
 
 // This file is an internal atomic implementation, use atomicops.h instead.
 
-#ifndef V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
-#define V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
+#ifndef V8_ATOMICOPS_INTERNALS_MAC_H_
+#define V8_ATOMICOPS_INTERNALS_MAC_H_
 
 #include <libkern/OSAtomic.h>
 
@@ -65,7 +65,7 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
 }
 
 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
-                                          Atomic32 increment) {
+                                        Atomic32 increment) {
   return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
 }
 
@@ -132,7 +132,7 @@ inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
   Atomic64 prev_value;
   do {
     if (OSAtomicCompareAndSwap64(old_value, new_value,
-                                 const_cast<Atomic64*>(ptr))) {
+                                 reinterpret_cast<volatile int64_t*>(ptr))) {
       return old_value;
     }
     prev_value = *ptr;
@@ -146,18 +146,19 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
   do {
     old_value = *ptr;
   } while (!OSAtomicCompareAndSwap64(old_value, new_value,
-                                     const_cast<Atomic64*>(ptr)));
+                                     reinterpret_cast<volatile int64_t*>(ptr)));
   return old_value;
 }
 
 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                           Atomic64 increment) {
-  return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
+  return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
 }
 
 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                         Atomic64 increment) {
-  return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
+  return OSAtomicAdd64Barrier(increment,
+                              reinterpret_cast<volatile int64_t*>(ptr));
 }
 
 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
@@ -165,8 +166,8 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 new_value) {
   Atomic64 prev_value;
   do {
-    if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
-                                        const_cast<Atomic64*>(ptr))) {
+    if (OSAtomicCompareAndSwap64Barrier(
+        old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
       return old_value;
     }
     prev_value = *ptr;
@@ -213,89 +214,6 @@ inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
 
 #endif  // defined(__LP64__)
 
-// MacOS uses long for intptr_t, AtomicWord and Atomic32 are always different
-// on the Mac, even when they are the same size.  We need to explicitly cast
-// from AtomicWord to Atomic32/64 to implement the AtomicWord interface.
-#ifdef __LP64__
-#define AtomicWordCastType Atomic64
-#else
-#define AtomicWordCastType Atomic32
-#endif
-
-inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
-                                           AtomicWord old_value,
-                                           AtomicWord new_value) {
-  return NoBarrier_CompareAndSwap(
-      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
-      old_value, new_value);
-}
-
-inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
-                                           AtomicWord new_value) {
-  return NoBarrier_AtomicExchange(
-      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
-}
-
-inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
-                                            AtomicWord increment) {
-  return NoBarrier_AtomicIncrement(
-      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
-}
-
-inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
-                                          AtomicWord increment) {
-  return Barrier_AtomicIncrement(
-      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
-}
-
-inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
-                                         AtomicWord old_value,
-                                         AtomicWord new_value) {
-  return v8::internal::Acquire_CompareAndSwap(
-      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
-      old_value, new_value);
-}
-
-inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
-                                         AtomicWord old_value,
-                                         AtomicWord new_value) {
-  return v8::internal::Release_CompareAndSwap(
-      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
-      old_value, new_value);
-}
-
-inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) {
-  NoBarrier_Store(
-      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
-inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
-  return v8::internal::Acquire_Store(
-      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
-inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
-  return v8::internal::Release_Store(
-      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
-}
-
-inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) {
-  return NoBarrier_Load(
-      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
-inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
-  return v8::internal::Acquire_Load(
-      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
-inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
-  return v8::internal::Release_Load(
-      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
-}
-
-#undef AtomicWordCastType
-
 } }  // namespace v8::internal
 
-#endif  // V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_
+#endif  // V8_ATOMICOPS_INTERNALS_MAC_H_
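
A note on the reinterpret_casts introduced above: on LP64 Mac, Atomic64 is
intptr_t (a long), while the 64-bit OSAtomic entry points take
volatile int64_t* (a long long*), so the pointer types are distinct even
though both point at 64-bit storage. A sketch of the call shape (illustrative):

    // From <libkern/OSAtomic.h>:
    //   bool OSAtomicCompareAndSwap64(int64_t oldValue, int64_t newValue,
    //                                 volatile int64_t* theValue);
    Atomic64 word = 0;
    OSAtomicCompareAndSwap64(0, 1, reinterpret_cast<volatile int64_t*>(&word));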
index b5162ba..1819798 100644 (file)
@@ -53,10 +53,7 @@ extern struct AtomicOps_x86CPUFeatureStruct
 
 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
 
-#ifdef __cplusplus
 extern "C" {
-#endif
-
 typedef char  __tsan_atomic8;
 typedef short __tsan_atomic16;  // NOLINT
 typedef int   __tsan_atomic32;
@@ -80,152 +77,149 @@ typedef enum {
   __tsan_memory_order_seq_cst,
 } __tsan_memory_order;
 
-__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
     __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
     __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
     __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
     __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a,
     __tsan_memory_order mo);
 
-void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
+void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
     __tsan_memory_order mo);
-void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
+void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
     __tsan_memory_order mo);
-void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
+void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
     __tsan_memory_order mo);
-void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
+void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
     __tsan_memory_order mo);
-void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
+void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v,
     __tsan_memory_order mo);
 
-__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
     __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
     __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
     __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
     __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a,
     __tsan_atomic128 v, __tsan_memory_order mo);
 
-__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
     __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
     __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
     __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
     __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a,
     __tsan_atomic128 v, __tsan_memory_order mo);
 
-__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
     __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
     __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
     __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
     __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a,
     __tsan_atomic128 v, __tsan_memory_order mo);
 
-__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
     __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
     __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
     __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
     __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a,
     __tsan_atomic128 v, __tsan_memory_order mo);
 
-__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
     __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
     __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
     __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
     __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a,
     __tsan_atomic128 v, __tsan_memory_order mo);
 
-__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
+__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a,
     __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
+__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a,
     __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
+__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a,
     __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
-    __tsan_atomic64 v, __tsan_memory_order mo);
-__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
+__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a,
     __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a,
+    __tsan_atomic128 v, __tsan_memory_order mo);
 
-int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
-    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
+    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
     __tsan_memory_order fail_mo);
-int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
-    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
+    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
     __tsan_memory_order fail_mo);
-int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
-    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
+    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
     __tsan_memory_order fail_mo);
-int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
-    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
+    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
     __tsan_memory_order fail_mo);
-int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
-    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a,
+    __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
     __tsan_memory_order fail_mo);
 
-int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
-    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
+    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
     __tsan_memory_order fail_mo);
-int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
-    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
+    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
     __tsan_memory_order fail_mo);
-int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
-    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
+    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
     __tsan_memory_order fail_mo);
-int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
-    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
+    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
     __tsan_memory_order fail_mo);
-int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
-    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a,
+    __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
     __tsan_memory_order fail_mo);
 
 __tsan_atomic8 __tsan_atomic8_compare_exchange_val(
-    volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
+    volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v,
     __tsan_memory_order mo, __tsan_memory_order fail_mo);
 __tsan_atomic16 __tsan_atomic16_compare_exchange_val(
-    volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
+    volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v,
     __tsan_memory_order mo, __tsan_memory_order fail_mo);
 __tsan_atomic32 __tsan_atomic32_compare_exchange_val(
-    volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
+    volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v,
     __tsan_memory_order mo, __tsan_memory_order fail_mo);
 __tsan_atomic64 __tsan_atomic64_compare_exchange_val(
-    volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
+    volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v,
     __tsan_memory_order mo, __tsan_memory_order fail_mo);
 __tsan_atomic128 __tsan_atomic128_compare_exchange_val(
-    volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
+    volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v,
     __tsan_memory_order mo, __tsan_memory_order fail_mo);
 
 void __tsan_atomic_thread_fence(__tsan_memory_order mo);
 void __tsan_atomic_signal_fence(__tsan_memory_order mo);
-
-#ifdef __cplusplus
 }  // extern "C"
-#endif
 
 #endif  // #ifndef TSAN_INTERFACE_ATOMIC_H
 
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
   Atomic32 cmp = old_value;
@@ -234,37 +228,37 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
   return cmp;
 }
 
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                          Atomic32 new_value) {
   return __tsan_atomic32_exchange(ptr, new_value,
       __tsan_memory_order_relaxed);
 }
 
-inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
                                        Atomic32 new_value) {
   return __tsan_atomic32_exchange(ptr, new_value,
       __tsan_memory_order_acquire);
 }
 
-inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
+inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
                                        Atomic32 new_value) {
   return __tsan_atomic32_exchange(ptr, new_value,
       __tsan_memory_order_release);
 }
 
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                           Atomic32 increment) {
   return increment + __tsan_atomic32_fetch_add(ptr, increment,
       __tsan_memory_order_relaxed);
 }
 
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
   return increment + __tsan_atomic32_fetch_add(ptr, increment,
       __tsan_memory_order_acq_rel);
 }
 
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   Atomic32 cmp = old_value;
@@ -273,7 +267,7 @@ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
   return cmp;
 }
 
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   Atomic32 cmp = old_value;
@@ -282,33 +276,33 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
   return cmp;
 }
 
-inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
 }
 
-inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
   __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
 }
 
-inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
 }
 
-inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
   return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
 }
 
-inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
   return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
 }
 
-inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
   __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
   return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
 }
 
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                          Atomic64 old_value,
                                          Atomic64 new_value) {
   Atomic64 cmp = old_value;
@@ -317,60 +311,60 @@ inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
   return cmp;
 }
 
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                          Atomic64 new_value) {
   return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
 }
 
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
                                        Atomic64 new_value) {
   return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
 }
 
-inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
+inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
                                        Atomic64 new_value) {
   return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
 }
 
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                           Atomic64 increment) {
   return increment + __tsan_atomic64_fetch_add(ptr, increment,
       __tsan_memory_order_relaxed);
 }
 
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                         Atomic64 increment) {
   return increment + __tsan_atomic64_fetch_add(ptr, increment,
       __tsan_memory_order_acq_rel);
 }
 
-inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
   __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
 }
 
-inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
   __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
   __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
 }
 
-inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
   __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
 }
 
-inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
   return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
 }
 
-inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
   return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
 }
 
-inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
   __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
   return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
 }
 
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 cmp = old_value;
@@ -379,7 +373,7 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
   return cmp;
 }
 
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 cmp = old_value;
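
For reference, the __tsan_* mappings above correspond roughly to the C++11
std::atomic memory orders (a comment-only sketch, not part of this patch):

    //   NoBarrier_Load(p)    ~ load(memory_order_relaxed)
    //   Acquire_Load(p)      ~ load(memory_order_acquire)
    //   Release_Store(p, v)  ~ store(v, memory_order_release)
    //   NoBarrier_AtomicIncrement(p, i)
    //                        ~ fetch_add(i, memory_order_relaxed) + i
    //   Barrier_AtomicIncrement(p, i)
    //                        ~ fetch_add(i, memory_order_acq_rel) + i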
index fcf6a65..ad9cf9d 100644 (file)
 #include "checks.h"
 #include "win32-headers.h"
 
+#if defined(V8_HOST_ARCH_64_BIT)
+// windows.h #defines this (only on x64). This causes problems because the
+// public API also uses MemoryBarrier as the public name for this fence. So, on
+// x64, undef it, and call its documented
+// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
+// implementation directly.
+#undef MemoryBarrier
+#endif
+
 namespace v8 {
 namespace internal {
 
@@ -70,8 +79,13 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
 #error "We require at least vs2005 for MemoryBarrier"
 #endif
 inline void MemoryBarrier() {
+#if defined(V8_HOST_ARCH_64_BIT)
+  // See #undef and note at the top of this file.
+  __faststorefence();
+#else
   // We use MemoryBarrier from WinNT.h
   ::MemoryBarrier();
+#endif
 }
 
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
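
The reason for the x64-only #undef above is visible in winnt.h, where
MemoryBarrier is a macro on x64. A sketch of what would go wrong without the
#undef (illustrative):

    // winnt.h (x64): #define MemoryBarrier __faststorefence
    //
    // Without the #undef, the declaration
    //   inline void MemoryBarrier() { ... }
    // would preprocess into
    //   inline void __faststorefence() { ... }
    // colliding with the compiler intrinsic. After the #undef, the function
    // can be declared normally and call the intrinsic directly.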
index ed203d3..252fdf7 100644 (file)
@@ -53,6 +53,7 @@
         'test-alloc.cc',
         'test-api.cc',
         'test-ast.cc',
+        'test-atomicops.cc',
         'test-bignum.cc',
         'test-bignum-dtoa.cc',
         'test-circular-queue.cc',
diff --git a/test/cctest/test-atomicops.cc b/test/cctest/test-atomicops.cc
new file mode 100644 (file)
index 0000000..eba956c
--- /dev/null
@@ -0,0 +1,276 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "atomicops.h"
+
+using namespace v8::internal;
+
+
+#define CHECK_EQU(v1, v2) \
+  CHECK_EQ(static_cast<int64_t>(v1), static_cast<int64_t>(v2))
+
+#define NUM_BITS(T) (sizeof(T) * 8)
+
+
+template <class AtomicType>
+static void TestAtomicIncrement() {
+  // For now, we just test the single-threaded execution.
+
+  // Use a guard value to make sure that NoBarrier_AtomicIncrement doesn't
+  // go outside the expected address bounds.  This is to test that the
+  // 32-bit NoBarrier_AtomicIncrement doesn't do the wrong thing on 64-bit
+  // machines.
+  struct {
+    AtomicType prev_word;
+    AtomicType count;
+    AtomicType next_word;
+  } s;
+
+  AtomicType prev_word_value, next_word_value;
+  memset(&prev_word_value, 0xFF, sizeof(AtomicType));
+  memset(&next_word_value, 0xEE, sizeof(AtomicType));
+
+  s.prev_word = prev_word_value;
+  s.count = 0;
+  s.next_word = next_word_value;
+
+  CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 1), 1);
+  CHECK_EQU(s.count, 1);
+  CHECK_EQU(s.prev_word, prev_word_value);
+  CHECK_EQU(s.next_word, next_word_value);
+
+  CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 2), 3);
+  CHECK_EQU(s.count, 3);
+  CHECK_EQU(s.prev_word, prev_word_value);
+  CHECK_EQU(s.next_word, next_word_value);
+
+  CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 3), 6);
+  CHECK_EQU(s.count, 6);
+  CHECK_EQU(s.prev_word, prev_word_value);
+  CHECK_EQU(s.next_word, next_word_value);
+
+  CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -3), 3);
+  CHECK_EQU(s.count, 3);
+  CHECK_EQU(s.prev_word, prev_word_value);
+  CHECK_EQU(s.next_word, next_word_value);
+
+  CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -2), 1);
+  CHECK_EQU(s.count, 1);
+  CHECK_EQU(s.prev_word, prev_word_value);
+  CHECK_EQU(s.next_word, next_word_value);
+
+  CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -1), 0);
+  CHECK_EQU(s.count, 0);
+  CHECK_EQU(s.prev_word, prev_word_value);
+  CHECK_EQU(s.next_word, next_word_value);
+
+  CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -1), -1);
+  CHECK_EQU(s.count, -1);
+  CHECK_EQU(s.prev_word, prev_word_value);
+  CHECK_EQU(s.next_word, next_word_value);
+
+  CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, -4), -5);
+  CHECK_EQU(s.count, -5);
+  CHECK_EQU(s.prev_word, prev_word_value);
+  CHECK_EQU(s.next_word, next_word_value);
+
+  CHECK_EQU(NoBarrier_AtomicIncrement(&s.count, 5), 0);
+  CHECK_EQU(s.count, 0);
+  CHECK_EQU(s.prev_word, prev_word_value);
+  CHECK_EQU(s.next_word, next_word_value);
+}
+
+
+template <class AtomicType>
+static void TestCompareAndSwap() {
+  AtomicType value = 0;
+  AtomicType prev = NoBarrier_CompareAndSwap(&value, 0, 1);
+  CHECK_EQU(1, value);
+  CHECK_EQU(0, prev);
+
+  // Use a test value that has non-zero bits in both halves, for testing
+  // the 64-bit implementation on 32-bit platforms.
+  const AtomicType k_test_val =
+      (static_cast<AtomicType>(1) << (NUM_BITS(AtomicType) - 2)) + 11;
+  value = k_test_val;
+  prev = NoBarrier_CompareAndSwap(&value, 0, 5);
+  CHECK_EQU(k_test_val, value);
+  CHECK_EQU(k_test_val, prev);
+
+  value = k_test_val;
+  prev = NoBarrier_CompareAndSwap(&value, k_test_val, 5);
+  CHECK_EQU(5, value);
+  CHECK_EQU(k_test_val, prev);
+}
+
+
+template <class AtomicType>
+static void TestAtomicExchange() {
+  AtomicType value = 0;
+  AtomicType new_value = NoBarrier_AtomicExchange(&value, 1);
+  CHECK_EQU(1, value);
+  CHECK_EQU(0, new_value);
+
+  // Use a test value that has non-zero bits in both halves, for testing
+  // the 64-bit implementation on 32-bit platforms.
+  const AtomicType k_test_val =
+      (static_cast<AtomicType>(1) << (NUM_BITS(AtomicType) - 2)) + 11;
+  value = k_test_val;
+  new_value = NoBarrier_AtomicExchange(&value, k_test_val);
+  CHECK_EQU(k_test_val, value);
+  CHECK_EQU(k_test_val, new_value);
+
+  value = k_test_val;
+  new_value = NoBarrier_AtomicExchange(&value, 5);
+  CHECK_EQU(5, value);
+  CHECK_EQU(k_test_val, new_value);
+}
+
+
+template <class AtomicType>
+static void TestAtomicIncrementBounds() {
+  // Test at rollover boundary between int_max and int_min.
+  AtomicType test_val =
+      static_cast<AtomicType>(1) << (NUM_BITS(AtomicType) - 1);
+  AtomicType value = -1 ^ test_val;
+  AtomicType new_value = NoBarrier_AtomicIncrement(&value, 1);
+  CHECK_EQU(test_val, value);
+  CHECK_EQU(value, new_value);
+
+  NoBarrier_AtomicIncrement(&value, -1);
+  CHECK_EQU(-1 ^ test_val, value);
+
+  // Test at 32-bit boundary for 64-bit atomic type.
+  test_val = static_cast<AtomicType>(1) << (NUM_BITS(AtomicType) / 2);
+  value = test_val - 1;
+  new_value = NoBarrier_AtomicIncrement(&value, 1);
+  CHECK_EQU(test_val, value);
+  CHECK_EQU(value, new_value);
+
+  NoBarrier_AtomicIncrement(&value, -1);
+  CHECK_EQU(test_val - 1, value);
+}
+
+
+// Return an AtomicType with the value 0xa5a5a5..
+template <class AtomicType>
+static AtomicType TestFillValue() {
+  AtomicType val = 0;
+  memset(&val, 0xa5, sizeof(AtomicType));
+  return val;
+}
+
+
+// This is a simple sanity check to ensure that values are correct.
+// Not testing atomicity.
+template <class AtomicType>
+static void TestStore() {
+  const AtomicType kVal1 = TestFillValue<AtomicType>();
+  const AtomicType kVal2 = static_cast<AtomicType>(-1);
+
+  AtomicType value;
+
+  NoBarrier_Store(&value, kVal1);
+  CHECK_EQU(kVal1, value);
+  NoBarrier_Store(&value, kVal2);
+  CHECK_EQU(kVal2, value);
+
+  Acquire_Store(&value, kVal1);
+  CHECK_EQU(kVal1, value);
+  Acquire_Store(&value, kVal2);
+  CHECK_EQU(kVal2, value);
+
+  Release_Store(&value, kVal1);
+  CHECK_EQU(kVal1, value);
+  Release_Store(&value, kVal2);
+  CHECK_EQU(kVal2, value);
+}
+
+
+// This is a simple sanity check to ensure that values are correct.
+// Not testing atomicity.
+template <class AtomicType>
+static void TestLoad() {
+  const AtomicType kVal1 = TestFillValue<AtomicType>();
+  const AtomicType kVal2 = static_cast<AtomicType>(-1);
+
+  AtomicType value;
+
+  value = kVal1;
+  CHECK_EQU(kVal1, NoBarrier_Load(&value));
+  value = kVal2;
+  CHECK_EQU(kVal2, NoBarrier_Load(&value));
+
+  value = kVal1;
+  CHECK_EQU(kVal1, Acquire_Load(&value));
+  value = kVal2;
+  CHECK_EQU(kVal2, Acquire_Load(&value));
+
+  value = kVal1;
+  CHECK_EQU(kVal1, Release_Load(&value));
+  value = kVal2;
+  CHECK_EQU(kVal2, Release_Load(&value));
+}
+
+
+TEST(AtomicIncrement) {
+  TestAtomicIncrement<Atomic32>();
+  TestAtomicIncrement<AtomicWord>();
+}
+
+
+TEST(CompareAndSwap) {
+  TestCompareAndSwap<Atomic32>();
+  TestCompareAndSwap<AtomicWord>();
+}
+
+
+TEST(AtomicExchange) {
+  TestAtomicExchange<Atomic32>();
+  TestAtomicExchange<AtomicWord>();
+}
+
+
+TEST(AtomicIncrementBounds) {
+  TestAtomicIncrementBounds<Atomic32>();
+  TestAtomicIncrementBounds<AtomicWord>();
+}
+
+
+TEST(Store) {
+  TestStore<Atomic32>();
+  TestStore<AtomicWord>();
+}
+
+
+TEST(Load) {
+  TestLoad<Atomic32>();
+  TestLoad<AtomicWord>();
+}