namespace v8 {
namespace internal {
+typedef char Atomic8;
typedef int32_t Atomic32;
#ifdef V8_HOST_ARCH_64_BIT
// We need to be able to go between Atomic64 and AtomicWord implicitly. This
Atomic32 new_value);
void MemoryBarrier();
+void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value);
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);
+Atomic8 NoBarrier_Load(volatile const Atomic8* ptr);
Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);
return prev;
}
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
); // NOLINT
}
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return *ptr;
+}
+
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
return *ptr;
}
+// Byte accessors.
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; }
+
} } // namespace v8::internal
#endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
return Acquire_CompareAndSwap(ptr, old_value, new_value);
}
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
*ptr = value;
}
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return *ptr;
+}
+
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
*ptr = value;
}
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return *ptr;
+}
+
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
return cmp;
}
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ __tsan_atomic8_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
__tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}
__tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return __tsan_atomic8_load(ptr, __tsan_memory_order_relaxed);
+}
+
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
// See comments in Atomic64 version of Release_Store(), below.
}
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return *ptr;
+}
+
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+ *ptr = value;
+}
+
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
// See comments in Atomic64 version of Release_Store() below.
}
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+ return *ptr;
+}
+
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
}
+// Merge this test with TestStore as soon as we have Atomic8 acquire
+// and release stores.
+static void TestStoreAtomic8() {
+ const Atomic8 kVal1 = TestFillValue<Atomic8>();
+ const Atomic8 kVal2 = static_cast<Atomic8>(-1);
+
+ Atomic8 value;
+
+ NoBarrier_Store(&value, kVal1);
+ CHECK_EQU(kVal1, value);
+ NoBarrier_Store(&value, kVal2);
+ CHECK_EQU(kVal2, value);
+}
+
+
// This is a simple sanity check to ensure that values are correct.
// Not testing atomicity.
template <class AtomicType>
}
+// Merge this test with TestLoad as soon as we have Atomic8 acquire
+// and release loads.
+static void TestLoadAtomic8() {
+ const Atomic8 kVal1 = TestFillValue<Atomic8>();
+ const Atomic8 kVal2 = static_cast<Atomic8>(-1);
+
+ Atomic8 value;
+
+ value = kVal1;
+ CHECK_EQU(kVal1, NoBarrier_Load(&value));
+ value = kVal2;
+ CHECK_EQU(kVal2, NoBarrier_Load(&value));
+}
+
+
TEST(AtomicIncrement) {
TestAtomicIncrement<Atomic32>();
TestAtomicIncrement<AtomicWord>();
TEST(Store) {
  // Atomic8 is exercised by a dedicated helper until it grows
  // acquire/release stores.
  TestStoreAtomic8();
  TestStore<Atomic32>();
  TestStore<AtomicWord>();
}
TEST(Load) {
  // Atomic8 is exercised by a dedicated helper until it grows
  // acquire/release loads.
  TestLoadAtomic8();
  TestLoad<Atomic32>();
  TestLoad<AtomicWord>();
}