// default values should hopefully be pretty safe.
struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
false, // bug can't exist before process spawns multiple threads
+#if !defined(__SSE2__)
+ false, // no SSE2
+#endif
};
} else {
AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
}
+
+#if !defined(__SSE2__)
+  // edx bit 26 is SSE2, which we use to tell us whether we can use mfence.
+  AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
+#endif
}
class AtomicOpsx86Initializer {
struct AtomicOps_x86CPUFeatureStruct {
bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
// after acquire compare-and-swap.
+#if !defined(__SSE2__)
+ bool has_sse2; // Processor has SSE2.
+#endif
};
extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
*ptr = value;
}
-// We require SSE2, so mfence is guaranteed to exist.
+#if defined(__x86_64__) || defined(__SSE2__)
+
+// On x86-64, and on 32-bit builds compiled with SSE2 enabled, "mfence" is
+// guaranteed to exist, so the memory barrier can simply use it.
inline void MemoryBarrier() {
__asm__ __volatile__("mfence" : : : "memory");
}
MemoryBarrier();
}
+#else
+
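+// Pre-SSE2 fallback: pick the barrier at run time from the cpuid-detected
+// has_sse2 flag, using mfence when available and a locked exchange otherwise.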
+inline void MemoryBarrier() {
+ if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+ __asm__ __volatile__("mfence" : : : "memory");
+ } else { // mfence is faster but not present on PIII
+ Atomic32 x = 0;
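+    // NoBarrier_AtomicExchange compiles to an implicitly locked xchg;
+    // locked instructions order all earlier loads and stores, so the dummy
+    // exchange doubles as a full memory barrier.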
+ NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII
+ }
+}
+
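+// On SSE2-capable processors, do a plain store followed by mfence; otherwise
+// fall back to an atomic exchange, which itself provides the barrier.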
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+ *ptr = value;
+ __asm__ __volatile__("mfence" : : : "memory");
+ } else {
+ NoBarrier_AtomicExchange(ptr, value);
+ // acts as a barrier on PIII
+ }
+}
+#endif
+
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
ATOMICOPS_COMPILER_BARRIER();
*ptr = value; // An x86 store acts as a release barrier.