#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
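With this change all three barriers go through alternative(): CPUs that report the required SSE feature bit get the real fence instruction (mfence and lfence need X86_FEATURE_XMM2, sfence only X86_FEATURE_XMM), and older CPUs fall back to a locked add to the stack, which is also fully serializing. A minimal user-space sketch of the same dispatch idea, using GCC's __builtin_cpu_supports() as a stand-in for the kernel's boot-time instruction patching; the function name full_barrier() is illustrative and not part of this patch:

#include <stdio.h>

/* Illustrative stand-in for the kernel's alternative() mechanism: pick the
 * barrier at run time from a CPU feature check.  The kernel instead patches
 * the instruction in place at boot, so there is no branch on every call. */
static void full_barrier(void)
{
    int dummy = 0;

    if (__builtin_cpu_supports("sse2"))
        __asm__ __volatile__("mfence" ::: "memory");
    else
        /* a locked read-modify-write is fully serializing on every x86 CPU */
        __asm__ __volatile__("lock; addl $0,%0" : "+m" (dummy) : : "memory", "cc");
}

int main(void)
{
    full_barrier();
    printf("barrier executed, SSE2 %s\n",
           __builtin_cpu_supports("sse2") ? "present" : "absent");
    return 0;
}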
/**
 * read_barrier_depends - Flush all pending reads that subsequents reads
 * depend on.
 */

#define read_barrier_depends() do { } while(0)
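read_barrier_depends() stays a no-op here because x86 never hoists a load above the load its address depends on; only Alpha needs a real barrier at this point. A small runnable sketch of the publication pattern the macro exists for, with every name (read_barrier_depends_, struct msg, shared) purely illustrative:

#include <stdio.h>

/* Publication pattern: load a pointer, then load through it.  On x86 the
 * second, data-dependent load can never be satisfied before the first, so
 * the barrier expands to nothing; Alpha would need a real instruction. */
#define read_barrier_depends_() do { } while (0)

struct msg { int value; };

static struct msg one_msg = { .value = 42 };
static struct msg *volatile shared = &one_msg;  /* normally set by another thread */

int main(void)
{
    struct msg *m = shared;        /* first load: the pointer              */
    read_barrier_depends_();       /* nothing emitted on x86               */
    printf("%d\n", m->value);      /* second load: depends on the first    */
    return 0;
}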
-#ifdef CONFIG_X86_OOSTORE
-/* Actually there are no OOO store capable CPUs for now that do SSE,
- but make it already an possibility. */
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-#else
-#define wmb() __asm__ __volatile__ ("": : :"memory")
-#endif
-
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
-#define smp_wmb() wmb()
+#ifdef CONFIG_X86_OOSTORE
+# define smp_wmb() wmb()
+#else
+# define smp_wmb() barrier()
+#endif
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
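On the SMP side the patch moves in the opposite direction: unless CONFIG_X86_OOSTORE is set, smp_wmb() becomes a plain compiler barrier, since ordinary x86 stores already become visible to other CPUs in program order and only the compiler can reorder them. A minimal user-space sketch of why that is enough to publish data between two threads; the smp_wmb_() macro, the pthread scaffolding, and the variable names are illustrative rather than part of the patch:

#include <pthread.h>
#include <stdio.h>

/* User-space analogue of smp_wmb() in the !CONFIG_X86_OOSTORE case above:
 * a compiler-only barrier, which on x86 is enough to order two ordinary
 * stores as seen by another CPU. */
#define smp_wmb_() __asm__ __volatile__("" ::: "memory")

static int payload;
static volatile int ready;

static void *producer(void *arg)
{
    (void)arg;
    payload = 42;    /* write the data ...                     */
    smp_wmb_();      /* ... keep the compiler from sinking it  */
    ready = 1;       /* ... then publish the flag              */
    return NULL;
}

static void *consumer(void *arg)
{
    (void)arg;
    while (!ready)
        ;            /* spin until the flag is published       */
    __asm__ __volatile__("" ::: "memory");  /* read side also needs only a compiler barrier on x86 */
    printf("payload = %d\n", payload);      /* prints 42 on x86 */
    return NULL;
}

int main(void)
{
    pthread_t p, c;

    pthread_create(&c, NULL, consumer, NULL);
    pthread_create(&p, NULL, producer, NULL);
    pthread_join(p, NULL);
    pthread_join(c, NULL);
    return 0;
}

The remaining hunk applies the same reasoning to the 64-bit variant of the header, where SSE2 is always present: wmb() becomes an unconditional sfence and the CONFIG_UNORDERED_IO special case disappears.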
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
-
-#ifdef CONFIG_UNORDERED_IO
#define wmb() asm volatile("sfence" ::: "memory")
-#else
-#define wmb() asm volatile("" ::: "memory")
-#endif
+
#define read_barrier_depends() do {} while(0)
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
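wmb() itself stays a real sfence on both variants because stores can still be reordered when they bypass the normal write-ordering rules, notably non-temporal (movnt) stores and writes to write-combining memory such as a framebuffer; a driver that fills a buffer that way has to fence before it rings the doorbell. A hedged user-space illustration using SSE2 streaming stores, where frame and doorbell are stand-ins for what would really be ioremapped device memory:

#include <emmintrin.h>   /* _mm_stream_si32 (SSE2) */
#include <xmmintrin.h>   /* _mm_sfence      (SSE)  */
#include <stdio.h>

/* Stand-ins for device memory: in a real driver these would live in
 * write-combining MMIO space, which is exactly where sfence matters. */
static int frame[1024];
static volatile int doorbell;

static void post_frame(void)
{
    int i;

    /* Non-temporal stores bypass the cache and are weakly ordered
     * against later stores ...                                      */
    for (i = 0; i < 1024; i++)
        _mm_stream_si32(&frame[i], i);

    /* ... so fence before the "device" is told the data is ready;
     * this is the user-space counterpart of wmb() above.            */
    _mm_sfence();

    doorbell = 1;
}

int main(void)
{
    post_frame();
    printf("doorbell=%d frame[1023]=%d\n", doorbell, frame[1023]);
    return 0;
}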