From: Peter Zijlstra
Date: Thu, 13 Mar 2014 18:00:36 +0000 (+0100)
Subject: arch,m32r: Convert smp_mb__*()
X-Git-Tag: v4.9.8~6249^2~24
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=89607d5e2928d49bb64669d6d1e30e933a33f817;p=platform%2Fkernel%2Flinux-rpi3.git

arch,m32r: Convert smp_mb__*()

M32r uses asm-generic/barrier.h and its smp_mb() is barrier(); therefore
we can use the generic versions, which default to smp_mb().

Signed-off-by: Peter Zijlstra
Acked-by: Paul E. McKenney
Link: http://lkml.kernel.org/n/tip-wh6xljltyvmpy9t0bc80k1fy@git.kernel.org
Cc: Hirokazu Takata
Cc: Linus Torvalds
Cc: linux-kernel@vger.kernel.org
Cc: linux-m32r-ja@ml.linux-m32r.org
Cc: linux-m32r@ml.linux-m32r.org
Signed-off-by: Ingo Molnar
---

diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 0d81697..8ad0ed4 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -13,6 +13,7 @@
 #include <asm/assembler.h>
 #include <asm/cmpxchg.h>
 #include <asm/dcache_clear.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -308,10 +309,4 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
 	local_irq_restore(flags);
 }
 
-/* Atomic operations are already serializing on m32r */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
-
 #endif	/* _ASM_M32R_ATOMIC_H */
diff --git a/arch/m32r/include/asm/bitops.h b/arch/m32r/include/asm/bitops.h
index d3dea9a..86ba2b4 100644
--- a/arch/m32r/include/asm/bitops.h
+++ b/arch/m32r/include/asm/bitops.h
@@ -21,6 +21,7 @@
 #include <asm/byteorder.h>
 #include <asm/dcache_clear.h>
 #include <asm/types.h>
+#include <asm/barrier.h>
 
 /*
  * These have to be done with inline assembly: that way the bit-setting
@@ -73,7 +74,7 @@ static __inline__ void set_bit(int nr, volatile void * addr)
  *
  * clear_bit() is atomic and may not be reordered.  However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static __inline__ void clear_bit(int nr, volatile void * addr)
@@ -103,9 +104,6 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
 	local_irq_restore(flags);
 }
 
-#define smp_mb__before_clear_bit()	barrier()
-#define smp_mb__after_clear_bit()	barrier()
-
 /**
  * change_bit - Toggle a bit in memory
  * @nr: Bit to clear
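
Note for readers following the series: after this conversion, m32r picks up
smp_mb__before_atomic()/smp_mb__after_atomic() from the generic barrier
header, where they default to smp_mb(). On m32r that is just a compiler
barrier, since (per the removed comment above) atomic operations are
already serializing there. Below is a standalone userspace sketch of that
fallback pattern -- not kernel code; barrier(), smp_mb(), and the demo in
main() are simplified stand-ins for the real kernel definitions:

	/*
	 * Sketch of the #ifndef fallback pattern this series relies on:
	 * if an architecture does not define its own
	 * smp_mb__before_atomic()/smp_mb__after_atomic(), they default
	 * to smp_mb(), which on m32r reduces to a compiler barrier.
	 */
	#include <stdio.h>

	/* Compiler barrier: stops the compiler reordering across it. */
	#define barrier()	__asm__ __volatile__("" ::: "memory")

	/* On m32r, smp_mb() is just barrier(). */
	#ifndef smp_mb
	#define smp_mb()	barrier()
	#endif

	/* Generic fallbacks, used when the arch provides nothing stronger. */
	#ifndef smp_mb__before_atomic
	#define smp_mb__before_atomic()	smp_mb()
	#endif

	#ifndef smp_mb__after_atomic
	#define smp_mb__after_atomic()	smp_mb()
	#endif

	int main(void)
	{
		int flag = 0;

		smp_mb__before_atomic();	/* order prior accesses before the atomic op */
		__atomic_fetch_add(&flag, 1, __ATOMic_RELAXED == __ATOMIC_RELAXED ? __ATOMIC_RELAXED : __ATOMIC_RELAXED);
		smp_mb__after_atomic();		/* order the atomic op before later accesses */

		printf("flag = %d\n", flag);
		return 0;
	}

The design point the commit message is making: since the old
smp_mb__before_atomic_dec()/_inc() and smp_mb__{before,after}_clear_bit()
macros on m32r were all barrier(), i.e. exactly what the generic
smp_mb()-based defaults expand to here, the per-arch definitions carry no
information and can simply be deleted.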