csky: Fixup asm/cmpxchg.h with correct ordering barrier

author     Guo Ren <guoren@linux.alibaba.com>
           Sun, 20 Dec 2020 07:10:23 +0000 (07:10 +0000)
committer  Guo Ren <guoren@linux.alibaba.com>
           Tue, 12 Jan 2021 01:52:40 +0000 (09:52 +0800)
Optimize the performance of cmpxchg by using more fine-grained
acquire/release barriers: instead of wrapping the ll/sc retry loop in a
pair of full smp_mb() barriers, define relaxed primitives (xchg_relaxed,
cmpxchg_relaxed) with no ordering, and build the fully ordered cmpxchg()
from __smp_release_fence() before and __smp_acquire_fence() after the
relaxed operation. The fully ordered xchg() is now supplied by the
generic fallbacks in linux/atomic.h, which construct it from
xchg_relaxed().
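
In short, the barrier placement changes as sketched below (an
illustration distilled from the diff that follows, not literal
preprocessor output):

    /* Before: every cmpxchg() paid two full barriers around the loop */
    smp_mb();
    /* ldex.w / stex.w retry loop */
    smp_mb();

    /* After: the relaxed loop itself is unordered; only the fully
     * ordered cmpxchg() adds a release fence before it and an acquire
     * fence after it */
    __smp_release_fence();
    /* ldex.w / stex.w retry loop (cmpxchg_relaxed) */
    __smp_acquire_fence();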

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Paul E. McKenney <paulmck@kernel.org>
arch/csky/include/asm/cmpxchg.h

index 8922453..dabc8e4 100644
@@ -3,12 +3,12 @@
 #ifndef __ASM_CSKY_CMPXCHG_H
 #define __ASM_CSKY_CMPXCHG_H
 
-#ifdef CONFIG_CPU_HAS_LDSTEX
+#ifdef CONFIG_SMP
 #include <asm/barrier.h>
 
 extern void __bad_xchg(void);
 
-#define __xchg(new, ptr, size)                                 \
+#define __xchg_relaxed(new, ptr, size)                         \
 ({                                                             \
        __typeof__(ptr) __ptr = (ptr);                          \
        __typeof__(new) __new = (new);                          \
@@ -16,7 +16,6 @@ extern void __bad_xchg(void);
        unsigned long tmp;                                      \
        switch (size) {                                         \
        case 4:                                                 \
-               smp_mb();                                       \
                asm volatile (                                  \
                "1:     ldex.w          %0, (%3) \n"            \
                "       mov             %1, %2   \n"            \
@@ -25,7 +24,6 @@ extern void __bad_xchg(void);
                        : "=&r" (__ret), "=&r" (tmp)            \
                        : "r" (__new), "r"(__ptr)               \
                        :);                                     \
-               smp_mb();                                       \
                break;                                          \
        default:                                                \
                __bad_xchg();                                   \
@@ -33,9 +31,10 @@ extern void __bad_xchg(void);
        __ret;                                                  \
 })
 
-#define xchg(ptr, x)   (__xchg((x), (ptr), sizeof(*(ptr))))
+#define xchg_relaxed(ptr, x) \
+               (__xchg_relaxed((x), (ptr), sizeof(*(ptr))))
 
-#define __cmpxchg(ptr, old, new, size)                         \
+#define __cmpxchg_relaxed(ptr, old, new, size)                 \
 ({                                                             \
        __typeof__(ptr) __ptr = (ptr);                          \
        __typeof__(new) __new = (new);                          \
@@ -44,7 +43,6 @@ extern void __bad_xchg(void);
        __typeof__(*(ptr)) __ret;                               \
        switch (size) {                                         \
        case 4:                                                 \
-               smp_mb();                                       \
                asm volatile (                                  \
                "1:     ldex.w          %0, (%3) \n"            \
                "       cmpne           %0, %4   \n"            \
@@ -56,7 +54,6 @@ extern void __bad_xchg(void);
                        : "=&r" (__ret), "=&r" (__tmp)          \
                        : "r" (__new), "r"(__ptr), "r"(__old)   \
                        :);                                     \
-               smp_mb();                                       \
                break;                                          \
        default:                                                \
                __bad_xchg();                                   \
@@ -64,8 +61,18 @@ extern void __bad_xchg(void);
        __ret;                                                  \
 })
 
-#define cmpxchg(ptr, o, n) \
-       (__cmpxchg((ptr), (o), (n), sizeof(*(ptr))))
+#define cmpxchg_relaxed(ptr, o, n) \
+       (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
+
+#define cmpxchg(ptr, o, n)                                     \
+({                                                             \
+       __typeof__(*(ptr)) __ret;                               \
+       __smp_release_fence();                                  \
+       __ret = cmpxchg_relaxed(ptr, o, n);                     \
+       __smp_acquire_fence();                                  \
+       __ret;                                                  \
+})
+
 #else
 #include <asm-generic/cmpxchg.h>
 #endif
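
Usage note: with this split, callers that need no ordering can pick the
cheaper relaxed form explicitly, while plain cmpxchg() keeps its fully
ordered semantics. A minimal sketch follows; try_claim(),
stat_update_max() and the owner variable are hypothetical names made up
for this illustration:

    #include <linux/types.h>
    #include <linux/compiler.h>
    #include <linux/atomic.h>

    static unsigned int owner;  /* hypothetical example variable */

    /* Claiming ownership needs ordering: a successful claim must not be
     * reordered with the accesses it protects, so use the fully ordered
     * form -- after this patch it costs one release fence plus one
     * acquire fence on csky instead of two full smp_mb() barriers. */
    static bool try_claim(unsigned int me)
    {
            return cmpxchg(&owner, 0, me) == 0;
    }

    /* Tracking a statistics maximum needs no ordering at all: the
     * relaxed form avoids every fence and just retries the ll/sc loop
     * until the update lands. */
    static void stat_update_max(unsigned int *stat, unsigned int val)
    {
            unsigned int old = READ_ONCE(*stat);

            while (old < val) {
                    unsigned int prev = cmpxchg_relaxed(stat, old, val);

                    if (prev == old)
                            break;
                    old = prev;
            }
    }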