MIPS: Unify sc beqz definition
author Paul Burton <paul.burton@mips.com>
Tue, 1 Oct 2019 21:53:05 +0000 (21:53 +0000)
committer Paul Burton <paul.burton@mips.com>
Mon, 7 Oct 2019 16:42:13 +0000 (09:42 -0700)
We currently duplicate the definition of __scbeqz in asm/atomic.h &
asm/cmpxchg.h. Move it to asm/llsc.h & rename it to __SC_BEQZ to fit
better with the existing __SC macro provided there.

We include a trailing tab in the string so that users don't need to add any
further whitespace of their own between the instruction mnemonic and its
operands.

Signed-off-by: Paul Burton <paul.burton@mips.com>
Cc: linux-mips@vger.kernel.org
Cc: Huacai Chen <chenhc@lemote.com>
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: linux-kernel@vger.kernel.org
arch/mips/include/asm/atomic.h
arch/mips/include/asm/cmpxchg.h
arch/mips/include/asm/llsc.h

arch/mips/include/asm/atomic.h
index bb8658c..7578c80 100644
 #include <asm/compiler.h>
 #include <asm/cpu-features.h>
 #include <asm/cmpxchg.h>
+#include <asm/llsc.h>
 #include <asm/war.h>
 
-/*
- * Using a branch-likely instruction to check the result of an sc instruction
- * works around a bug present in R10000 CPUs prior to revision 3.0 that could
- * cause ll-sc sequences to execute non-atomically.
- */
-#if R10000_LLSC_WAR
-# define __scbeqz "beqzl"
-#else
-# define __scbeqz "beqz"
-#endif
-
 #define ATOMIC_INIT(i)   { (i) }
 
 /*
@@ -65,7 +55,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v)                             \
                "1:     ll      %0, %1          # atomic_" #op "        \n"   \
                "       " #asm_op " %0, %2                              \n"   \
                "       sc      %0, %1                                  \n"   \
-               "\t" __scbeqz " %0, 1b                                  \n"   \
+               "\t" __SC_BEQZ "%0, 1b                                  \n"   \
                "       .set    pop                                     \n"   \
                : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)          \
                : "Ir" (i) : __LLSC_CLOBBER);                                 \
@@ -93,7 +83,7 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v)             \
                "1:     ll      %1, %2          # atomic_" #op "_return \n"   \
                "       " #asm_op " %0, %1, %3                          \n"   \
                "       sc      %0, %2                                  \n"   \
-               "\t" __scbeqz " %0, 1b                                  \n"   \
+               "\t" __SC_BEQZ "%0, 1b                                  \n"   \
                "       " #asm_op " %0, %1, %3                          \n"   \
                "       .set    pop                                     \n"   \
                : "=&r" (result), "=&r" (temp),                               \
@@ -127,7 +117,7 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v)            \
                "1:     ll      %1, %2          # atomic_fetch_" #op "  \n"   \
                "       " #asm_op " %0, %1, %3                          \n"   \
                "       sc      %0, %2                                  \n"   \
-               "\t" __scbeqz " %0, 1b                                  \n"   \
+               "\t" __SC_BEQZ "%0, 1b                                  \n"   \
                "       .set    pop                                     \n"   \
                "       move    %0, %1                                  \n"   \
                : "=&r" (result), "=&r" (temp),                               \
@@ -205,7 +195,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                "       .set    push                                    \n"
                "       .set    "MIPS_ISA_LEVEL"                        \n"
                "       sc      %1, %2                                  \n"
-               "\t" __scbeqz " %1, 1b                                  \n"
+               "\t" __SC_BEQZ "%1, 1b                                  \n"
                "2:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (result), "=&r" (temp),
@@ -267,7 +257,7 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t * v)               \
                "1:     lld     %0, %1          # atomic64_" #op "      \n"   \
                "       " #asm_op " %0, %2                              \n"   \
                "       scd     %0, %1                                  \n"   \
-               "\t" __scbeqz " %0, 1b                                  \n"   \
+               "\t" __SC_BEQZ "%0, 1b                                  \n"   \
                "       .set    pop                                     \n"   \
                : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)          \
                : "Ir" (i) : __LLSC_CLOBBER);                                 \
@@ -295,7 +285,7 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)   \
                "1:     lld     %1, %2          # atomic64_" #op "_return\n"  \
                "       " #asm_op " %0, %1, %3                          \n"   \
                "       scd     %0, %2                                  \n"   \
-               "\t" __scbeqz " %0, 1b                                  \n"   \
+               "\t" __SC_BEQZ "%0, 1b                                  \n"   \
                "       " #asm_op " %0, %1, %3                          \n"   \
                "       .set    pop                                     \n"   \
                : "=&r" (result), "=&r" (temp),                               \
@@ -329,7 +319,7 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)    \
                "1:     lld     %1, %2          # atomic64_fetch_" #op "\n"   \
                "       " #asm_op " %0, %1, %3                          \n"   \
                "       scd     %0, %2                                  \n"   \
-               "\t" __scbeqz " %0, 1b                                  \n"   \
+               "\t" __SC_BEQZ "%0, 1b                                  \n"   \
                "       move    %0, %1                                  \n"   \
                "       .set    pop                                     \n"   \
                : "=&r" (result), "=&r" (temp),                               \
@@ -404,7 +394,7 @@ static __inline__ s64 atomic64_sub_if_positive(s64 i, atomic64_t * v)
                "       move    %1, %0                                  \n"
                "       bltz    %0, 1f                                  \n"
                "       scd     %1, %2                                  \n"
-               "\t" __scbeqz " %1, 1b                                  \n"
+               "\t" __SC_BEQZ "%1, 1b                                  \n"
                "1:                                                     \n"
                "       .set    pop                                     \n"
                : "=&r" (result), "=&r" (temp),
arch/mips/include/asm/cmpxchg.h
index 79bf34e..5d3f0e3 100644
 #include <linux/bug.h>
 #include <linux/irqflags.h>
 #include <asm/compiler.h>
+#include <asm/llsc.h>
 #include <asm/war.h>
 
 /*
- * Using a branch-likely instruction to check the result of an sc instruction
- * works around a bug present in R10000 CPUs prior to revision 3.0 that could
- * cause ll-sc sequences to execute non-atomically.
- */
-#if R10000_LLSC_WAR
-# define __scbeqz "beqzl"
-#else
-# define __scbeqz "beqz"
-#endif
-
-/*
  * These functions doesn't exist, so if they are called you'll either:
  *
  * - Get an error at compile-time due to __compiletime_error, if supported by
@@ -57,7 +47,7 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
                "       move    $1, %z3                         \n"     \
                "       .set    " MIPS_ISA_ARCH_LEVEL "         \n"     \
                "       " st "  $1, %1                          \n"     \
-               "\t" __scbeqz " $1, 1b                          \n"     \
+               "\t" __SC_BEQZ  "$1, 1b                         \n"     \
                "       .set    pop                             \n"     \
                : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)           \
                : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)                  \
@@ -130,7 +120,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
                "       move    $1, %z4                         \n"     \
                "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"     \
                "       " st "  $1, %1                          \n"     \
-               "\t" __scbeqz " $1, 1b                          \n"     \
+               "\t" __SC_BEQZ  "$1, 1b                         \n"     \
                "       .set    pop                             \n"     \
                "2:                                             \n"     \
                : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)           \
@@ -268,7 +258,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
        /* Attempt to store new at ptr */
        "       scd     %L1, %2                         \n"
        /* If we failed, loop! */
-       "\t" __scbeqz " %L1, 1b                         \n"
+       "\t" __SC_BEQZ "%L1, 1b                         \n"
        "       .set    pop                             \n"
        "2:                                             \n"
        : "=&r"(ret),
@@ -311,6 +301,4 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
 # endif /* !CONFIG_SMP */
 #endif /* !CONFIG_64BIT */
 
-#undef __scbeqz
-
 #endif /* __ASM_CMPXCHG_H */
arch/mips/include/asm/llsc.h
index c6d17d1..9b19f38 100644
 #define __EXT          "dext   "
 #endif
 
+/*
+ * Using a branch-likely instruction to check the result of an sc instruction
+ * works around a bug present in R10000 CPUs prior to revision 3.0 that could
+ * cause ll-sc sequences to execute non-atomically.
+ */
+#if R10000_LLSC_WAR
+# define __SC_BEQZ "beqzl      "
+#else
+# define __SC_BEQZ "beqz       "
+#endif
+
 #endif /* __ASM_LLSC_H  */