locking/atomic: riscv: move to ARCH_ATOMIC
author Mark Rutland <mark.rutland@arm.com>
Tue, 25 May 2021 14:02:27 +0000 (15:02 +0100)
committer Peter Zijlstra <peterz@infradead.org>
Wed, 26 May 2021 11:20:52 +0000 (13:20 +0200)
We'd like all architectures to convert to ARCH_ATOMIC. Once all
architectures are converted, it will be possible to make significant
cleanups to the atomics headers, and it will become much easier to
generically enable atomic functionality (e.g. debug logic in the
instrumented wrappers).

As a step towards that, this patch migrates riscv to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.
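
For illustration, the common-code wrapping looks roughly like the
following. This is a simplified sketch of the generic instrumented
wrappers, not the exact generated code in
include/asm-generic/atomic-instrumented.h; the instrument_*() sanitizer
hooks and the __ai_ptr temporary follow the naming used there and are
shown only to make the arch_*() split concrete:

	/*
	 * Sketch only: the architecture provides arch_atomic_read() and
	 * arch_xchg(); the common wrappers add instrumentation and expose
	 * the regular atomic_read()/xchg() names to the rest of the kernel.
	 */
	static __always_inline int
	atomic_read(const atomic_t *v)
	{
		instrument_atomic_read(v, sizeof(*v));
		return arch_atomic_read(v);
	}

	#define xchg(ptr, ...)						\
	({								\
		typeof(ptr) __ai_ptr = (ptr);				\
		instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
		arch_xchg(__ai_ptr, __VA_ARGS__);			\
	})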

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Palmer Dabbelt <palmerdabbelt@google.com>
Acked-by: Palmer Dabbelt <palmerdabbelt@google.com>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-29-mark.rutland@arm.com
arch/riscv/Kconfig
arch/riscv/include/asm/atomic.h
arch/riscv/include/asm/cmpxchg.h

index a8ad8eb761206ab51d29f0351f6606048a867bb6..c59b9f4a9d62a50f7a04216fb44417a20291d15f 100644
@@ -12,6 +12,7 @@ config 32BIT
 
 config RISCV
        def_bool y
+       select ARCH_ATOMIC
        select ARCH_CLOCKSOURCE_INIT
        select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
index 400a8c8b6de752bc41c4a507a4dbd6ef52a23463..ac9bdf4fc4044b0b784836e40dd6b4e9aab9f294 100644
 #define __atomic_release_fence()                                       \
        __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");
 
-static __always_inline int atomic_read(const atomic_t *v)
+static __always_inline int arch_atomic_read(const atomic_t *v)
 {
        return READ_ONCE(v->counter);
 }
-static __always_inline void atomic_set(atomic_t *v, int i)
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
 {
        WRITE_ONCE(v->counter, i);
 }
 
 #ifndef CONFIG_GENERIC_ATOMIC64
 #define ATOMIC64_INIT(i) { (i) }
-static __always_inline s64 atomic64_read(const atomic64_t *v)
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
 {
        return READ_ONCE(v->counter);
 }
-static __always_inline void atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
        WRITE_ONCE(v->counter, i);
 }
@@ -53,7 +53,7 @@ static __always_inline void atomic64_set(atomic64_t *v, s64 i)
  */
 #define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)             \
 static __always_inline                                                 \
-void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)            \
+void arch_atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)       \
 {                                                                      \
        __asm__ __volatile__ (                                          \
                "       amo" #asm_op "." #asm_type " zero, %1, %0"      \
@@ -87,7 +87,7 @@ ATOMIC_OPS(xor, xor,  i)
  */
 #define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix)       \
 static __always_inline                                                 \
-c_type atomic##prefix##_fetch_##op##_relaxed(c_type i,                 \
+c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i,            \
                                             atomic##prefix##_t *v)     \
 {                                                                      \
        register c_type ret;                                            \
@@ -99,7 +99,7 @@ c_type atomic##prefix##_fetch_##op##_relaxed(c_type i,                        \
        return ret;                                                     \
 }                                                                      \
 static __always_inline                                                 \
-c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v)    \
+c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v)       \
 {                                                                      \
        register c_type ret;                                            \
        __asm__ __volatile__ (                                          \
@@ -112,15 +112,15 @@ c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v)       \
 
 #define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix)        \
 static __always_inline                                                 \
-c_type atomic##prefix##_##op##_return_relaxed(c_type i,                        \
+c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i,           \
                                              atomic##prefix##_t *v)    \
 {                                                                      \
-        return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I;     \
+        return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I;        \
 }                                                                      \
 static __always_inline                                                 \
-c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
+c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)    \
 {                                                                      \
-        return atomic##prefix##_fetch_##op(i, v) c_op I;               \
+        return arch_atomic##prefix##_fetch_##op(i, v) c_op I;          \
 }
 
 #ifdef CONFIG_GENERIC_ATOMIC64
@@ -138,26 +138,26 @@ c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)    \
 ATOMIC_OPS(add, add, +,  i)
 ATOMIC_OPS(sub, add, +, -i)
 
-#define atomic_add_return_relaxed      atomic_add_return_relaxed
-#define atomic_sub_return_relaxed      atomic_sub_return_relaxed
-#define atomic_add_return              atomic_add_return
-#define atomic_sub_return              atomic_sub_return
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_add_return         arch_atomic_add_return
+#define arch_atomic_sub_return         arch_atomic_sub_return
 
-#define atomic_fetch_add_relaxed       atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed       atomic_fetch_sub_relaxed
-#define atomic_fetch_add               atomic_fetch_add
-#define atomic_fetch_sub               atomic_fetch_sub
+#define arch_atomic_fetch_add_relaxed  arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed  arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_add          arch_atomic_fetch_add
+#define arch_atomic_fetch_sub          arch_atomic_fetch_sub
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-#define atomic64_add_return_relaxed    atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed    atomic64_sub_return_relaxed
-#define atomic64_add_return            atomic64_add_return
-#define atomic64_sub_return            atomic64_sub_return
-
-#define atomic64_fetch_add_relaxed     atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed     atomic64_fetch_sub_relaxed
-#define atomic64_fetch_add             atomic64_fetch_add
-#define atomic64_fetch_sub             atomic64_fetch_sub
+#define arch_atomic64_add_return_relaxed       arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed       arch_atomic64_sub_return_relaxed
+#define arch_atomic64_add_return               arch_atomic64_add_return
+#define arch_atomic64_sub_return               arch_atomic64_sub_return
+
+#define arch_atomic64_fetch_add_relaxed        arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed        arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_add                arch_atomic64_fetch_add
+#define arch_atomic64_fetch_sub                arch_atomic64_fetch_sub
 #endif
 
 #undef ATOMIC_OPS
@@ -175,20 +175,20 @@ ATOMIC_OPS(and, and, i)
 ATOMIC_OPS( or,  or, i)
 ATOMIC_OPS(xor, xor, i)
 
-#define atomic_fetch_and_relaxed       atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed                atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed       atomic_fetch_xor_relaxed
-#define atomic_fetch_and               atomic_fetch_and
-#define atomic_fetch_or                        atomic_fetch_or
-#define atomic_fetch_xor               atomic_fetch_xor
+#define arch_atomic_fetch_and_relaxed  arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed   arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed  arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and          arch_atomic_fetch_and
+#define arch_atomic_fetch_or           arch_atomic_fetch_or
+#define arch_atomic_fetch_xor          arch_atomic_fetch_xor
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-#define atomic64_fetch_and_relaxed     atomic64_fetch_and_relaxed
-#define atomic64_fetch_or_relaxed      atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed     atomic64_fetch_xor_relaxed
-#define atomic64_fetch_and             atomic64_fetch_and
-#define atomic64_fetch_or              atomic64_fetch_or
-#define atomic64_fetch_xor             atomic64_fetch_xor
+#define arch_atomic64_fetch_and_relaxed        arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed        arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and                arch_atomic64_fetch_and
+#define arch_atomic64_fetch_or         arch_atomic64_fetch_or
+#define arch_atomic64_fetch_xor                arch_atomic64_fetch_xor
 #endif
 
 #undef ATOMIC_OPS
@@ -197,7 +197,7 @@ ATOMIC_OPS(xor, xor, i)
 #undef ATOMIC_OP_RETURN
 
 /* This is required to provide a full barrier on success. */
-static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
        int prev, rc;
 
@@ -214,10 +214,10 @@ static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
                : "memory");
        return prev;
 }
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
        s64 prev;
        long rc;
@@ -235,7 +235,7 @@ static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u
                : "memory");
        return prev;
 }
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
 #endif
 
 /*
@@ -244,45 +244,45 @@ static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u
  */
 #define ATOMIC_OP(c_t, prefix, size)                                   \
 static __always_inline                                                 \
-c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n)                \
+c_t arch_atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n)   \
 {                                                                      \
        return __xchg_relaxed(&(v->counter), n, size);                  \
 }                                                                      \
 static __always_inline                                                 \
-c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n)                \
+c_t arch_atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n)   \
 {                                                                      \
        return __xchg_acquire(&(v->counter), n, size);                  \
 }                                                                      \
 static __always_inline                                                 \
-c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n)                \
+c_t arch_atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n)   \
 {                                                                      \
        return __xchg_release(&(v->counter), n, size);                  \
 }                                                                      \
 static __always_inline                                                 \
-c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n)                        \
+c_t arch_atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n)           \
 {                                                                      \
        return __xchg(&(v->counter), n, size);                          \
 }                                                                      \
 static __always_inline                                                 \
-c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v,            \
+c_t arch_atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v,       \
                                     c_t o, c_t n)                      \
 {                                                                      \
        return __cmpxchg_relaxed(&(v->counter), o, n, size);            \
 }                                                                      \
 static __always_inline                                                 \
-c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v,            \
+c_t arch_atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v,       \
                                     c_t o, c_t n)                      \
 {                                                                      \
        return __cmpxchg_acquire(&(v->counter), o, n, size);            \
 }                                                                      \
 static __always_inline                                                 \
-c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v,            \
+c_t arch_atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v,       \
                                     c_t o, c_t n)                      \
 {                                                                      \
        return __cmpxchg_release(&(v->counter), o, n, size);            \
 }                                                                      \
 static __always_inline                                                 \
-c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n)      \
+c_t arch_atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
 {                                                                      \
        return __cmpxchg(&(v->counter), o, n, size);                    \
 }
@@ -298,19 +298,19 @@ c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
 
 ATOMIC_OPS()
 
-#define atomic_xchg_relaxed atomic_xchg_relaxed
-#define atomic_xchg_acquire atomic_xchg_acquire
-#define atomic_xchg_release atomic_xchg_release
-#define atomic_xchg atomic_xchg
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#define atomic_cmpxchg_release atomic_cmpxchg_release
-#define atomic_cmpxchg atomic_cmpxchg
+#define arch_atomic_xchg_relaxed       arch_atomic_xchg_relaxed
+#define arch_atomic_xchg_acquire       arch_atomic_xchg_acquire
+#define arch_atomic_xchg_release       arch_atomic_xchg_release
+#define arch_atomic_xchg               arch_atomic_xchg
+#define arch_atomic_cmpxchg_relaxed    arch_atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_acquire    arch_atomic_cmpxchg_acquire
+#define arch_atomic_cmpxchg_release    arch_atomic_cmpxchg_release
+#define arch_atomic_cmpxchg            arch_atomic_cmpxchg
 
 #undef ATOMIC_OPS
 #undef ATOMIC_OP
 
-static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
+static __always_inline int arch_atomic_sub_if_positive(atomic_t *v, int offset)
 {
        int prev, rc;
 
@@ -328,10 +328,10 @@ static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
        return prev - offset;
 }
 
-#define atomic_dec_if_positive(v)      atomic_sub_if_positive(v, 1)
+#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(v, 1)
 
 #ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
+static __always_inline s64 arch_atomic64_sub_if_positive(atomic64_t *v, s64 offset)
 {
        s64 prev;
        long rc;
@@ -350,7 +350,7 @@ static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
        return prev - offset;
 }
 
-#define atomic64_dec_if_positive(v)    atomic64_sub_if_positive(v, 1)
+#define arch_atomic64_dec_if_positive(v)       arch_atomic64_sub_if_positive(v, 1)
 #endif
 
 #endif /* _ASM_RISCV_ATOMIC_H */
index 262e5bbb27760840d598e4e940a340fc5d820178..36dc962f634366002ce6861c8aacb74bf0a480ea 100644
@@ -37,7 +37,7 @@
        __ret;                                                          \
 })
 
-#define xchg_relaxed(ptr, x)                                           \
+#define arch_xchg_relaxed(ptr, x)                                      \
 ({                                                                     \
        __typeof__(*(ptr)) _x_ = (x);                                   \
        (__typeof__(*(ptr))) __xchg_relaxed((ptr),                      \
@@ -72,7 +72,7 @@
        __ret;                                                          \
 })
 
-#define xchg_acquire(ptr, x)                                           \
+#define arch_xchg_acquire(ptr, x)                                      \
 ({                                                                     \
        __typeof__(*(ptr)) _x_ = (x);                                   \
        (__typeof__(*(ptr))) __xchg_acquire((ptr),                      \
        __ret;                                                          \
 })
 
-#define xchg_release(ptr, x)                                           \
+#define arch_xchg_release(ptr, x)                                      \
 ({                                                                     \
        __typeof__(*(ptr)) _x_ = (x);                                   \
        (__typeof__(*(ptr))) __xchg_release((ptr),                      \
        __ret;                                                          \
 })
 
-#define xchg(ptr, x)                                                   \
+#define arch_xchg(ptr, x)                                              \
 ({                                                                     \
        __typeof__(*(ptr)) _x_ = (x);                                   \
        (__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr)));        \
 #define xchg32(ptr, x)                                                 \
 ({                                                                     \
        BUILD_BUG_ON(sizeof(*(ptr)) != 4);                              \
-       xchg((ptr), (x));                                               \
+       arch_xchg((ptr), (x));                                          \
 })
 
 #define xchg64(ptr, x)                                                 \
 ({                                                                     \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
-       xchg((ptr), (x));                                               \
+       arch_xchg((ptr), (x));                                          \
 })
 
 /*
        __ret;                                                          \
 })
 
-#define cmpxchg_relaxed(ptr, o, n)                                     \
+#define arch_cmpxchg_relaxed(ptr, o, n)                                        \
 ({                                                                     \
        __typeof__(*(ptr)) _o_ = (o);                                   \
        __typeof__(*(ptr)) _n_ = (n);                                   \
        __ret;                                                          \
 })
 
-#define cmpxchg_acquire(ptr, o, n)                                     \
+#define arch_cmpxchg_acquire(ptr, o, n)                                        \
 ({                                                                     \
        __typeof__(*(ptr)) _o_ = (o);                                   \
        __typeof__(*(ptr)) _n_ = (n);                                   \
        __ret;                                                          \
 })
 
-#define cmpxchg_release(ptr, o, n)                                     \
+#define arch_cmpxchg_release(ptr, o, n)                                        \
 ({                                                                     \
        __typeof__(*(ptr)) _o_ = (o);                                   \
        __typeof__(*(ptr)) _n_ = (n);                                   \
        __ret;                                                          \
 })
 
-#define cmpxchg(ptr, o, n)                                             \
+#define arch_cmpxchg(ptr, o, n)                                                \
 ({                                                                     \
        __typeof__(*(ptr)) _o_ = (o);                                   \
        __typeof__(*(ptr)) _n_ = (n);                                   \
                                       _o_, _n_, sizeof(*(ptr)));       \
 })
 
-#define cmpxchg_local(ptr, o, n)                                       \
+#define arch_cmpxchg_local(ptr, o, n)                                  \
        (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
 
 #define cmpxchg32(ptr, o, n)                                           \
 ({                                                                     \
        BUILD_BUG_ON(sizeof(*(ptr)) != 4);                              \
-       cmpxchg((ptr), (o), (n));                                       \
+       arch_cmpxchg((ptr), (o), (n));                                  \
 })
 
 #define cmpxchg32_local(ptr, o, n)                                     \
 ({                                                                     \
        BUILD_BUG_ON(sizeof(*(ptr)) != 4);                              \
-       cmpxchg_relaxed((ptr), (o), (n))                                \
+       arch_cmpxchg_relaxed((ptr), (o), (n))                           \
 })
 
-#define cmpxchg64(ptr, o, n)                                           \
+#define arch_cmpxchg64(ptr, o, n)                                      \
 ({                                                                     \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
-       cmpxchg((ptr), (o), (n));                                       \
+       arch_cmpxchg((ptr), (o), (n));                                  \
 })
 
-#define cmpxchg64_local(ptr, o, n)                                     \
+#define arch_cmpxchg64_local(ptr, o, n)                                        \
 ({                                                                     \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
-       cmpxchg_relaxed((ptr), (o), (n));                               \
+       arch_cmpxchg_relaxed((ptr), (o), (n));                          \
 })
 
 #endif /* _ASM_RISCV_CMPXCHG_H */