From 4420658a4a7b03e3f4afb925bdd3ab9e06c2c46f Mon Sep 17 00:00:00 2001
From: Guo Ren
Date: Thu, 5 May 2022 11:55:24 +0800
Subject: [PATCH] riscv: atomic: Add custom conditional atomic operation
 implementation

Add a custom implementation of the conditional atomic operations
(similar to dec_if_positive); here is the list:
 - arch_atomic_inc_unless_negative
 - arch_atomic_dec_unless_positive
 - arch_atomic64_inc_unless_negative
 - arch_atomic64_dec_unless_positive

Signed-off-by: Guo Ren
Signed-off-by: Guo Ren
Link: https://lore.kernel.org/r/20220505035526.2974382-4-guoren@kernel.org
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/include/asm/atomic.h | 82 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 82 insertions(+)

diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index f3c6a6e..0dfe9d8 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -310,6 +310,46 @@ ATOMIC_OPS()
 #undef ATOMIC_OPS
 #undef ATOMIC_OP
 
+static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
+{
+	int prev, rc;
+
+	__asm__ __volatile__ (
+		"0:	lr.w      %[p],  %[c]\n"
+		"	bltz      %[p],  1f\n"
+		"	addi      %[rc], %[p], 1\n"
+		"	sc.w.rl   %[rc], %[rc], %[c]\n"
+		"	bnez      %[rc], 0b\n"
+		"	fence     rw, rw\n"
+		"1:\n"
+		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+		:
+		: "memory");
+	return !(prev < 0);
+}
+
+#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+
+static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
+{
+	int prev, rc;
+
+	__asm__ __volatile__ (
+		"0:	lr.w      %[p],  %[c]\n"
+		"	bgtz      %[p],  1f\n"
+		"	addi      %[rc], %[p], -1\n"
+		"	sc.w.rl   %[rc], %[rc], %[c]\n"
+		"	bnez      %[rc], 0b\n"
+		"	fence     rw, rw\n"
+		"1:\n"
+		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+		:
+		: "memory");
+	return !(prev > 0);
+}
+
+#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+
 static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
 {
 	int prev, rc;
@@ -331,6 +371,48 @@ static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
 
 #define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
 
 #ifndef CONFIG_GENERIC_ATOMIC64
+static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
+{
+	s64 prev;
+	long rc;
+
+	__asm__ __volatile__ (
+		"0:	lr.d      %[p],  %[c]\n"
+		"	bltz      %[p],  1f\n"
+		"	addi      %[rc], %[p], 1\n"
+		"	sc.d.rl   %[rc], %[rc], %[c]\n"
+		"	bnez      %[rc], 0b\n"
+		"	fence     rw, rw\n"
+		"1:\n"
+		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+		:
+		: "memory");
+	return !(prev < 0);
+}
+
+#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
+
+static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
+{
+	s64 prev;
+	long rc;
+
+	__asm__ __volatile__ (
+		"0:	lr.d      %[p],  %[c]\n"
+		"	bgtz      %[p],  1f\n"
+		"	addi      %[rc], %[p], -1\n"
+		"	sc.d.rl   %[rc], %[rc], %[c]\n"
+		"	bnez      %[rc], 0b\n"
+		"	fence     rw, rw\n"
+		"1:\n"
+		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+		:
+		: "memory");
+	return !(prev > 0);
+}
+
+#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
+
 static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
 	s64 prev;
-- 
2.7.4
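
A minimal usage sketch (not part of the patch above): a hypothetical caller
that takes a reference only while the counter has not gone negative, e.g.
where a negative count marks an object as dying. Callers go through the
generic wrapper in <linux/atomic.h>, which dispatches to the arch_atomic_*
implementations added here; obj_refs and obj_tryget are made-up names for
illustration only.

#include <linux/atomic.h>

/* Hypothetical reference counter; negative would mean "object is dying". */
static atomic_t obj_refs = ATOMIC_INIT(0);

/* Hypothetical helper: take a reference only if the object is still live. */
static bool obj_tryget(void)
{
	/* Increments and returns true only if obj_refs was not negative. */
	return atomic_inc_unless_negative(&obj_refs);
}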