From: Will Deacon
Date: Fri, 30 Aug 2019 11:55:39 +0000 (+0100)
Subject: Merge branch 'for-next/atomics' into for-next/core
X-Git-Tag: v5.4-rc1~100^2~5
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=61b7cddfe861f239bf39ab19a065e29b58153a80;p=platform%2Fkernel%2Flinux-rpi.git

Merge branch 'for-next/atomics' into for-next/core

* for-next/atomics: (10 commits)
  Rework LSE instruction selection to use static keys instead of alternatives
---

61b7cddfe861f239bf39ab19a065e29b58153a80
diff --cc arch/arm64/include/asm/atomic.h
index a5ca239,916e5a6..9543b5e
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@@ -13,19 -13,94 +13,91 @@@
  #include <linux/types.h>
  
  #include <asm/barrier.h>
 -#include <asm/lse.h>
 -
 -#ifdef __KERNEL__
 -
+ #include <asm/cmpxchg.h>
 +#include <asm/lse.h>
  
- #define __ARM64_IN_ATOMIC_IMPL
+ #define ATOMIC_OP(op)						\
+ static inline void arch_##op(int i, atomic_t *v)		\
+ {								\
+ 	__lse_ll_sc_body(op, i, v);				\
+ }
- #if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE)
- #include <asm/atomic_lse.h>
- #else
- #include <asm/atomic_ll_sc.h>
- #endif
+ ATOMIC_OP(atomic_andnot)
+ ATOMIC_OP(atomic_or)
+ ATOMIC_OP(atomic_xor)
+ ATOMIC_OP(atomic_add)
+ ATOMIC_OP(atomic_and)
+ ATOMIC_OP(atomic_sub)
- #undef __ARM64_IN_ATOMIC_IMPL
+ #undef ATOMIC_OP
- #include <asm/cmpxchg.h>
+ #define ATOMIC_FETCH_OP(name, op)				\
+ static inline int arch_##op##name(int i, atomic_t *v)		\
+ {								\
+ 	return __lse_ll_sc_body(op##name, i, v);		\
+ }
+
+ #define ATOMIC_FETCH_OPS(op)					\
+ 	ATOMIC_FETCH_OP(_relaxed, op)				\
+ 	ATOMIC_FETCH_OP(_acquire, op)				\
+ 	ATOMIC_FETCH_OP(_release, op)				\
+ 	ATOMIC_FETCH_OP(        , op)
+
+ ATOMIC_FETCH_OPS(atomic_fetch_andnot)
+ ATOMIC_FETCH_OPS(atomic_fetch_or)
+ ATOMIC_FETCH_OPS(atomic_fetch_xor)
+ ATOMIC_FETCH_OPS(atomic_fetch_add)
+ ATOMIC_FETCH_OPS(atomic_fetch_and)
+ ATOMIC_FETCH_OPS(atomic_fetch_sub)
+ ATOMIC_FETCH_OPS(atomic_add_return)
+ ATOMIC_FETCH_OPS(atomic_sub_return)
+
+ #undef ATOMIC_FETCH_OP
+ #undef ATOMIC_FETCH_OPS
+
+ #define ATOMIC64_OP(op)					\
+ static inline void arch_##op(long i, atomic64_t *v)		\
+ {								\
+ 	__lse_ll_sc_body(op, i, v);				\
+ }
+
+ ATOMIC64_OP(atomic64_andnot)
+ ATOMIC64_OP(atomic64_or)
+ ATOMIC64_OP(atomic64_xor)
+ ATOMIC64_OP(atomic64_add)
+ ATOMIC64_OP(atomic64_and)
+ ATOMIC64_OP(atomic64_sub)
+
+ #undef ATOMIC64_OP
+
+ #define ATOMIC64_FETCH_OP(name, op)				\
+ static inline long arch_##op##name(long i, atomic64_t *v)	\
+ {								\
+ 	return __lse_ll_sc_body(op##name, i, v);		\
+ }
+
+ #define ATOMIC64_FETCH_OPS(op)				\
+ 	ATOMIC64_FETCH_OP(_relaxed, op)			\
+ 	ATOMIC64_FETCH_OP(_acquire, op)			\
+ 	ATOMIC64_FETCH_OP(_release, op)			\
+ 	ATOMIC64_FETCH_OP(        , op)
+
+ ATOMIC64_FETCH_OPS(atomic64_fetch_andnot)
+ ATOMIC64_FETCH_OPS(atomic64_fetch_or)
+ ATOMIC64_FETCH_OPS(atomic64_fetch_xor)
+ ATOMIC64_FETCH_OPS(atomic64_fetch_add)
+ ATOMIC64_FETCH_OPS(atomic64_fetch_and)
+ ATOMIC64_FETCH_OPS(atomic64_fetch_sub)
+ ATOMIC64_FETCH_OPS(atomic64_add_return)
+ ATOMIC64_FETCH_OPS(atomic64_sub_return)
+
+ #undef ATOMIC64_FETCH_OP
+ #undef ATOMIC64_FETCH_OPS
+
+ static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
+ {
+ 	return __lse_ll_sc_body(atomic64_dec_if_positive, v);
+ }
  
  #define ATOMIC_INIT(i) { (i) }
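For context, every wrapper in the reworked atomic.h funnels through __lse_ll_sc_body(), which lives in <asm/lse.h> and is not part of the hunk above. A minimal sketch of the static-key dispatch it performs, paraphrased from the v5.4-era header rather than quoted from this merge (so treat the exact key names as illustrative), looks like this:

#include <linux/jump_label.h>

/* Declared by the arm64 cpufeature code (illustrative, not from this diff). */
extern struct static_key_false arm64_const_caps_ready;
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];

static inline bool system_uses_lse_atomics(void)
{
	/*
	 * Each static_branch_likely() is patched into a direct branch once
	 * CPU capabilities are finalised at boot, so selecting the atomic
	 * implementation costs no runtime conditional on the hot path.
	 */
	return static_branch_likely(&arm64_const_caps_ready) &&
	       static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]);
}

/*
 * Dispatch an atomic op to the LSE implementation (__lse_*) when the CPU
 * supports the LSE instructions, or to the LL/SC fallback (__ll_sc_*)
 * otherwise.
 */
#define __lse_ll_sc_body(op, ...)					\
({									\
	system_uses_lse_atomics() ?					\
		__lse_##op(__VA_ARGS__) :				\
		__ll_sc_##op(__VA_ARGS__);				\
})

This is the "static keys instead of alternatives" rework the branch description refers to: rather than patching alternative instruction sequences in place, both implementations are visible to the compiler as ordinary C and a boot-time jump label picks between them.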