#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H
+#include <linux/stringify.h>
+
#if IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && IS_ENABLED(CONFIG_AS_LSE)
#define __LL_SC_FALLBACK(asm_ops) \
" b 3f\n" \
#define __LL_SC_FALLBACK(asm_ops) asm_ops
#endif
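When LSE atomics are compiled in, the first definition above moves each LL/SC sequence out of line: only the branch stays on the inline fast path, while the exclusive-load/store fallback is emitted in a separate subsection, away from the caller's hot code. The second definition simply pastes the sequence inline. The body of the out-of-line form is elided above; a reconstructed sketch of its shape, with the directives and label numbers to be taken as assumptions rather than a quotation of the kernel source:

/* Sketch of the out-of-line wrapper -- reconstructed for illustration. */
#define __LL_SC_FALLBACK(asm_ops)					\
"	b	3f\n"		/* fast path: skip over the fallback	*/ \
"	.subsection	1\n"	/* emit the LL/SC body out of line	*/ \
"3:\n"									\
asm_ops									\
"	b	4f\n"		/* done: rejoin the caller's code	*/ \
"	.previous\n"							\
"4:\n"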
+#ifndef CONFIG_CC_HAS_K_CONSTRAINT
+#define K
+#endif
+
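This empty definition works together with __stringify() in the operand lists below. With CONFIG_CC_HAS_K_CONSTRAINT set, K is not a macro, so __stringify(K) produces "K" and string concatenation turns the operand constraint into "Kr": use the value as an immediate when it encodes, otherwise fall back to a register. With the option unset, K expands to nothing, __stringify(K) produces "", and only the plain "r" register constraint remains. A stand-alone sketch of the mechanism; the two helper macros mirror linux/stringify.h, the variable names are made up:

#define __stringify_1(x...)	#x
#define __stringify(x...)	__stringify_1(x)

#define K	/* what this header does on a doubtful toolchain */
static const char *without_k = __stringify(K) "r";	/* ""  "r"  ->  "r"  */

#undef K	/* toolchain handles 'K' correctly: leave it undefined */
static const char *with_k    = __stringify(K) "r";	/* "K" "r"  ->  "Kr" */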
/*
 * AArch64 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */
" stxr %w1, %w0, %2\n" \
" cbnz %w1, 1b\n") \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : #constraint "r" (i)); \
+ : __stringify(constraint) "r" (i)); \
}
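The fragment above is the tail of the basic ATOMIC_OP template: the counter is read with a load-exclusive, the operation is applied, and the store-exclusive writes a non-zero status to %w1 if the exclusive monitor was lost in the meantime, in which case cbnz loops back and the whole sequence is retried. A rough stand-alone sketch of what the 32-bit 'and' case boils down to once the constraint has been stringified to "Kr" (names are invented; the in-kernel template wraps this in __LL_SC_FALLBACK and carries a few more details):

/* Illustrative sketch only -- not the kernel's exact expansion. */
static inline void ll_sc_atomic_and_sketch(int i, int *counter)
{
	unsigned long tmp;
	int result;

	asm volatile(
	"1:	ldxr	%w0, %2\n"		/* exclusive load of the counter     */
	"	and	%w0, %w0, %w3\n"	/* apply the operation               */
	"	stxr	%w1, %w0, %2\n"		/* try to store; %w1 != 0 on failure */
	"	cbnz	%w1, 1b\n"		/* lost the exclusive monitor: retry */
	: "=&r" (result), "=&r" (tmp), "+Q" (*counter)
	: "Kr" (i));
}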
#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
" cbnz %w1, 1b\n" \
" " #mb ) \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : #constraint "r" (i) \
+ : __stringify(constraint) "r" (i) \
: cl); \
\
return result; \
" cbnz %w2, 1b\n" \
" " #mb ) \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
- : #constraint "r" (i) \
+ : __stringify(constraint) "r" (i) \
: cl); \
\
return result; \
ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)
-ATOMIC_OPS(and, and, )
+ATOMIC_OPS(and, and, K)
+ATOMIC_OPS(or, orr, K)
+ATOMIC_OPS(xor, eor, K)
+/*
+ * GAS converts the mysterious and undocumented BIC (immediate) alias to
+ * an AND (immediate) instruction with the immediate inverted. We don't
+ * have a constraint for this, so fall back to register.
+ */
ATOMIC_OPS(andnot, bic, )
-ATOMIC_OPS(or, orr, )
-ATOMIC_OPS(xor, eor, )
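The comment explains why 'andnot' keeps the bare register constraint while the other bitwise ops gain K: BIC with an immediate is not a real A64 encoding, and once the assembler has rewritten it as an AND of the inverted immediate there is no GCC constraint that validates that inverted value. A hypothetical illustration of the rewrite (the constants are examples only, not taken from the kernel):

	bic	w0, w1, #0x00000006	// what the inline asm would ask for
	and	w0, w1, #0xfffffff9	// what GAS actually encodes

A "K" constraint would have GCC check the operand as written (0x6 here) rather than the inverted immediate that must actually encode, so always passing the operand in a register is the safe choice.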
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
" stxr %w1, %0, %2\n" \
" cbnz %w1, 1b") \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : #constraint "r" (i)); \
+ : __stringify(constraint) "r" (i)); \
}
#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
" cbnz %w1, 1b\n" \
" " #mb ) \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : #constraint "r" (i) \
+ : __stringify(constraint) "r" (i) \
: cl); \
\
return result; \
" cbnz %w2, 1b\n" \
" " #mb ) \
: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
- : #constraint "r" (i) \
+ : __stringify(constraint) "r" (i) \
: cl); \
\
return result; \
ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)
ATOMIC64_OPS(and, and, L)
-ATOMIC64_OPS(andnot, bic, )
ATOMIC64_OPS(or, orr, L)
ATOMIC64_OPS(xor, eor, L)
+/*
+ * GAS converts the mysterious and undocumented BIC (immediate) alias to
+ * an AND (immediate) instruction with the immediate inverted. We don't
+ * have a constraint for this, so fall back to register.
+ */
+ATOMIC64_OPS(andnot, bic, )
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
"2:") \
: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
[v] "+Q" (*(u##sz *)ptr) \
- : [old] #constraint "r" (old), [new] "r" (new) \
+ : [old] __stringify(constraint) "r" (old), [new] "r" (new) \
: cl); \
\
return oldval; \
* handle the 'K' constraint for the value 4294967295 - thus we use no
* constraint for 32 bit operations.
*/
-__CMPXCHG_CASE(w, b, , 8, , , , , )
-__CMPXCHG_CASE(w, h, , 16, , , , , )
-__CMPXCHG_CASE(w, , , 32, , , , , )
+__CMPXCHG_CASE(w, b, , 8, , , , , K)
+__CMPXCHG_CASE(w, h, , 16, , , , , K)
+__CMPXCHG_CASE(w, , , 32, , , , , K)
__CMPXCHG_CASE( , , , 64, , , , , L)
-__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", )
-__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", )
-__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", )
+__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", K)
+__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", K)
+__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", K)
__CMPXCHG_CASE( , , acq_, 64, , a, , "memory", L)
-__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", )
-__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", )
-__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", )
+__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", K)
+__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", K)
+__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", K)
__CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L)
-__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", )
-__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", )
-__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", )
+__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", K)
+__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", K)
+__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", K)
__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L)
#undef __CMPXCHG_CASE
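In GCC's AArch64 inline assembly, 'K' accepts constants that are valid 32-bit logical immediates and 'L' their 64-bit counterparts, so the combined "Kr"/"Lr" constraints let the compiler fold a suitable 'old' value straight into the comparison and use a register for anything else. The 64-bit cases can use L unconditionally; the 32-bit cases go through the K macro defined at the top of the file, so on a toolchain where CONFIG_CC_HAS_K_CONSTRAINT is unset they still degrade to the bare "r" described in the earlier comment about GCC mishandling 4294967295. A small illustration of the constraint behaviour (hypothetical helper, not from the kernel):

/* Illustration: "Kr" folds encodable constants, falls back to a register. */
static inline unsigned int keep_low_byte(unsigned int x)
{
	/*
	 * 0xff is an encodable 32-bit logical immediate, so the compiler can
	 * put it straight into the AND instruction. A value such as
	 * 4294967295 (0xffffffff) is not encodable; a correct compiler must
	 * then pick the register alternative instead of claiming it
	 * satisfies 'K'.
	 */
	asm("and	%w0, %w0, %w1" : "+r" (x) : "Kr" (0xff));
	return x;
}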
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")
#undef __CMPXCHG_DBL
+#undef K
#endif /* __ASM_ATOMIC_LL_SC_H */