locking/atomic: arm: fix sync ops
authorMark Rutland <mark.rutland@arm.com>
Mon, 5 Jun 2023 07:00:58 +0000 (08:00 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 19 Jul 2023 14:21:02 +0000 (16:21 +0200)
[ Upstream commit dda5f312bb09e56e7a1c3e3851f2000eb2e9c879 ]

The sync_*() ops on arch/arm are defined in terms of the regular bitops
with no special handling. This is not correct, as UP kernels elide
barriers for the fully-ordered operations, and so the required ordering
is lost when such UP kernels are run under a hypervisor on an SMP
system.
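
(Illustration only, not part of this patch: with CONFIG_SMP=n the generic
barrier header reduces the smp_*() barriers to compiler-only barriers,
roughly as sketched below, so the "fully ordered" ops emit no DMB at all
and provide no hardware ordering against another agent such as a
hypervisor or a peer guest.)

    /* Rough C sketch of the !CONFIG_SMP case, following the shape of
     * asm-generic/barrier.h: the "SMP" barriers only constrain the
     * compiler, not the hardware. */
    #ifndef CONFIG_SMP
    #define smp_mb()    barrier()   /* compiler barrier only, no DMB */
    #endif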

Fix this by defining sync ops with the required barriers.

Note: On 32-bit arm, the sync_*() ops are currently only used by Xen,
which requires ARMv7, but the semantics can be implemented for ARMv6+.
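
As a stand-alone illustration of the shape the fixed ops take (plain C11,
not kernel code; the function name and word/bit arithmetic below are
invented for the example), a fully-ordered sync op is a relaxed RMW
bracketed by full barriers:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <limits.h>

    #define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

    /* barrier / relaxed RMW / barrier, as in the new sync_*() ops */
    static bool sync_test_and_set_bit_sketch(int nr, _Atomic unsigned long *p)
    {
            _Atomic unsigned long *word = p + nr / BITS_PER_LONG;
            unsigned long mask = 1UL << (nr % BITS_PER_LONG);
            unsigned long old;

            atomic_thread_fence(memory_order_seq_cst);  /* leading barrier */
            old = atomic_fetch_or_explicit(word, mask, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);  /* trailing barrier */

            return old & mask;
    }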

Fixes: e54d2f61528165bb ("xen/arm: sync_bitops")
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-2-mark.rutland@arm.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/arm/include/asm/assembler.h
arch/arm/include/asm/sync_bitops.h
arch/arm/lib/bitops.h
arch/arm/lib/testchangebit.S
arch/arm/lib/testclearbit.S
arch/arm/lib/testsetbit.S

index 90fbe4a3f9c8472fe756bebefb7c779a063f0f2c..84912b19cac857af73fd19caf22d4fdca0160478 100644 (file)
@@ -402,6 +402,23 @@ ALT_UP_B(.L0_\@)
 #endif
        .endm
 
+/*
+ * Raw SMP data memory barrier
+ */
+       .macro  __smp_dmb mode
+#if __LINUX_ARM_ARCH__ >= 7
+       .ifeqs "\mode","arm"
+       dmb     ish
+       .else
+       W(dmb)  ish
+       .endif
+#elif __LINUX_ARM_ARCH__ == 6
+       mcr     p15, 0, r0, c7, c10, 5  @ dmb
+#else
+       .error "Incompatible SMP platform"
+#endif
+       .endm
+
 #if defined(CONFIG_CPU_V7M)
        /*
         * setmode is used to assert to be in svc mode during boot. For v7-M
index 6f5d627c44a3cbbefaeee6fca2b7f409f645b3fb..f46b3c570f92e296690b01dd0f259ff15dac4afc 100644 (file)
  * ops which are SMP safe even on a UP kernel.
  */
 
+/*
+ * Unordered
+ */
+
 #define sync_set_bit(nr, p)            _set_bit(nr, p)
 #define sync_clear_bit(nr, p)          _clear_bit(nr, p)
 #define sync_change_bit(nr, p)         _change_bit(nr, p)
-#define sync_test_and_set_bit(nr, p)   _test_and_set_bit(nr, p)
-#define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p)
-#define sync_test_and_change_bit(nr, p)        _test_and_change_bit(nr, p)
 #define sync_test_bit(nr, addr)                test_bit(nr, addr)
-#define arch_sync_cmpxchg              arch_cmpxchg
 
+/*
+ * Fully ordered
+ */
+
+int _sync_test_and_set_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_set_bit(nr, p)   _sync_test_and_set_bit(nr, p)
+
+int _sync_test_and_clear_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_clear_bit(nr, p) _sync_test_and_clear_bit(nr, p)
+
+int _sync_test_and_change_bit(int nr, volatile unsigned long * p);
+#define sync_test_and_change_bit(nr, p)        _sync_test_and_change_bit(nr, p)
+
+#define arch_sync_cmpxchg(ptr, old, new)                               \
+({                                                                     \
+       __typeof__(*(ptr)) __ret;                                       \
+       __smp_mb__before_atomic();                                      \
+       __ret = arch_cmpxchg_relaxed((ptr), (old), (new));              \
+       __smp_mb__after_atomic();                                       \
+       __ret;                                                          \
+})
 
 #endif
index 95bd359912889a5d31ceaefeaefb7597deaf5c14..f069d1b2318e643111f828e719f454e42f9ca241 100644 (file)
@@ -28,7 +28,7 @@ UNWIND(       .fnend          )
 ENDPROC(\name          )
        .endm
 
-       .macro  testop, name, instr, store
+       .macro  __testop, name, instr, store, barrier
 ENTRY( \name           )
 UNWIND(        .fnstart        )
        ands    ip, r1, #3
@@ -38,7 +38,7 @@ UNWIND(       .fnstart        )
        mov     r0, r0, lsr #5
        add     r1, r1, r0, lsl #2      @ Get word offset
        mov     r3, r2, lsl r3          @ create mask
-       smp_dmb
+       \barrier
 #if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
        .arch_extension mp
        ALT_SMP(W(pldw) [r1])
@@ -50,13 +50,21 @@ UNWIND(     .fnstart        )
        strex   ip, r2, [r1]
        cmp     ip, #0
        bne     1b
-       smp_dmb
+       \barrier
        cmp     r0, #0
        movne   r0, #1
 2:     bx      lr
 UNWIND(        .fnend          )
 ENDPROC(\name          )
        .endm
+
+       .macro  testop, name, instr, store
+       __testop \name, \instr, \store, smp_dmb
+       .endm
+
+       .macro  sync_testop, name, instr, store
+       __testop \name, \instr, \store, __smp_dmb
+       .endm
 #else
        .macro  bitop, name, instr
 ENTRY( \name           )
index 4ebecc67e6e044ab6fc38fb254314a2cd0332c83..f13fe9bc2399ad81bee58d0bfd933c007326ed04 100644 (file)
@@ -10,3 +10,7 @@
                 .text
 
 testop _test_and_change_bit, eor, str
+
+#if __LINUX_ARM_ARCH__ >= 6
+sync_testop    _sync_test_and_change_bit, eor, str
+#endif
index 009afa0f5b4a73fcafac5b2736aa90258fe1f77d..4d2c5ca620ebf6230dff24656a6e281afe8c2483 100644 (file)
@@ -10,3 +10,7 @@
                 .text
 
 testop _test_and_clear_bit, bicne, strne
+
+#if __LINUX_ARM_ARCH__ >= 6
+sync_testop    _sync_test_and_clear_bit, bicne, strne
+#endif
index f3192e55acc87fede3a34a50d7ca6d446b049b89..649dbab65d8d0e3735a8c395a3bd85edeb0ac81c 100644 (file)
@@ -10,3 +10,7 @@
                 .text
 
 testop _test_and_set_bit, orreq, streq
+
+#if __LINUX_ARM_ARCH__ >= 6
+sync_testop    _sync_test_and_set_bit, orreq, streq
+#endif