LoongArch: Use the "move" pseudo-instruction where applicable
author    WANG Xuerui <git@xen0n.name>
Tue, 26 Jul 2022 15:57:18 +0000 (23:57 +0800)
committer Huacai Chen <chenhuacai@loongson.cn>
Fri, 29 Jul 2022 10:22:32 +0000 (18:22 +0800)
Some of the assembly code in the LoongArch port likely originated
from a time when the assembler did not support pseudo-instructions like
"move" or "jr", so the desugared form was used, and readability suffers
slightly as a result.

As the upstream toolchain has supported these pseudo-instructions from
the beginning, migrate the existing few usages to them for better
readability.
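
For reference, each of these pseudo-instructions is a plain alias for a
single canonical instruction; a minimal sketch of the expansions the
assembler performs (the operands below are arbitrary examples, not
taken from this patch):

	move	$a0, $a1	# expands to: or   $a0, $a1, $zero
	jr	$ra		# expands to: jirl $zero, $ra, 0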

Signed-off-by: WANG Xuerui <git@xen0n.name>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
arch/loongarch/include/asm/atomic.h
arch/loongarch/include/asm/cmpxchg.h
arch/loongarch/include/asm/futex.h
arch/loongarch/include/asm/uaccess.h
arch/loongarch/kernel/head.S

diff --git a/arch/loongarch/include/asm/atomic.h b/arch/loongarch/include/asm/atomic.h
index 979367a..a0a33ee 100644
@@ -157,7 +157,7 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
                __asm__ __volatile__(
                "1:     ll.w    %1, %2          # atomic_sub_if_positive\n"
                "       addi.w  %0, %1, %3                              \n"
-               "       or      %1, %0, $zero                           \n"
+               "       move    %1, %0                                  \n"
                "       blt     %0, $zero, 2f                           \n"
                "       sc.w    %1, %2                                  \n"
                "       beq     $zero, %1, 1b                           \n"
@@ -170,7 +170,7 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
                __asm__ __volatile__(
                "1:     ll.w    %1, %2          # atomic_sub_if_positive\n"
                "       sub.w   %0, %1, %3                              \n"
-               "       or      %1, %0, $zero                           \n"
+               "       move    %1, %0                                  \n"
                "       blt     %0, $zero, 2f                           \n"
                "       sc.w    %1, %2                                  \n"
                "       beq     $zero, %1, 1b                           \n"
@@ -320,7 +320,7 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
                __asm__ __volatile__(
                "1:     ll.d    %1, %2  # atomic64_sub_if_positive      \n"
                "       addi.d  %0, %1, %3                              \n"
-               "       or      %1, %0, $zero                           \n"
+               "       move    %1, %0                                  \n"
                "       blt     %0, $zero, 2f                           \n"
                "       sc.d    %1, %2                                  \n"
                "       beq     %1, $zero, 1b                           \n"
@@ -333,7 +333,7 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
                __asm__ __volatile__(
                "1:     ll.d    %1, %2  # atomic64_sub_if_positive      \n"
                "       sub.d   %0, %1, %3                              \n"
-               "       or      %1, %0, $zero                           \n"
+               "       move    %1, %0                                  \n"
                "       blt     %0, $zero, 2f                           \n"
                "       sc.d    %1, %2                                  \n"
                "       beq     %1, $zero, 1b                           \n"
diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h
index 75b3a44..9e99391 100644
@@ -55,7 +55,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
        __asm__ __volatile__(                                           \
        "1:     " ld "  %0, %2          # __cmpxchg_asm \n"             \
        "       bne     %0, %z3, 2f                     \n"             \
-       "       or      $t0, %z4, $zero                 \n"             \
+       "       move    $t0, %z4                        \n"             \
        "       " st "  $t0, %1                         \n"             \
        "       beq     $zero, $t0, 1b                  \n"             \
        "2:                                             \n"             \
diff --git a/arch/loongarch/include/asm/futex.h b/arch/loongarch/include/asm/futex.h
index 9de8231..170ec9f 100644
@@ -82,7 +82,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newv
        "# futex_atomic_cmpxchg_inatomic                        \n"
        "1:     ll.w    %1, %3                                  \n"
        "       bne     %1, %z4, 3f                             \n"
-       "       or      $t0, %z5, $zero                         \n"
+       "       move    $t0, %z5                                \n"
        "2:     sc.w    $t0, %2                                 \n"
        "       beq     $zero, $t0, 1b                          \n"
        "3:                                                     \n"
diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/asm/uaccess.h
index 42da432..2b44edc 100644
@@ -162,7 +162,7 @@ do {                                                                        \
        "2:                                                     \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li.w    %0, %3                                  \n"     \
-       "       or      %1, $zero, $zero                        \n"     \
+       "       move    %1, $zero                               \n"     \
        "       b       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section __ex_table,\"a\"                       \n"     \
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index e553c5f..fd6a62f 100644
@@ -50,7 +50,7 @@ SYM_CODE_START(kernel_entry)                  # kernel entry point
        /* KSave3 used for percpu base, initialized as 0 */
        csrwr           zero, PERCPU_BASE_KS
        /* GPR21 used for percpu base (runtime), initialized as 0 */
-       or              u0, zero, zero
+       move            u0, zero
 
        la              tp, init_thread_union
        /* Set the SP after an empty pt_regs.  */
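
Since "move" is a pure assembler alias, the patch should produce
byte-identical object code. One quick way to check (the cross toolchain
prefix below is an assumption; adjust for your environment):

	$ printf 'move $a0, $a1\nor $a0, $a1, $zero\n' | \
	  loongarch64-linux-gnu-as -o check.o -
	$ loongarch64-linux-gnu-objdump -d check.o
	# the two instructions disassemble to the same encoding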