From: Luis Marques
Date: Tue, 17 Sep 2019 10:52:09 +0000 (+0000)
Subject: Revert Patch from Phabricator
X-Git-Tag: llvmorg-11-init~9024
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=2d550d19b321837aac647ec9e8c5b6f26f682b17;p=platform%2Fupstream%2Fllvm.git

Revert Patch from Phabricator

This reverts r372092 (git commit e38695a0255c9e7b53639f349f8101bae1ce5c04)

llvm-svn: 372104
---

diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h
index 7d0373a..fa19252 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.h
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h
@@ -80,7 +80,6 @@ public:
   const SelectionDAGTargetInfo *getSelectionDAGInfo() const override {
     return &TSInfo;
   }
-  bool enableMachineScheduler() const override { return true; }
   bool hasStdExtM() const { return HasStdExtM; }
   bool hasStdExtA() const { return HasStdExtA; }
   bool hasStdExtF() const { return HasStdExtF; }
diff --git a/llvm/test/CodeGen/RISCV/add-before-shl.ll b/llvm/test/CodeGen/RISCV/add-before-shl.ll
index 51e5592..3279de3 100644
--- a/llvm/test/CodeGen/RISCV/add-before-shl.ll
+++ b/llvm/test/CodeGen/RISCV/add-before-shl.ll
@@ -96,36 +96,36 @@ define i128 @add_wide_operand(i128 %a) nounwind {
 ; RV32I-LABEL: add_wide_operand:
 ; RV32I: # %bb.0:
 ; RV32I-NEXT: lw a2, 0(a1)
-; RV32I-NEXT: lw a3, 4(a1)
-; RV32I-NEXT: lw a6, 12(a1)
-; RV32I-NEXT: lw a1, 8(a1)
-; RV32I-NEXT: srli a5, a2, 29
-; RV32I-NEXT: slli a4, a3, 3
-; RV32I-NEXT: or a4, a4, a5
-; RV32I-NEXT: srli a3, a3, 29
-; RV32I-NEXT: slli a5, a1, 3
-; RV32I-NEXT: or a3, a5, a3
-; RV32I-NEXT: srli a1, a1, 29
-; RV32I-NEXT: slli a5, a6, 3
-; RV32I-NEXT: or a1, a5, a1
+; RV32I-NEXT: srli a3, a2, 29
+; RV32I-NEXT: lw a4, 4(a1)
+; RV32I-NEXT: slli a5, a4, 3
+; RV32I-NEXT: or a6, a5, a3
+; RV32I-NEXT: srli a4, a4, 29
+; RV32I-NEXT: lw a5, 8(a1)
+; RV32I-NEXT: slli a3, a5, 3
+; RV32I-NEXT: or a3, a3, a4
 ; RV32I-NEXT: slli a2, a2, 3
-; RV32I-NEXT: lui a5, 128
-; RV32I-NEXT: add a1, a1, a5
 ; RV32I-NEXT: sw a2, 0(a0)
 ; RV32I-NEXT: sw a3, 8(a0)
-; RV32I-NEXT: sw a4, 4(a0)
+; RV32I-NEXT: sw a6, 4(a0)
+; RV32I-NEXT: srli a2, a5, 29
+; RV32I-NEXT: lw a1, 12(a1)
+; RV32I-NEXT: slli a1, a1, 3
+; RV32I-NEXT: or a1, a1, a2
+; RV32I-NEXT: lui a2, 128
+; RV32I-NEXT: add a1, a1, a2
 ; RV32I-NEXT: sw a1, 12(a0)
 ; RV32I-NEXT: ret
 ;
 ; RV64I-LABEL: add_wide_operand:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: srli a2, a0, 61
 ; RV64I-NEXT: slli a1, a1, 3
+; RV64I-NEXT: srli a2, a0, 61
 ; RV64I-NEXT: or a1, a1, a2
-; RV64I-NEXT: slli a0, a0, 3
 ; RV64I-NEXT: addi a2, zero, 1
 ; RV64I-NEXT: slli a2, a2, 51
 ; RV64I-NEXT: add a1, a1, a2
+; RV64I-NEXT: slli a0, a0, 3
 ; RV64I-NEXT: ret
   %1 = add i128 %a, 5192296858534827628530496329220096
   %2 = shl i128 %1, 3
diff --git a/llvm/test/CodeGen/RISCV/addc-adde-sube-subc.ll b/llvm/test/CodeGen/RISCV/addc-adde-sube-subc.ll
index 5fd8261..068e52f 100644
--- a/llvm/test/CodeGen/RISCV/addc-adde-sube-subc.ll
+++ b/llvm/test/CodeGen/RISCV/addc-adde-sube-subc.ll
@@ -20,9 +20,9 @@ define i64 @addc_adde(i64 %a, i64 %b) nounwind {
 define i64 @subc_sube(i64 %a, i64 %b) nounwind {
 ; RV32I-LABEL: subc_sube:
 ; RV32I: # %bb.0:
-; RV32I-NEXT: sltu a4, a0, a2
 ; RV32I-NEXT: sub a1, a1, a3
-; RV32I-NEXT: sub a1, a1, a4
+; RV32I-NEXT: sltu a3, a0, a2
+; RV32I-NEXT: sub a1, a1, a3
 ; RV32I-NEXT: sub a0, a0, a2
 ; RV32I-NEXT: ret
   %1 = sub i64 %a, %b
diff --git a/llvm/test/CodeGen/RISCV/addcarry.ll b/llvm/test/CodeGen/RISCV/addcarry.ll
index 71711f9..5a25fb9 100644
--- a/llvm/test/CodeGen/RISCV/addcarry.ll
+++ b/llvm/test/CodeGen/RISCV/addcarry.ll
@@ -10,17 +10,17 @@ define i64 @addcarry(i64 %x, i64 %y) nounwind {
 ; RISCV32-LABEL: addcarry:
 ; RISCV32: # %bb.0:
 ; RISCV32-NEXT: mul a4, a0, a3
-; RISCV32-NEXT: mulhu a7, a0, a2
-; RISCV32-NEXT: add a4, a7, a4
-; RISCV32-NEXT: mul a5, a1, a2
-; RISCV32-NEXT: add a6, a4, a5
-; RISCV32-NEXT: sltu t0, a6, a4
-; RISCV32-NEXT: sltu a4, a4, a7
-; RISCV32-NEXT: mulhu a5, a0, a3
+; RISCV32-NEXT: mulhu a5, a0, a2
 ; RISCV32-NEXT: add a4, a5, a4
+; RISCV32-NEXT: sltu a6, a4, a5
+; RISCV32-NEXT: mulhu a5, a0, a3
+; RISCV32-NEXT: add a6, a5, a6
 ; RISCV32-NEXT: mulhu a5, a1, a2
-; RISCV32-NEXT: add a4, a4, a5
-; RISCV32-NEXT: add a4, a4, t0
+; RISCV32-NEXT: add a7, a6, a5
+; RISCV32-NEXT: mul a5, a1, a2
+; RISCV32-NEXT: add a6, a4, a5
+; RISCV32-NEXT: sltu a4, a6, a4
+; RISCV32-NEXT: add a4, a7, a4
 ; RISCV32-NEXT: mul a5, a1, a3
 ; RISCV32-NEXT: add a5, a4, a5
 ; RISCV32-NEXT: bgez a1, .LBB0_2
@@ -31,13 +31,13 @@ define i64 @addcarry(i64 %x, i64 %y) nounwind {
 ; RISCV32-NEXT: # %bb.3:
 ; RISCV32-NEXT: sub a5, a5, a0
 ; RISCV32-NEXT: .LBB0_4:
-; RISCV32-NEXT: slli a1, a5, 30
-; RISCV32-NEXT: srli a3, a6, 2
-; RISCV32-NEXT: or a1, a1, a3
-; RISCV32-NEXT: slli a3, a6, 30
 ; RISCV32-NEXT: mul a0, a0, a2
 ; RISCV32-NEXT: srli a0, a0, 2
-; RISCV32-NEXT: or a0, a3, a0
+; RISCV32-NEXT: slli a1, a6, 30
+; RISCV32-NEXT: or a0, a1, a0
+; RISCV32-NEXT: srli a1, a6, 2
+; RISCV32-NEXT: slli a2, a5, 30
+; RISCV32-NEXT: or a1, a2, a1
 ; RISCV32-NEXT: ret
   %tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 2);
   ret i64 %tmp;
diff --git a/llvm/test/CodeGen/RISCV/alloca.ll b/llvm/test/CodeGen/RISCV/alloca.ll
index 6cd9bf4..22dd42c 100644
--- a/llvm/test/CodeGen/RISCV/alloca.ll
+++ b/llvm/test/CodeGen/RISCV/alloca.ll
@@ -82,7 +82,8 @@ define void @alloca_callframe(i32 %n) nounwind {
 ; RV32I-NEXT: sw a1, 8(sp)
 ; RV32I-NEXT: addi a1, zero, 10
 ; RV32I-NEXT: sw a1, 4(sp)
-; RV32I-NEXT: addi t0, zero, 9
+; RV32I-NEXT: addi a1, zero, 9
+; RV32I-NEXT: sw a1, 0(sp)
 ; RV32I-NEXT: addi a1, zero, 2
 ; RV32I-NEXT: addi a2, zero, 3
 ; RV32I-NEXT: addi a3, zero, 4
@@ -90,7 +91,6 @@ define void @alloca_callframe(i32 %n) nounwind {
 ; RV32I-NEXT: addi a5, zero, 6
 ; RV32I-NEXT: addi a6, zero, 7
 ; RV32I-NEXT: addi a7, zero, 8
-; RV32I-NEXT: sw t0, 0(sp)
 ; RV32I-NEXT: call func
 ; RV32I-NEXT: addi sp, sp, 16
 ; RV32I-NEXT: addi sp, s0, -16
diff --git a/llvm/test/CodeGen/RISCV/alu64.ll b/llvm/test/CodeGen/RISCV/alu64.ll
index dd33606..0a44a38 100644
--- a/llvm/test/CodeGen/RISCV/alu64.ll
+++ b/llvm/test/CodeGen/RISCV/alu64.ll
@@ -123,8 +123,8 @@ define i64 @slli(i64 %a) nounwind {
 ;
 ; RV32I-LABEL: slli:
 ; RV32I: # %bb.0:
-; RV32I-NEXT: srli a2, a0, 25
 ; RV32I-NEXT: slli a1, a1, 7
+; RV32I-NEXT: srli a2, a0, 25
 ; RV32I-NEXT: or a1, a1, a2
 ; RV32I-NEXT: slli a0, a0, 7
 ; RV32I-NEXT: ret
@@ -140,8 +140,8 @@ define i64 @srli(i64 %a) nounwind {
 ;
 ; RV32I-LABEL: srli:
 ; RV32I: # %bb.0:
-; RV32I-NEXT: slli a2, a1, 24
 ; RV32I-NEXT: srli a0, a0, 8
+; RV32I-NEXT: slli a2, a1, 24
 ; RV32I-NEXT: or a0, a0, a2
 ; RV32I-NEXT: srli a1, a1, 8
 ; RV32I-NEXT: ret
@@ -157,8 +157,8 @@ define i64 @srai(i64 %a) nounwind {
 ;
 ; RV32I-LABEL: srai:
 ; RV32I: # %bb.0:
-; RV32I-NEXT: slli a2, a1, 23
 ; RV32I-NEXT: srli a0, a0, 9
+; RV32I-NEXT: slli a2, a1, 23
 ; RV32I-NEXT: or a0, a0, a2
 ; RV32I-NEXT: srai a1, a1, 9
 ; RV32I-NEXT: ret
@@ -194,9 +194,9 @@ define i64 @sub(i64 %a, i64 %b) nounwind {
 ;
 ; RV32I-LABEL: sub:
 ; RV32I: # %bb.0:
-; RV32I-NEXT: sltu a4, a0, a2
 ; RV32I-NEXT: sub a1, a1, a3
-; RV32I-NEXT: sub a1, a1, a4
+; RV32I-NEXT: sltu a3, a0, a2
+; RV32I-NEXT: sub a1, a1, a3
 ; RV32I-NEXT: sub a0, a0, a2
 ; RV32I-NEXT: ret
   %1 = sub i64 %a, %b
@@ -218,14 +218,13 @@ define i64 @sll(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT: mv a0, zero
 ; RV32I-NEXT: ret
 ; RV32I-NEXT: .LBB11_2:
-; RV32I-NEXT: sll a1, a1, a2
 ; RV32I-NEXT: addi a3, zero, 31
 ; RV32I-NEXT: sub a3, a3, a2
 ; RV32I-NEXT: srli a4, a0, 1
 ; RV32I-NEXT: srl a3, a4, a3
+; RV32I-NEXT: sll a1, a1, a2
 ; RV32I-NEXT: or a1, a1, a3
-; RV32I-NEXT: sll a2, a0, a2
-; RV32I-NEXT: mv a0, a2
+; RV32I-NEXT: sll a0, a0, a2
 ; RV32I-NEXT: ret
   %1 = shl i64 %a, %b
   ret i64 %1
@@ -305,14 +304,13 @@ define i64 @srl(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT: mv a1, zero
 ; RV32I-NEXT: ret
 ; RV32I-NEXT: .LBB15_2:
-; RV32I-NEXT: srl a0, a0, a2
 ; RV32I-NEXT: addi a3, zero, 31
 ; RV32I-NEXT: sub a3, a3, a2
 ; RV32I-NEXT: slli a4, a1, 1
 ; RV32I-NEXT: sll a3, a4, a3
+; RV32I-NEXT: srl a0, a0, a2
 ; RV32I-NEXT: or a0, a0, a3
-; RV32I-NEXT: srl a2, a1, a2
-; RV32I-NEXT: mv a1, a2
+; RV32I-NEXT: srl a1, a1, a2
 ; RV32I-NEXT: ret
   %1 = lshr i64 %a, %b
   ret i64 %1
@@ -333,11 +331,11 @@ define i64 @sra(i64 %a, i64 %b) nounwind {
 ; RV32I-NEXT: srai a1, a1, 31
 ; RV32I-NEXT: ret
 ; RV32I-NEXT: .LBB16_2:
-; RV32I-NEXT: srl a0, a0, a2
 ; RV32I-NEXT: addi a3, zero, 31
 ; RV32I-NEXT: sub a3, a3, a2
 ; RV32I-NEXT: slli a4, a1, 1
 ; RV32I-NEXT: sll a3, a4, a3
+; RV32I-NEXT: srl a0, a0, a2
 ; RV32I-NEXT: or a0, a0, a3
 ; RV32I-NEXT: sra a1, a1, a2
 ; RV32I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/arith-with-overflow.ll b/llvm/test/CodeGen/RISCV/arith-with-overflow.ll
index 8d72082..3fb6342 100644
--- a/llvm/test/CodeGen/RISCV/arith-with-overflow.ll
+++ b/llvm/test/CodeGen/RISCV/arith-with-overflow.ll
@@ -10,17 +10,17 @@ declare {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
 define i1 @sadd(i32 %a, i32 %b, i32* %c) nounwind {
 ; RV32I-LABEL: sadd:
 ; RV32I: # %bb.0: # %entry
-; RV32I-NEXT: addi a3, zero, -1
-; RV32I-NEXT: slt a4, a3, a1
-; RV32I-NEXT: slt a5, a3, a0
-; RV32I-NEXT: xor a4, a5, a4
-; RV32I-NEXT: seqz a4, a4
-; RV32I-NEXT: add a1, a0, a1
-; RV32I-NEXT: slt a0, a3, a1
-; RV32I-NEXT: xor a0, a5, a0
-; RV32I-NEXT: snez a0, a0
-; RV32I-NEXT: and a0, a4, a0
-; RV32I-NEXT: sw a1, 0(a2)
+; RV32I-NEXT: add a3, a0, a1
+; RV32I-NEXT: sw a3, 0(a2)
+; RV32I-NEXT: addi a2, zero, -1
+; RV32I-NEXT: slt a1, a2, a1
+; RV32I-NEXT: slt a0, a2, a0
+; RV32I-NEXT: slt a2, a2, a3
+; RV32I-NEXT: xor a2, a0, a2
+; RV32I-NEXT: xor a0, a0, a1
+; RV32I-NEXT: seqz a0, a0
+; RV32I-NEXT: snez a1, a2
+; RV32I-NEXT: and a0, a0, a1
 ; RV32I-NEXT: ret
 entry:
   %x = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
@@ -33,17 +33,17 @@ entry:
 define i1 @ssub(i32 %a, i32 %b, i32* %c) nounwind {
 ; RV32I-LABEL: ssub:
 ; RV32I: # %bb.0: # %entry
-; RV32I-NEXT: addi a3, zero, -1
-; RV32I-NEXT: slt a4, a3, a1
-; RV32I-NEXT: slt a5, a3, a0
-; RV32I-NEXT: xor a4, a5, a4
-; RV32I-NEXT: snez a4, a4
-; RV32I-NEXT: sub a1, a0, a1
-; RV32I-NEXT: slt a0, a3, a1
-; RV32I-NEXT: xor a0, a5, a0
+; RV32I-NEXT: sub a3, a0, a1
+; RV32I-NEXT: sw a3, 0(a2)
+; RV32I-NEXT: addi a2, zero, -1
+; RV32I-NEXT: slt a1, a2, a1
+; RV32I-NEXT: slt a0, a2, a0
+; RV32I-NEXT: slt a2, a2, a3
+; RV32I-NEXT: xor a2, a0, a2
+; RV32I-NEXT: xor a0, a0, a1
 ; RV32I-NEXT: snez a0, a0
-; RV32I-NEXT: and a0, a4, a0
-; RV32I-NEXT: sw a1, 0(a2)
+; RV32I-NEXT: snez a1, a2
+; RV32I-NEXT: and a0, a0, a1
 ; RV32I-NEXT: ret
 entry:
   %x = call {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
@@ -57,8 +57,8 @@ define i1 @uadd(i32 %a, i32 %b, i32* %c) nounwind {
 ; RV32I-LABEL: uadd:
 ; RV32I: # %bb.0: # %entry
 ; RV32I-NEXT: add a1, a0, a1
-; RV32I-NEXT: sltu a0, a1, a0
 ; RV32I-NEXT: sw a1, 0(a2)
+; RV32I-NEXT: sltu a0, a1, a0
 ; RV32I-NEXT: ret
 entry:
   %x = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
@@ -72,8 +72,8 @@ define i1 @usub(i32 %a, i32 %b, i32* %c) nounwind {
 ; RV32I-LABEL: usub:
 ; RV32I: # %bb.0: # %entry
 ; RV32I-NEXT: sub a1, a0, a1
-; RV32I-NEXT: sltu a0, a0, a1
 ; RV32I-NEXT: sw a1, 0(a2)
+; RV32I-NEXT: sltu a0, a0, a1
 ; RV32I-NEXT: ret
 entry:
   %x = call {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg-flag.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
index 082b92b..b331b4b 100644
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
@@ -7,6 +7,7 @@
 ; higher bits were masked to zero for the comparison.
 define i1 @cmpxchg_i32_seq_cst_seq_cst(i32* %ptr, i32 signext %cmp,
+                                       i32 signext %val) nounwind {
 ; RV64IA-LABEL: cmpxchg_i32_seq_cst_seq_cst:
 ; RV64IA: # %bb.0: # %entry
 ; RV64IA-NEXT: .LBB0_1: # %entry
@@ -21,7 +22,6 @@ define i1 @cmpxchg_i32_seq_cst_seq_cst(i32* %ptr, i32 signext %cmp,
 ; RV64IA-NEXT: xor a0, a3, a1
 ; RV64IA-NEXT: seqz a0, a0
 ; RV64IA-NEXT: ret
-                                       i32 signext %val) nounwind {
 entry:
   %0 = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst seq_cst
   %1 = extractvalue { i32, i1 } %0, 1
diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
index 43da05e..a4526b7 100644
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
@@ -24,24 +24,24 @@ define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind
 ;
 ; RV32IA-LABEL: cmpxchg_i8_monotonic_monotonic:
 ; RV32IA: # %bb.0:
-; RV32IA-NEXT: andi a3, a0, -4
-; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a0, a0, 24
+; RV32IA-NEXT: slli a3, a0, 3
+; RV32IA-NEXT: andi a3, a3, 24
 ; RV32IA-NEXT: addi a4, zero, 255
-; RV32IA-NEXT: sll a4, a4, a0
-; RV32IA-NEXT: andi a1, a1, 255
-; RV32IA-NEXT: sll a1, a1, a0
+; RV32IA-NEXT: sll a4, a4, a3
 ; RV32IA-NEXT: andi a2, a2, 255
-; RV32IA-NEXT: sll a0, a2, a0
+; RV32IA-NEXT: sll a2, a2, a3
+; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: sll a1, a1, a3
+; RV32IA-NEXT: andi a0, a0, -4
 ; RV32IA-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w a2, (a3)
-; RV32IA-NEXT: and a5, a2, a4
+; RV32IA-NEXT: lr.w a3, (a0)
+; RV32IA-NEXT: and a5, a3, a4
 ; RV32IA-NEXT: bne a5, a1, .LBB0_3
 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB0_1 Depth=1
-; RV32IA-NEXT: xor a5, a2, a0
+; RV32IA-NEXT: xor a5, a3, a2
 ; RV32IA-NEXT: and a5, a5, a4
-; RV32IA-NEXT: xor a5, a2, a5
-; RV32IA-NEXT: sc.w a5, a5, (a3)
+; RV32IA-NEXT: xor a5, a3, a5
+; RV32IA-NEXT: sc.w a5, a5, (a0)
 ; RV32IA-NEXT: bnez a5, .LBB0_1
 ; RV32IA-NEXT: .LBB0_3:
 ; RV32IA-NEXT: ret
@@ -61,24 +61,24 @@ define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind
 ;
 ; RV64IA-LABEL: cmpxchg_i8_monotonic_monotonic:
 ; RV64IA: # %bb.0:
-; RV64IA-NEXT: andi a3, a0, -4
-; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a0, a0, 24
+; RV64IA-NEXT: slli a3, a0, 3
+; RV64IA-NEXT: andi a3, a3, 24
 ; RV64IA-NEXT: addi a4, zero, 255
-; RV64IA-NEXT: sllw a4, a4, a0
-; RV64IA-NEXT: andi a1, a1, 255
-; RV64IA-NEXT: sllw a1, a1, a0
+; RV64IA-NEXT: sllw a4, a4, a3
 ; RV64IA-NEXT: andi a2, a2, 255
-; RV64IA-NEXT: sllw a0, a2, a0
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
 ; RV64IA-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT: lr.w a2, (a3)
-; RV64IA-NEXT: and a5, a2, a4
+; RV64IA-NEXT: lr.w a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
 ; RV64IA-NEXT: bne a5, a1, .LBB0_3
 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB0_1 Depth=1
-; RV64IA-NEXT: xor a5, a2, a0
+; RV64IA-NEXT: xor a5, a3, a2
 ; RV64IA-NEXT: and a5, a5, a4
-; RV64IA-NEXT: xor a5, a2, a5
-; RV64IA-NEXT: sc.w a5, a5, (a3)
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
 ; RV64IA-NEXT: bnez a5, .LBB0_1
 ; RV64IA-NEXT: .LBB0_3:
 ; RV64IA-NEXT: ret
@@ -102,24 +102,24 @@ define void @cmpxchg_i8_acquire_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ;
 ; RV32IA-LABEL: cmpxchg_i8_acquire_monotonic:
 ; RV32IA: # %bb.0:
-; RV32IA-NEXT: andi a3, a0, -4
-; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a0, a0, 24
+; RV32IA-NEXT: slli a3, a0, 3
+; RV32IA-NEXT: andi a3, a3, 24
 ; RV32IA-NEXT: addi a4, zero, 255
-; RV32IA-NEXT: sll a4, a4, a0
-; RV32IA-NEXT: andi a1, a1, 255
-; RV32IA-NEXT: sll a1, a1, a0
+; RV32IA-NEXT: sll a4, a4, a3
 ; RV32IA-NEXT: andi a2, a2, 255
-; RV32IA-NEXT: sll a0, a2, a0
+; RV32IA-NEXT: sll a2, a2, a3
+; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: sll a1, a1, a3
+; RV32IA-NEXT: andi a0, a0, -4
 ; RV32IA-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w.aq a2, (a3)
-; RV32IA-NEXT: and a5, a2, a4
+; RV32IA-NEXT: lr.w.aq a3, (a0)
+; RV32IA-NEXT: and a5, a3, a4
 ; RV32IA-NEXT: bne a5, a1, .LBB1_3
 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB1_1 Depth=1
-; RV32IA-NEXT: xor a5, a2, a0
+; RV32IA-NEXT: xor a5, a3, a2
 ; RV32IA-NEXT: and a5, a5, a4
-; RV32IA-NEXT: xor a5, a2, a5
-; RV32IA-NEXT: sc.w a5, a5, (a3)
+; RV32IA-NEXT: xor a5, a3, a5
+; RV32IA-NEXT: sc.w a5, a5, (a0)
 ; RV32IA-NEXT: bnez a5, .LBB1_1
 ; RV32IA-NEXT: .LBB1_3:
 ; RV32IA-NEXT: ret
@@ -139,24 +139,24 @@ define void @cmpxchg_i8_acquire_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ;
 ; RV64IA-LABEL: cmpxchg_i8_acquire_monotonic:
 ; RV64IA: # %bb.0:
-; RV64IA-NEXT: andi a3, a0, -4
-; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a0, a0, 24
+; RV64IA-NEXT: slli a3, a0, 3
+; RV64IA-NEXT: andi a3, a3, 24
 ; RV64IA-NEXT: addi a4, zero, 255
-; RV64IA-NEXT: sllw a4, a4, a0
-; RV64IA-NEXT: andi a1, a1, 255
-; RV64IA-NEXT: sllw a1, a1, a0
+; RV64IA-NEXT: sllw a4, a4, a3
 ; RV64IA-NEXT: andi a2, a2, 255
-; RV64IA-NEXT: sllw a0, a2, a0
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
 ; RV64IA-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT: lr.w.aq a2, (a3)
-; RV64IA-NEXT: and a5, a2, a4
+; RV64IA-NEXT: lr.w.aq a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
 ; RV64IA-NEXT: bne a5, a1, .LBB1_3
 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB1_1 Depth=1
-; RV64IA-NEXT: xor a5, a2, a0
+; RV64IA-NEXT: xor a5, a3, a2
 ; RV64IA-NEXT: and a5, a5, a4
-; RV64IA-NEXT: xor a5, a2, a5
-; RV64IA-NEXT: sc.w a5, a5, (a3)
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
 ; RV64IA-NEXT: bnez a5, .LBB1_1
 ; RV64IA-NEXT: .LBB1_3:
 ; RV64IA-NEXT: ret
@@ -180,24 +180,24 @@ define void @cmpxchg_i8_acquire_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ;
 ; RV32IA-LABEL: cmpxchg_i8_acquire_acquire:
 ; RV32IA: # %bb.0:
-; RV32IA-NEXT: andi a3, a0, -4
-; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a0, a0, 24
+; RV32IA-NEXT: slli a3, a0, 3
+; RV32IA-NEXT: andi a3, a3, 24
 ; RV32IA-NEXT: addi a4, zero, 255
-; RV32IA-NEXT: sll a4, a4, a0
-; RV32IA-NEXT: andi a1, a1, 255
-; RV32IA-NEXT: sll a1, a1, a0
+; RV32IA-NEXT: sll a4, a4, a3
 ; RV32IA-NEXT: andi a2, a2, 255
-; RV32IA-NEXT: sll a0, a2, a0
+; RV32IA-NEXT: sll a2, a2, a3
+; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: sll a1, a1, a3
+; RV32IA-NEXT: andi a0, a0, -4
 ; RV32IA-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w.aq a2, (a3)
-; RV32IA-NEXT: and a5, a2, a4
+; RV32IA-NEXT: lr.w.aq a3, (a0)
+; RV32IA-NEXT: and a5, a3, a4
 ; RV32IA-NEXT: bne a5, a1, .LBB2_3
 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB2_1 Depth=1
-; RV32IA-NEXT: xor a5, a2, a0
+; RV32IA-NEXT: xor a5, a3, a2
 ; RV32IA-NEXT: and a5, a5, a4
-; RV32IA-NEXT: xor a5, a2, a5
-; RV32IA-NEXT: sc.w a5, a5, (a3)
+; RV32IA-NEXT: xor a5, a3, a5
+; RV32IA-NEXT: sc.w a5, a5, (a0)
 ; RV32IA-NEXT: bnez a5, .LBB2_1
 ; RV32IA-NEXT: .LBB2_3:
 ; RV32IA-NEXT: ret
@@ -217,24 +217,24 @@ define void @cmpxchg_i8_acquire_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ;
 ; RV64IA-LABEL: cmpxchg_i8_acquire_acquire:
 ; RV64IA: # %bb.0:
-; RV64IA-NEXT: andi a3, a0, -4
-; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a0, a0, 24
+; RV64IA-NEXT: slli a3, a0, 3
+; RV64IA-NEXT: andi a3, a3, 24
 ; RV64IA-NEXT: addi a4, zero, 255
-; RV64IA-NEXT: sllw a4, a4, a0
-; RV64IA-NEXT: andi a1, a1, 255
-; RV64IA-NEXT: sllw a1, a1, a0
+; RV64IA-NEXT: sllw a4, a4, a3
 ; RV64IA-NEXT: andi a2, a2, 255
-; RV64IA-NEXT: sllw a0, a2, a0
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
 ; RV64IA-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT: lr.w.aq a2, (a3)
-; RV64IA-NEXT: and a5, a2, a4
+; RV64IA-NEXT: lr.w.aq a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
 ; RV64IA-NEXT: bne a5, a1, .LBB2_3
 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB2_1 Depth=1
-; RV64IA-NEXT: xor a5, a2, a0
+; RV64IA-NEXT: xor a5, a3, a2
 ; RV64IA-NEXT: and a5, a5, a4
-; RV64IA-NEXT: xor a5, a2, a5
-; RV64IA-NEXT: sc.w a5, a5, (a3)
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
 ; RV64IA-NEXT: bnez a5, .LBB2_1
 ; RV64IA-NEXT: .LBB2_3:
 ; RV64IA-NEXT: ret
@@ -258,24 +258,24 @@ define void @cmpxchg_i8_release_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ;
 ; RV32IA-LABEL: cmpxchg_i8_release_monotonic:
 ; RV32IA: # %bb.0:
-; RV32IA-NEXT: andi a3, a0, -4
-; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a0, a0, 24
+; RV32IA-NEXT: slli a3, a0, 3
+; RV32IA-NEXT: andi a3, a3, 24
 ; RV32IA-NEXT: addi a4, zero, 255
-; RV32IA-NEXT: sll a4, a4, a0
-; RV32IA-NEXT: andi a1, a1, 255
-; RV32IA-NEXT: sll a1, a1, a0
+; RV32IA-NEXT: sll a4, a4, a3
 ; RV32IA-NEXT: andi a2, a2, 255
-; RV32IA-NEXT: sll a0, a2, a0
+; RV32IA-NEXT: sll a2, a2, a3
+; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: sll a1, a1, a3
+; RV32IA-NEXT: andi a0, a0, -4
 ; RV32IA-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w a2, (a3)
-; RV32IA-NEXT: and a5, a2, a4
+; RV32IA-NEXT: lr.w a3, (a0)
+; RV32IA-NEXT: and a5, a3, a4
 ; RV32IA-NEXT: bne a5, a1, .LBB3_3
 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB3_1 Depth=1
-; RV32IA-NEXT: xor a5, a2, a0
+; RV32IA-NEXT: xor a5, a3, a2
 ; RV32IA-NEXT: and a5, a5, a4
-; RV32IA-NEXT: xor a5, a2, a5
-; RV32IA-NEXT: sc.w.rl a5, a5, (a3)
+; RV32IA-NEXT: xor a5, a3, a5
+; RV32IA-NEXT: sc.w.rl a5, a5, (a0)
 ; RV32IA-NEXT: bnez a5, .LBB3_1
 ; RV32IA-NEXT: .LBB3_3:
 ; RV32IA-NEXT: ret
@@ -295,24 +295,24 @@ define void @cmpxchg_i8_release_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ;
 ; RV64IA-LABEL: cmpxchg_i8_release_monotonic:
 ; RV64IA: # %bb.0:
-; RV64IA-NEXT: andi a3, a0, -4
-; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a0, a0, 24
+; RV64IA-NEXT: slli a3, a0, 3
+; RV64IA-NEXT: andi a3, a3, 24
 ; RV64IA-NEXT: addi a4, zero, 255
-; RV64IA-NEXT: sllw a4, a4, a0
-; RV64IA-NEXT: andi a1, a1, 255
-; RV64IA-NEXT: sllw a1, a1, a0
+; RV64IA-NEXT: sllw a4, a4, a3
 ; RV64IA-NEXT: andi a2, a2, 255
-; RV64IA-NEXT: sllw a0, a2, a0
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
 ; RV64IA-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT: lr.w a2, (a3)
-; RV64IA-NEXT: and a5, a2, a4
+; RV64IA-NEXT: lr.w a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
 ; RV64IA-NEXT: bne a5, a1, .LBB3_3
 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB3_1 Depth=1
-; RV64IA-NEXT: xor a5, a2, a0
+; RV64IA-NEXT: xor a5, a3, a2
 ; RV64IA-NEXT: and a5, a5, a4
-; RV64IA-NEXT: xor a5, a2, a5
-; RV64IA-NEXT: sc.w.rl a5, a5, (a3)
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
 ; RV64IA-NEXT: bnez a5, .LBB3_1
 ; RV64IA-NEXT: .LBB3_3:
 ; RV64IA-NEXT: ret
@@ -336,24 +336,24 @@ define void @cmpxchg_i8_release_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ;
 ; RV32IA-LABEL: cmpxchg_i8_release_acquire:
 ; RV32IA: # %bb.0:
-; RV32IA-NEXT: andi a3, a0, -4
-; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a0, a0, 24
+; RV32IA-NEXT: slli a3, a0, 3
+; RV32IA-NEXT: andi a3, a3, 24
 ; RV32IA-NEXT: addi a4, zero, 255
-; RV32IA-NEXT: sll a4, a4, a0
-; RV32IA-NEXT: andi a1, a1, 255
-; RV32IA-NEXT: sll a1, a1, a0
+; RV32IA-NEXT: sll a4, a4, a3
 ; RV32IA-NEXT: andi a2, a2, 255
-; RV32IA-NEXT: sll a0, a2, a0
+; RV32IA-NEXT: sll a2, a2, a3
+; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: sll a1, a1, a3
+; RV32IA-NEXT: andi a0, a0, -4
 ; RV32IA-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w a2, (a3)
-; RV32IA-NEXT: and a5, a2, a4
+; RV32IA-NEXT: lr.w a3, (a0)
+; RV32IA-NEXT: and a5, a3, a4
 ; RV32IA-NEXT: bne a5, a1, .LBB4_3
 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB4_1 Depth=1
-; RV32IA-NEXT: xor a5, a2, a0
+; RV32IA-NEXT: xor a5, a3, a2
 ; RV32IA-NEXT: and a5, a5, a4
-; RV32IA-NEXT: xor a5, a2, a5
-; RV32IA-NEXT: sc.w.rl a5, a5, (a3)
+; RV32IA-NEXT: xor a5, a3, a5
+; RV32IA-NEXT: sc.w.rl a5, a5, (a0)
 ; RV32IA-NEXT: bnez a5, .LBB4_1
 ; RV32IA-NEXT: .LBB4_3:
 ; RV32IA-NEXT: ret
@@ -373,24 +373,24 @@ define void @cmpxchg_i8_release_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ;
 ; RV64IA-LABEL: cmpxchg_i8_release_acquire:
 ; RV64IA: # %bb.0:
-; RV64IA-NEXT: andi a3, a0, -4
-; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a0, a0, 24
+; RV64IA-NEXT: slli a3, a0, 3
+; RV64IA-NEXT: andi a3, a3, 24
 ; RV64IA-NEXT: addi a4, zero, 255
-; RV64IA-NEXT: sllw a4, a4, a0
-; RV64IA-NEXT: andi a1, a1, 255
-; RV64IA-NEXT: sllw a1, a1, a0
+; RV64IA-NEXT: sllw a4, a4, a3
 ; RV64IA-NEXT: andi a2, a2, 255
-; RV64IA-NEXT: sllw a0, a2, a0
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
 ; RV64IA-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT: lr.w a2, (a3)
-; RV64IA-NEXT: and a5, a2, a4
+; RV64IA-NEXT: lr.w a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
 ; RV64IA-NEXT: bne a5, a1, .LBB4_3
 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB4_1 Depth=1
-; RV64IA-NEXT: xor a5, a2, a0
+; RV64IA-NEXT: xor a5, a3, a2
 ; RV64IA-NEXT: and a5, a5, a4
-; RV64IA-NEXT: xor a5, a2, a5
-; RV64IA-NEXT: sc.w.rl a5, a5, (a3)
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
 ; RV64IA-NEXT: bnez a5, .LBB4_1
 ; RV64IA-NEXT: .LBB4_3:
 ; RV64IA-NEXT: ret
@@ -414,24 +414,24 @@ define void @cmpxchg_i8_acq_rel_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ;
 ; RV32IA-LABEL: cmpxchg_i8_acq_rel_monotonic:
 ; RV32IA: # %bb.0:
-; RV32IA-NEXT: andi a3, a0, -4
-; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a0, a0, 24
+; RV32IA-NEXT: slli a3, a0, 3
+; RV32IA-NEXT: andi a3, a3, 24
 ; RV32IA-NEXT: addi a4, zero, 255
-; RV32IA-NEXT: sll a4, a4, a0
-; RV32IA-NEXT: andi a1, a1, 255
-; RV32IA-NEXT: sll a1, a1, a0
+; RV32IA-NEXT: sll a4, a4, a3
 ; RV32IA-NEXT: andi a2, a2, 255
-; RV32IA-NEXT: sll a0, a2, a0
+; RV32IA-NEXT: sll a2, a2, a3
+; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: sll a1, a1, a3
+; RV32IA-NEXT: andi a0, a0, -4
 ; RV32IA-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w.aq a2, (a3)
-; RV32IA-NEXT: and a5, a2, a4
+; RV32IA-NEXT: lr.w.aq a3, (a0)
+; RV32IA-NEXT: and a5, a3, a4
 ; RV32IA-NEXT: bne a5, a1, .LBB5_3
 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1
-; RV32IA-NEXT: xor a5, a2, a0
+; RV32IA-NEXT: xor a5, a3, a2
 ; RV32IA-NEXT: and a5, a5, a4
-; RV32IA-NEXT: xor a5, a2, a5
-; RV32IA-NEXT: sc.w.rl a5, a5, (a3)
+; RV32IA-NEXT: xor a5, a3, a5
+; RV32IA-NEXT: sc.w.rl a5, a5, (a0)
 ; RV32IA-NEXT: bnez a5, .LBB5_1
 ; RV32IA-NEXT: .LBB5_3:
 ; RV32IA-NEXT: ret
@@ -451,24 +451,24 @@ define void @cmpxchg_i8_acq_rel_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ;
 ; RV64IA-LABEL: cmpxchg_i8_acq_rel_monotonic:
 ; RV64IA: # %bb.0:
-; RV64IA-NEXT: andi a3, a0, -4
-; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a0, a0, 24
+; RV64IA-NEXT: slli a3, a0, 3
+; RV64IA-NEXT: andi a3, a3, 24
 ; RV64IA-NEXT: addi a4, zero, 255
-; RV64IA-NEXT: sllw a4, a4, a0
-; RV64IA-NEXT: andi a1, a1, 255
-; RV64IA-NEXT: sllw a1, a1, a0
+; RV64IA-NEXT: sllw a4, a4, a3
 ; RV64IA-NEXT: andi a2, a2, 255
-; RV64IA-NEXT: sllw a0, a2, a0
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
 ; RV64IA-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT: lr.w.aq a2, (a3)
-; RV64IA-NEXT: and a5, a2, a4
+; RV64IA-NEXT: lr.w.aq a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
 ; RV64IA-NEXT: bne a5, a1, .LBB5_3
 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB5_1 Depth=1
-; RV64IA-NEXT: xor a5, a2, a0
+; RV64IA-NEXT: xor a5, a3, a2
 ; RV64IA-NEXT: and a5, a5, a4
-; RV64IA-NEXT: xor a5, a2, a5
-; RV64IA-NEXT: sc.w.rl a5, a5, (a3)
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
 ; RV64IA-NEXT: bnez a5, .LBB5_1
 ; RV64IA-NEXT: .LBB5_3:
 ; RV64IA-NEXT: ret
@@ -492,24 +492,24 @@ define void @cmpxchg_i8_acq_rel_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ;
 ; RV32IA-LABEL: cmpxchg_i8_acq_rel_acquire:
 ; RV32IA: # %bb.0:
-; RV32IA-NEXT: andi a3, a0, -4
-; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a0, a0, 24
+; RV32IA-NEXT: slli a3, a0, 3
+; RV32IA-NEXT: andi a3, a3, 24
 ; RV32IA-NEXT: addi a4, zero, 255
-; RV32IA-NEXT: sll a4, a4, a0
-; RV32IA-NEXT: andi a1, a1, 255
-; RV32IA-NEXT: sll a1, a1, a0
+; RV32IA-NEXT: sll a4, a4, a3
 ; RV32IA-NEXT: andi a2, a2, 255
-; RV32IA-NEXT: sll a0, a2, a0
+; RV32IA-NEXT: sll a2, a2, a3
+; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: sll a1, a1, a3
+; RV32IA-NEXT: andi a0, a0, -4
 ; RV32IA-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w.aq a2, (a3)
-; RV32IA-NEXT: and a5, a2, a4
+; RV32IA-NEXT: lr.w.aq a3, (a0)
+; RV32IA-NEXT: and a5, a3, a4
 ; RV32IA-NEXT: bne a5, a1, .LBB6_3
 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1
-; RV32IA-NEXT: xor a5, a2, a0
+; RV32IA-NEXT: xor a5, a3, a2
 ; RV32IA-NEXT: and a5, a5, a4
-; RV32IA-NEXT: xor a5, a2, a5
-; RV32IA-NEXT: sc.w.rl a5, a5, (a3)
+; RV32IA-NEXT: xor a5, a3, a5
+; RV32IA-NEXT: sc.w.rl a5, a5, (a0)
 ; RV32IA-NEXT: bnez a5, .LBB6_1
 ; RV32IA-NEXT: .LBB6_3:
 ; RV32IA-NEXT: ret
@@ -529,24 +529,24 @@ define void @cmpxchg_i8_acq_rel_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ;
 ; RV64IA-LABEL: cmpxchg_i8_acq_rel_acquire:
 ; RV64IA: # %bb.0:
-; RV64IA-NEXT: andi a3, a0, -4
-; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a0, a0, 24
+; RV64IA-NEXT: slli a3, a0, 3
+; RV64IA-NEXT: andi a3, a3, 24
 ; RV64IA-NEXT: addi a4, zero, 255
-; RV64IA-NEXT: sllw a4, a4, a0
-; RV64IA-NEXT: andi a1, a1, 255
-; RV64IA-NEXT: sllw a1, a1, a0
+; RV64IA-NEXT: sllw a4, a4, a3
 ; RV64IA-NEXT: andi a2, a2, 255
-; RV64IA-NEXT: sllw a0, a2, a0
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
 ; RV64IA-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT: lr.w.aq a2, (a3)
-; RV64IA-NEXT: and a5, a2, a4
+; RV64IA-NEXT: lr.w.aq a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
 ; RV64IA-NEXT: bne a5, a1, .LBB6_3
 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB6_1 Depth=1
-; RV64IA-NEXT: xor a5, a2, a0
+; RV64IA-NEXT: xor a5, a3, a2
 ; RV64IA-NEXT: and a5, a5, a4
-; RV64IA-NEXT: xor a5, a2, a5
-; RV64IA-NEXT: sc.w.rl a5, a5, (a3)
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
 ; RV64IA-NEXT: bnez a5, .LBB6_1
 ; RV64IA-NEXT: .LBB6_3:
 ; RV64IA-NEXT: ret
@@ -570,24 +570,24 @@ define void @cmpxchg_i8_seq_cst_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ;
 ; RV32IA-LABEL: cmpxchg_i8_seq_cst_monotonic:
 ; RV32IA: # %bb.0:
-; RV32IA-NEXT: andi a3, a0, -4
-; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a0, a0, 24
+; RV32IA-NEXT: slli a3, a0, 3
+; RV32IA-NEXT: andi a3, a3, 24
 ; RV32IA-NEXT: addi a4, zero, 255
-; RV32IA-NEXT: sll a4, a4, a0
-; RV32IA-NEXT: andi a1, a1, 255
-; RV32IA-NEXT: sll a1, a1, a0
+; RV32IA-NEXT: sll a4, a4, a3
 ; RV32IA-NEXT: andi a2, a2, 255
-; RV32IA-NEXT: sll a0, a2, a0
+; RV32IA-NEXT: sll a2, a2, a3
+; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: sll a1, a1, a3
+; RV32IA-NEXT: andi a0, a0, -4
 ; RV32IA-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w.aqrl a2, (a3)
-; RV32IA-NEXT: and a5, a2, a4
+; RV32IA-NEXT: lr.w.aqrl a3, (a0)
+; RV32IA-NEXT: and a5, a3, a4
 ; RV32IA-NEXT: bne a5, a1, .LBB7_3
 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1
-; RV32IA-NEXT: xor a5, a2, a0
+; RV32IA-NEXT: xor a5, a3, a2
 ; RV32IA-NEXT: and a5, a5, a4
-; RV32IA-NEXT: xor a5, a2, a5
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a3)
+; RV32IA-NEXT: xor a5, a3, a5
+; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0)
 ; RV32IA-NEXT: bnez a5, .LBB7_1
 ; RV32IA-NEXT: .LBB7_3:
 ; RV32IA-NEXT: ret
@@ -607,24 +607,24 @@ define void @cmpxchg_i8_seq_cst_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ;
 ; RV64IA-LABEL: cmpxchg_i8_seq_cst_monotonic:
 ; RV64IA: # %bb.0:
-; RV64IA-NEXT: andi a3, a0, -4
-; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a0, a0, 24
+; RV64IA-NEXT: slli a3, a0, 3
+; RV64IA-NEXT: andi a3, a3, 24
 ; RV64IA-NEXT: addi a4, zero, 255
-; RV64IA-NEXT: sllw a4, a4, a0
-; RV64IA-NEXT: andi a1, a1, 255
-; RV64IA-NEXT: sllw a1, a1, a0
+; RV64IA-NEXT: sllw a4, a4, a3
 ; RV64IA-NEXT: andi a2, a2, 255
-; RV64IA-NEXT: sllw a0, a2, a0
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
 ; RV64IA-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT: lr.w.aqrl a2, (a3)
-; RV64IA-NEXT: and a5, a2, a4
+; RV64IA-NEXT: lr.w.aqrl a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
 ; RV64IA-NEXT: bne a5, a1, .LBB7_3
 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB7_1 Depth=1
-; RV64IA-NEXT: xor a5, a2, a0
+; RV64IA-NEXT: xor a5, a3, a2
 ; RV64IA-NEXT: and a5, a5, a4
-; RV64IA-NEXT: xor a5, a2, a5
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a3)
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
 ; RV64IA-NEXT: bnez a5, .LBB7_1
 ; RV64IA-NEXT: .LBB7_3:
 ; RV64IA-NEXT: ret
@@ -648,24 +648,24 @@ define void @cmpxchg_i8_seq_cst_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ;
 ; RV32IA-LABEL: cmpxchg_i8_seq_cst_acquire:
 ; RV32IA: # %bb.0:
-; RV32IA-NEXT: andi a3, a0, -4
-; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a0, a0, 24
+; RV32IA-NEXT: slli a3, a0, 3
+; RV32IA-NEXT: andi a3, a3, 24
 ; RV32IA-NEXT: addi a4, zero, 255
-; RV32IA-NEXT: sll a4, a4, a0
-; RV32IA-NEXT: andi a1, a1, 255
-; RV32IA-NEXT: sll a1, a1, a0
+; RV32IA-NEXT: sll a4, a4, a3
 ; RV32IA-NEXT: andi a2, a2, 255
-; RV32IA-NEXT: sll a0, a2, a0
+; RV32IA-NEXT: sll a2, a2, a3
+; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: sll a1, a1, a3
+; RV32IA-NEXT: andi a0, a0, -4
 ; RV32IA-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w.aqrl a2, (a3)
-; RV32IA-NEXT: and a5, a2, a4
+; RV32IA-NEXT: lr.w.aqrl a3, (a0)
+; RV32IA-NEXT: and a5, a3, a4
 ; RV32IA-NEXT: bne a5, a1, .LBB8_3
 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB8_1 Depth=1
-; RV32IA-NEXT: xor a5, a2, a0
+; RV32IA-NEXT: xor a5, a3, a2
 ; RV32IA-NEXT: and a5, a5, a4
-; RV32IA-NEXT: xor a5, a2, a5
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a3)
+; RV32IA-NEXT: xor a5, a3, a5
+; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0)
 ; RV32IA-NEXT: bnez a5, .LBB8_1
 ; RV32IA-NEXT: .LBB8_3:
 ; RV32IA-NEXT: ret
@@ -685,24 +685,24 @@ define void @cmpxchg_i8_seq_cst_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ;
 ; RV64IA-LABEL: cmpxchg_i8_seq_cst_acquire:
 ; RV64IA: # %bb.0:
-; RV64IA-NEXT: andi a3, a0, -4
-; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a0, a0, 24
+; RV64IA-NEXT: slli a3, a0, 3
+; RV64IA-NEXT: andi a3, a3, 24
 ; RV64IA-NEXT: addi a4, zero, 255
-; RV64IA-NEXT: sllw a4, a4, a0
-; RV64IA-NEXT: andi a1, a1, 255
-; RV64IA-NEXT: sllw a1, a1, a0
+; RV64IA-NEXT: sllw a4, a4, a3
 ; RV64IA-NEXT: andi a2, a2, 255
-; RV64IA-NEXT: sllw a0, a2, a0
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
 ; RV64IA-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT: lr.w.aqrl a2, (a3)
-; RV64IA-NEXT: and a5, a2, a4
+; RV64IA-NEXT: lr.w.aqrl a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
 ; RV64IA-NEXT: bne a5, a1, .LBB8_3
 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB8_1 Depth=1
-; RV64IA-NEXT: xor a5, a2, a0
+; RV64IA-NEXT: xor a5, a3, a2
 ; RV64IA-NEXT: and a5, a5, a4
-; RV64IA-NEXT: xor a5, a2, a5
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a3)
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
 ; RV64IA-NEXT: bnez a5, .LBB8_1
 ; RV64IA-NEXT: .LBB8_3:
 ; RV64IA-NEXT: ret
@@ -726,24 +726,24 @@ define void @cmpxchg_i8_seq_cst_seq_cst(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ;
 ; RV32IA-LABEL: cmpxchg_i8_seq_cst_seq_cst:
 ; RV32IA: # %bb.0:
-; RV32IA-NEXT: andi a3, a0, -4
-; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a0, a0, 24
+; RV32IA-NEXT: slli a3, a0, 3
+; RV32IA-NEXT: andi a3, a3, 24
 ; RV32IA-NEXT: addi a4, zero, 255
-; RV32IA-NEXT: sll a4, a4, a0
-; RV32IA-NEXT: andi a1, a1, 255
-; RV32IA-NEXT: sll a1, a1, a0
+; RV32IA-NEXT: sll a4, a4, a3
 ; RV32IA-NEXT: andi a2, a2, 255
-; RV32IA-NEXT: sll a0, a2, a0
+; RV32IA-NEXT: sll a2, a2, a3
+; RV32IA-NEXT: andi a1, a1, 255
+; RV32IA-NEXT: sll a1, a1, a3
+; RV32IA-NEXT: andi a0, a0, -4
 ; RV32IA-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w.aqrl a2, (a3)
-; RV32IA-NEXT: and a5, a2, a4
+; RV32IA-NEXT: lr.w.aqrl a3, (a0)
+; RV32IA-NEXT: and a5, a3, a4
 ; RV32IA-NEXT: bne a5, a1, .LBB9_3
 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB9_1 Depth=1
-; RV32IA-NEXT: xor a5, a2, a0
+; RV32IA-NEXT: xor a5, a3, a2
 ; RV32IA-NEXT: and a5, a5, a4
-; RV32IA-NEXT: xor a5, a2, a5
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a3)
+; RV32IA-NEXT: xor a5, a3, a5
+; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0)
 ; RV32IA-NEXT: bnez a5, .LBB9_1
 ; RV32IA-NEXT: .LBB9_3:
 ; RV32IA-NEXT: ret
@@ -763,24 +763,24 @@ define void @cmpxchg_i8_seq_cst_seq_cst(i8* %ptr, i8 %cmp, i8 %val) nounwind {
 ;
 ; RV64IA-LABEL: cmpxchg_i8_seq_cst_seq_cst:
 ; RV64IA: # %bb.0:
-; RV64IA-NEXT: andi a3, a0, -4
-; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a0, a0, 24
+; RV64IA-NEXT: slli a3, a0, 3
+; RV64IA-NEXT: andi a3, a3, 24
 ; RV64IA-NEXT: addi a4, zero, 255
-; RV64IA-NEXT: sllw a4, a4, a0
-; RV64IA-NEXT: andi a1, a1, 255
-; RV64IA-NEXT: sllw a1, a1, a0
+; RV64IA-NEXT: sllw a4, a4, a3
 ; RV64IA-NEXT: andi a2, a2, 255
-; RV64IA-NEXT: sllw a0, a2, a0
+; RV64IA-NEXT: sllw a2, a2, a3
+; RV64IA-NEXT: andi a1, a1, 255
+; RV64IA-NEXT: sllw a1, a1, a3
+; RV64IA-NEXT: andi a0, a0, -4
 ; RV64IA-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT: lr.w.aqrl a2, (a3)
-; RV64IA-NEXT: and a5, a2, a4
+; RV64IA-NEXT: lr.w.aqrl a3, (a0)
+; RV64IA-NEXT: and a5, a3, a4
 ; RV64IA-NEXT: bne a5, a1, .LBB9_3
 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB9_1 Depth=1
-; RV64IA-NEXT: xor a5, a2, a0
+; RV64IA-NEXT: xor a5, a3, a2
 ; RV64IA-NEXT: and a5, a5, a4
-; RV64IA-NEXT: xor a5, a2, a5
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a3)
+; RV64IA-NEXT: xor a5, a3, a5
+; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0)
 ; RV64IA-NEXT: bnez a5, .LBB9_1
 ; RV64IA-NEXT: .LBB9_3:
 ; RV64IA-NEXT: ret
@@ -804,26 +804,26 @@ define void @cmpxchg_i16_monotonic_monotonic(i16* %ptr, i16 %cmp, i16 %val) noun
 ;
 ; RV32IA-LABEL: cmpxchg_i16_monotonic_monotonic:
 ; RV32IA: # %bb.0:
-; RV32IA-NEXT: andi a3, a0, -4
-; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a0, a0, 24
-; RV32IA-NEXT: lui a4, 16
-; RV32IA-NEXT: addi a4, a4, -1
-; RV32IA-NEXT: sll a5, a4, a0
-; RV32IA-NEXT: and a1, a1, a4
-; RV32IA-NEXT: sll a1, a1, a0
-; RV32IA-NEXT: and a2, a2, a4
-; RV32IA-NEXT: sll a0, a2, a0
+; RV32IA-NEXT: lui a3, 16
+; RV32IA-NEXT: addi a3, a3, -1
+; RV32IA-NEXT: and a1, a1, a3
+; RV32IA-NEXT: and a2, a2, a3
+; RV32IA-NEXT: slli a4, a0, 3
+; RV32IA-NEXT: andi a4, a4, 24
+; RV32IA-NEXT: sll a3, a3, a4
+; RV32IA-NEXT: sll a2, a2, a4
+; RV32IA-NEXT: sll a1, a1, a4
+; RV32IA-NEXT: andi a0, a0, -4
 ; RV32IA-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w a2, (a3)
-; RV32IA-NEXT: and a4, a2, a5
-; RV32IA-NEXT: bne a4, a1, .LBB10_3
+; RV32IA-NEXT: lr.w a4, (a0)
+; RV32IA-NEXT: and a5, a4, a3
+; RV32IA-NEXT: bne a5, a1, .LBB10_3
 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB10_1 Depth=1
-; RV32IA-NEXT: xor a4, a2, a0
-; RV32IA-NEXT: and a4, a4, a5
-; RV32IA-NEXT: xor a4, a2, a4
-; RV32IA-NEXT: sc.w a4, a4, (a3)
-; RV32IA-NEXT: bnez a4, .LBB10_1
+; RV32IA-NEXT: xor a5, a4, a2
+; RV32IA-NEXT: and a5, a5, a3
+; RV32IA-NEXT: xor a5, a4, a5
+; RV32IA-NEXT: sc.w a5, a5, (a0)
+; RV32IA-NEXT: bnez a5, .LBB10_1
 ; RV32IA-NEXT: .LBB10_3:
 ; RV32IA-NEXT: ret
 ;
@@ -842,26 +842,26 @@ define void @cmpxchg_i16_monotonic_monotonic(i16* %ptr, i16 %cmp, i16 %val) noun
 ;
 ; RV64IA-LABEL: cmpxchg_i16_monotonic_monotonic:
 ; RV64IA: # %bb.0:
-; RV64IA-NEXT: andi a3, a0, -4
-; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a0, a0, 24
-; RV64IA-NEXT: lui a4, 16
-; RV64IA-NEXT: addiw a4, a4, -1
-; RV64IA-NEXT: sllw a5, a4, a0
-; RV64IA-NEXT: and a1, a1, a4
-; RV64IA-NEXT: sllw a1, a1, a0
-; RV64IA-NEXT: and a2, a2, a4
-; RV64IA-NEXT: sllw a0, a2, a0
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: and a1, a1, a3
+; RV64IA-NEXT: and a2, a2, a3
+; RV64IA-NEXT: slli a4, a0, 3
+; RV64IA-NEXT: andi a4, a4, 24
+; RV64IA-NEXT: sllw a3, a3, a4
+; RV64IA-NEXT: sllw a2, a2, a4
+; RV64IA-NEXT: sllw a1, a1, a4
+; RV64IA-NEXT: andi a0, a0, -4
 ; RV64IA-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT: lr.w a2, (a3)
-; RV64IA-NEXT: and a4, a2, a5
-; RV64IA-NEXT: bne a4, a1, .LBB10_3
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: and a5, a4, a3
+; RV64IA-NEXT: bne a5, a1, .LBB10_3
 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB10_1 Depth=1
-; RV64IA-NEXT: xor a4, a2, a0
-; RV64IA-NEXT: and a4, a4, a5
-; RV64IA-NEXT: xor a4, a2, a4
-; RV64IA-NEXT: sc.w a4, a4, (a3)
-; RV64IA-NEXT: bnez a4, .LBB10_1
+; RV64IA-NEXT: xor a5, a4, a2
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB10_1
 ; RV64IA-NEXT: .LBB10_3:
 ; RV64IA-NEXT: ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val monotonic monotonic
@@ -884,26 +884,26 @@ define void @cmpxchg_i16_acquire_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ;
 ; RV32IA-LABEL: cmpxchg_i16_acquire_monotonic:
 ; RV32IA: # %bb.0:
-; RV32IA-NEXT: andi a3, a0, -4
-; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a0, a0, 24
-; RV32IA-NEXT: lui a4, 16
-; RV32IA-NEXT: addi a4, a4, -1
-; RV32IA-NEXT: sll a5, a4, a0
-; RV32IA-NEXT: and a1, a1, a4
-; RV32IA-NEXT: sll a1, a1, a0
-; RV32IA-NEXT: and a2, a2, a4
-; RV32IA-NEXT: sll a0, a2, a0
+; RV32IA-NEXT: lui a3, 16
+; RV32IA-NEXT: addi a3, a3, -1
+; RV32IA-NEXT: and a1, a1, a3
+; RV32IA-NEXT: and a2, a2, a3
+; RV32IA-NEXT: slli a4, a0, 3
+; RV32IA-NEXT: andi a4, a4, 24
+; RV32IA-NEXT: sll a3, a3, a4
+; RV32IA-NEXT: sll a2, a2, a4
+; RV32IA-NEXT: sll a1, a1, a4
+; RV32IA-NEXT: andi a0, a0, -4
 ; RV32IA-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w.aq a2, (a3)
-; RV32IA-NEXT: and a4, a2, a5
-; RV32IA-NEXT: bne a4, a1, .LBB11_3
+; RV32IA-NEXT: lr.w.aq a4, (a0)
+; RV32IA-NEXT: and a5, a4, a3
+; RV32IA-NEXT: bne a5, a1, .LBB11_3
 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1
-; RV32IA-NEXT: xor a4, a2, a0
-; RV32IA-NEXT: and a4, a4, a5
-; RV32IA-NEXT: xor a4, a2, a4
-; RV32IA-NEXT: sc.w a4, a4, (a3)
-; RV32IA-NEXT: bnez a4, .LBB11_1
+; RV32IA-NEXT: xor a5, a4, a2
+; RV32IA-NEXT: and a5, a5, a3
+; RV32IA-NEXT: xor a5, a4, a5
+; RV32IA-NEXT: sc.w a5, a5, (a0)
+; RV32IA-NEXT: bnez a5, .LBB11_1
 ; RV32IA-NEXT: .LBB11_3:
 ; RV32IA-NEXT: ret
 ;
@@ -922,26 +922,26 @@ define void @cmpxchg_i16_acquire_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ;
 ; RV64IA-LABEL: cmpxchg_i16_acquire_monotonic:
 ; RV64IA: # %bb.0:
-; RV64IA-NEXT: andi a3, a0, -4
-; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a0, a0, 24
-; RV64IA-NEXT: lui a4, 16
-; RV64IA-NEXT: addiw a4, a4, -1
-; RV64IA-NEXT: sllw a5, a4, a0
-; RV64IA-NEXT: and a1, a1, a4
-; RV64IA-NEXT: sllw a1, a1, a0
-; RV64IA-NEXT: and a2, a2, a4
-; RV64IA-NEXT: sllw a0, a2, a0
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: and a1, a1, a3
+; RV64IA-NEXT: and a2, a2, a3
+; RV64IA-NEXT: slli a4, a0, 3
+; RV64IA-NEXT: andi a4, a4, 24
+; RV64IA-NEXT: sllw a3, a3, a4
+; RV64IA-NEXT: sllw a2, a2, a4
+; RV64IA-NEXT: sllw a1, a1, a4
+; RV64IA-NEXT: andi a0, a0, -4
 ; RV64IA-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT: lr.w.aq a2, (a3)
-; RV64IA-NEXT: and a4, a2, a5
-; RV64IA-NEXT: bne a4, a1, .LBB11_3
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a5, a4, a3
+; RV64IA-NEXT: bne a5, a1, .LBB11_3
 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1
-; RV64IA-NEXT: xor a4, a2, a0
-; RV64IA-NEXT: and a4, a4, a5
-; RV64IA-NEXT: xor a4, a2, a4
-; RV64IA-NEXT: sc.w a4, a4, (a3)
-; RV64IA-NEXT: bnez a4, .LBB11_1
+; RV64IA-NEXT: xor a5, a4, a2
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB11_1
 ; RV64IA-NEXT: .LBB11_3:
 ; RV64IA-NEXT: ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire monotonic
@@ -964,26 +964,26 @@ define void @cmpxchg_i16_acquire_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ;
 ; RV32IA-LABEL: cmpxchg_i16_acquire_acquire:
 ; RV32IA: # %bb.0:
-; RV32IA-NEXT: andi a3, a0, -4
-; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a0, a0, 24
-; RV32IA-NEXT: lui a4, 16
-; RV32IA-NEXT: addi a4, a4, -1
-; RV32IA-NEXT: sll a5, a4, a0
-; RV32IA-NEXT: and a1, a1, a4
-; RV32IA-NEXT: sll a1, a1, a0
-; RV32IA-NEXT: and a2, a2, a4
-; RV32IA-NEXT: sll a0, a2, a0
+; RV32IA-NEXT: lui a3, 16
+; RV32IA-NEXT: addi a3, a3, -1
+; RV32IA-NEXT: and a1, a1, a3
+; RV32IA-NEXT: and a2, a2, a3
+; RV32IA-NEXT: slli a4, a0, 3
+; RV32IA-NEXT: andi a4, a4, 24
+; RV32IA-NEXT: sll a3, a3, a4
+; RV32IA-NEXT: sll a2, a2, a4
+; RV32IA-NEXT: sll a1, a1, a4
+; RV32IA-NEXT: andi a0, a0, -4
 ; RV32IA-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w.aq a2, (a3)
-; RV32IA-NEXT: and a4, a2, a5
-; RV32IA-NEXT: bne a4, a1, .LBB12_3
+; RV32IA-NEXT: lr.w.aq a4, (a0)
+; RV32IA-NEXT: and a5, a4, a3
+; RV32IA-NEXT: bne a5, a1, .LBB12_3
 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB12_1 Depth=1
-; RV32IA-NEXT: xor a4, a2, a0
-; RV32IA-NEXT: and a4, a4, a5
-; RV32IA-NEXT: xor a4, a2, a4
-; RV32IA-NEXT: sc.w a4, a4, (a3)
-; RV32IA-NEXT: bnez a4, .LBB12_1
+; RV32IA-NEXT: xor a5, a4, a2
+; RV32IA-NEXT: and a5, a5, a3
+; RV32IA-NEXT: xor a5, a4, a5
+; RV32IA-NEXT: sc.w a5, a5, (a0)
+; RV32IA-NEXT: bnez a5, .LBB12_1
 ; RV32IA-NEXT: .LBB12_3:
 ; RV32IA-NEXT: ret
 ;
@@ -1002,26 +1002,26 @@ define void @cmpxchg_i16_acquire_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ;
 ; RV64IA-LABEL: cmpxchg_i16_acquire_acquire:
 ; RV64IA: # %bb.0:
-; RV64IA-NEXT: andi a3, a0, -4
-; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a0, a0, 24
-; RV64IA-NEXT: lui a4, 16
-; RV64IA-NEXT: addiw a4, a4, -1
-; RV64IA-NEXT: sllw a5, a4, a0
-; RV64IA-NEXT: and a1, a1, a4
-; RV64IA-NEXT: sllw a1, a1, a0
-; RV64IA-NEXT: and a2, a2, a4
-; RV64IA-NEXT: sllw a0, a2, a0
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: and a1, a1, a3
+; RV64IA-NEXT: and a2, a2, a3
+; RV64IA-NEXT: slli a4, a0, 3
+; RV64IA-NEXT: andi a4, a4, 24
+; RV64IA-NEXT: sllw a3, a3, a4
+; RV64IA-NEXT: sllw a2, a2, a4
+; RV64IA-NEXT: sllw a1, a1, a4
+; RV64IA-NEXT: andi a0, a0, -4
 ; RV64IA-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT: lr.w.aq a2, (a3)
-; RV64IA-NEXT: and a4, a2, a5
-; RV64IA-NEXT: bne a4, a1, .LBB12_3
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a5, a4, a3
+; RV64IA-NEXT: bne a5, a1, .LBB12_3
 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB12_1 Depth=1
-; RV64IA-NEXT: xor a4, a2, a0
-; RV64IA-NEXT: and a4, a4, a5
-; RV64IA-NEXT: xor a4, a2, a4
-; RV64IA-NEXT: sc.w a4, a4, (a3)
-; RV64IA-NEXT: bnez a4, .LBB12_1
+; RV64IA-NEXT: xor a5, a4, a2
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB12_1
 ; RV64IA-NEXT: .LBB12_3:
 ; RV64IA-NEXT: ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire acquire
@@ -1044,26 +1044,26 @@ define void @cmpxchg_i16_release_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ;
 ; RV32IA-LABEL: cmpxchg_i16_release_monotonic:
 ; RV32IA: # %bb.0:
-; RV32IA-NEXT: andi a3, a0, -4
-; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a0, a0, 24
-; RV32IA-NEXT: lui a4, 16
-; RV32IA-NEXT: addi a4, a4, -1
-; RV32IA-NEXT: sll a5, a4, a0
-; RV32IA-NEXT: and a1, a1, a4
-; RV32IA-NEXT: sll a1, a1, a0
-; RV32IA-NEXT: and a2, a2, a4
-; RV32IA-NEXT: sll a0, a2, a0
+; RV32IA-NEXT: lui a3, 16
+; RV32IA-NEXT: addi a3, a3, -1
+; RV32IA-NEXT: and a1, a1, a3
+; RV32IA-NEXT: and a2, a2, a3
+; RV32IA-NEXT: slli a4, a0, 3
+; RV32IA-NEXT: andi a4, a4, 24
+; RV32IA-NEXT: sll a3, a3, a4
+; RV32IA-NEXT: sll a2, a2, a4
+; RV32IA-NEXT: sll a1, a1, a4
+; RV32IA-NEXT: andi a0, a0, -4
 ; RV32IA-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w a2, (a3)
-; RV32IA-NEXT: and a4, a2, a5
-; RV32IA-NEXT: bne a4, a1, .LBB13_3
+; RV32IA-NEXT: lr.w a4, (a0)
+; RV32IA-NEXT: and a5, a4, a3
+; RV32IA-NEXT: bne a5, a1, .LBB13_3
 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB13_1 Depth=1
-; RV32IA-NEXT: xor a4, a2, a0
-; RV32IA-NEXT: and a4, a4, a5
-; RV32IA-NEXT: xor a4, a2, a4
-; RV32IA-NEXT: sc.w.rl a4, a4, (a3)
-; RV32IA-NEXT: bnez a4, .LBB13_1
+; RV32IA-NEXT: xor a5, a4, a2
+; RV32IA-NEXT: and a5, a5, a3
+; RV32IA-NEXT: xor a5, a4, a5
+; RV32IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV32IA-NEXT: bnez a5, .LBB13_1
 ; RV32IA-NEXT: .LBB13_3:
 ; RV32IA-NEXT: ret
 ;
@@ -1082,26 +1082,26 @@ define void @cmpxchg_i16_release_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ;
 ; RV64IA-LABEL: cmpxchg_i16_release_monotonic:
 ; RV64IA: # %bb.0:
-; RV64IA-NEXT: andi a3, a0, -4
-; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a0, a0, 24
-; RV64IA-NEXT: lui a4, 16
-; RV64IA-NEXT: addiw a4, a4, -1
-; RV64IA-NEXT: sllw a5, a4, a0
-; RV64IA-NEXT: and a1, a1, a4
-; RV64IA-NEXT: sllw a1, a1, a0
-; RV64IA-NEXT: and a2, a2, a4
-; RV64IA-NEXT: sllw a0, a2, a0
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: and a1, a1, a3
+; RV64IA-NEXT: and a2, a2, a3
+; RV64IA-NEXT: slli a4, a0, 3
+; RV64IA-NEXT: andi a4, a4, 24
+; RV64IA-NEXT: sllw a3, a3, a4
+; RV64IA-NEXT: sllw a2, a2, a4
+; RV64IA-NEXT: sllw a1, a1, a4
+; RV64IA-NEXT: andi a0, a0, -4
 ; RV64IA-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT: lr.w a2, (a3)
-; RV64IA-NEXT: and a4, a2, a5
-; RV64IA-NEXT: bne a4, a1, .LBB13_3
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: and a5, a4, a3
+; RV64IA-NEXT: bne a5, a1, .LBB13_3
 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB13_1 Depth=1
-; RV64IA-NEXT: xor a4, a2, a0
-; RV64IA-NEXT: and a4, a4, a5
-; RV64IA-NEXT: xor a4, a2, a4
-; RV64IA-NEXT: sc.w.rl a4, a4, (a3)
-; RV64IA-NEXT: bnez a4, .LBB13_1
+; RV64IA-NEXT: xor a5, a4, a2
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB13_1
 ; RV64IA-NEXT: .LBB13_3:
 ; RV64IA-NEXT: ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release monotonic
@@ -1124,26 +1124,26 @@ define void @cmpxchg_i16_release_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ;
 ; RV32IA-LABEL: cmpxchg_i16_release_acquire:
 ; RV32IA: # %bb.0:
-; RV32IA-NEXT: andi a3, a0, -4
-; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a0, a0, 24
-; RV32IA-NEXT: lui a4, 16
-; RV32IA-NEXT: addi a4, a4, -1
-; RV32IA-NEXT: sll a5, a4, a0
-; RV32IA-NEXT: and a1, a1, a4
-; RV32IA-NEXT: sll a1, a1, a0
-; RV32IA-NEXT: and a2, a2, a4
-; RV32IA-NEXT: sll a0, a2, a0
+; RV32IA-NEXT: lui a3, 16
+; RV32IA-NEXT: addi a3, a3, -1
+; RV32IA-NEXT: and a1, a1, a3
+; RV32IA-NEXT: and a2, a2, a3
+; RV32IA-NEXT: slli a4, a0, 3
+; RV32IA-NEXT: andi a4, a4, 24
+; RV32IA-NEXT: sll a3, a3, a4
+; RV32IA-NEXT: sll a2, a2, a4
+; RV32IA-NEXT: sll a1, a1, a4
+; RV32IA-NEXT: andi a0, a0, -4
 ; RV32IA-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w a2, (a3)
-; RV32IA-NEXT: and a4, a2, a5
-; RV32IA-NEXT: bne a4, a1, .LBB14_3
+; RV32IA-NEXT: lr.w a4, (a0)
+; RV32IA-NEXT: and a5, a4, a3
+; RV32IA-NEXT: bne a5, a1, .LBB14_3
 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB14_1 Depth=1
-; RV32IA-NEXT: xor a4, a2, a0
-; RV32IA-NEXT: and a4, a4, a5
-; RV32IA-NEXT: xor a4, a2, a4
-; RV32IA-NEXT: sc.w.rl a4, a4, (a3)
-; RV32IA-NEXT: bnez a4, .LBB14_1
+; RV32IA-NEXT: xor a5, a4, a2
+; RV32IA-NEXT: and a5, a5, a3
+; RV32IA-NEXT: xor a5, a4, a5
+; RV32IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV32IA-NEXT: bnez a5, .LBB14_1
 ; RV32IA-NEXT: .LBB14_3:
 ; RV32IA-NEXT: ret
 ;
@@ -1162,26 +1162,26 @@ define void @cmpxchg_i16_release_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ;
 ; RV64IA-LABEL: cmpxchg_i16_release_acquire:
 ; RV64IA: # %bb.0:
-; RV64IA-NEXT: andi a3, a0, -4
-; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a0, a0, 24
-; RV64IA-NEXT: lui a4, 16
-; RV64IA-NEXT: addiw a4, a4, -1
-; RV64IA-NEXT: sllw a5, a4, a0
-; RV64IA-NEXT: and a1, a1, a4
-; RV64IA-NEXT: sllw a1, a1, a0
-; RV64IA-NEXT: and a2, a2, a4
-; RV64IA-NEXT: sllw a0, a2, a0
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: and a1, a1, a3
+; RV64IA-NEXT: and a2, a2, a3
+; RV64IA-NEXT: slli a4, a0, 3
+; RV64IA-NEXT: andi a4, a4, 24
+; RV64IA-NEXT: sllw a3, a3, a4
+; RV64IA-NEXT: sllw a2, a2, a4
+; RV64IA-NEXT: sllw a1, a1, a4
+; RV64IA-NEXT: andi a0, a0, -4
 ; RV64IA-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT: lr.w a2, (a3)
-; RV64IA-NEXT: and a4, a2, a5
-; RV64IA-NEXT: bne a4, a1, .LBB14_3
+; RV64IA-NEXT: lr.w a4, (a0)
+; RV64IA-NEXT: and a5, a4, a3
+; RV64IA-NEXT: bne a5, a1, .LBB14_3
 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB14_1 Depth=1
-; RV64IA-NEXT: xor a4, a2, a0
-; RV64IA-NEXT: and a4, a4, a5
-; RV64IA-NEXT: xor a4, a2, a4
-; RV64IA-NEXT: sc.w.rl a4, a4, (a3)
-; RV64IA-NEXT: bnez a4, .LBB14_1
+; RV64IA-NEXT: xor a5, a4, a2
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB14_1
 ; RV64IA-NEXT: .LBB14_3:
 ; RV64IA-NEXT: ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release acquire
@@ -1204,26 +1204,26 @@ define void @cmpxchg_i16_acq_rel_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ;
 ; RV32IA-LABEL: cmpxchg_i16_acq_rel_monotonic:
 ; RV32IA: # %bb.0:
-; RV32IA-NEXT: andi a3, a0, -4
-; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a0, a0, 24
-; RV32IA-NEXT: lui a4, 16
-; RV32IA-NEXT: addi a4, a4, -1
-; RV32IA-NEXT: sll a5, a4, a0
-; RV32IA-NEXT: and a1, a1, a4
-; RV32IA-NEXT: sll a1, a1, a0
-; RV32IA-NEXT: and a2, a2, a4
-; RV32IA-NEXT: sll a0, a2, a0
+; RV32IA-NEXT: lui a3, 16
+; RV32IA-NEXT: addi a3, a3, -1
+; RV32IA-NEXT: and a1, a1, a3
+; RV32IA-NEXT: and a2, a2, a3
+; RV32IA-NEXT: slli a4, a0, 3
+; RV32IA-NEXT: andi a4, a4, 24
+; RV32IA-NEXT: sll a3, a3, a4
+; RV32IA-NEXT: sll a2, a2, a4
+; RV32IA-NEXT: sll a1, a1, a4
+; RV32IA-NEXT: andi a0, a0, -4
 ; RV32IA-NEXT: .LBB15_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w.aq a2, (a3)
-; RV32IA-NEXT: and a4, a2, a5
-; RV32IA-NEXT: bne a4, a1, .LBB15_3
+; RV32IA-NEXT: lr.w.aq a4, (a0)
+; RV32IA-NEXT: and a5, a4, a3
+; RV32IA-NEXT: bne a5, a1, .LBB15_3
 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB15_1 Depth=1
-; RV32IA-NEXT: xor a4, a2, a0
-; RV32IA-NEXT: and a4, a4, a5
-; RV32IA-NEXT: xor a4, a2, a4
-; RV32IA-NEXT: sc.w.rl a4, a4, (a3)
-; RV32IA-NEXT: bnez a4, .LBB15_1
+; RV32IA-NEXT: xor a5, a4, a2
+; RV32IA-NEXT: and a5, a5, a3
+; RV32IA-NEXT: xor a5, a4, a5
+; RV32IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV32IA-NEXT: bnez a5, .LBB15_1
 ; RV32IA-NEXT: .LBB15_3:
 ; RV32IA-NEXT: ret
 ;
@@ -1242,26 +1242,26 @@ define void @cmpxchg_i16_acq_rel_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ;
 ; RV64IA-LABEL: cmpxchg_i16_acq_rel_monotonic:
 ; RV64IA: # %bb.0:
-; RV64IA-NEXT: andi a3, a0, -4
-; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a0, a0, 24
-; RV64IA-NEXT: lui a4, 16
-; RV64IA-NEXT: addiw a4, a4, -1
-; RV64IA-NEXT: sllw a5, a4, a0
-; RV64IA-NEXT: and a1, a1, a4
-; RV64IA-NEXT: sllw a1, a1, a0
-; RV64IA-NEXT: and a2, a2, a4
-; RV64IA-NEXT: sllw a0, a2, a0
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: and a1, a1, a3
+; RV64IA-NEXT: and a2, a2, a3
+; RV64IA-NEXT: slli a4, a0, 3
+; RV64IA-NEXT: andi a4, a4, 24
+; RV64IA-NEXT: sllw a3, a3, a4
+; RV64IA-NEXT: sllw a2, a2, a4
+; RV64IA-NEXT: sllw a1, a1, a4
+; RV64IA-NEXT: andi a0, a0, -4
 ; RV64IA-NEXT: .LBB15_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT: lr.w.aq a2, (a3)
-; RV64IA-NEXT: and a4, a2, a5
-; RV64IA-NEXT: bne a4, a1, .LBB15_3
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a5, a4, a3
+; RV64IA-NEXT: bne a5, a1, .LBB15_3
 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB15_1 Depth=1
-; RV64IA-NEXT: xor a4, a2, a0
-; RV64IA-NEXT: and a4, a4, a5
-; RV64IA-NEXT: xor a4, a2, a4
-; RV64IA-NEXT: sc.w.rl a4, a4, (a3)
-; RV64IA-NEXT: bnez a4, .LBB15_1
+; RV64IA-NEXT: xor a5, a4, a2
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB15_1
 ; RV64IA-NEXT: .LBB15_3:
 ; RV64IA-NEXT: ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel monotonic
@@ -1284,26 +1284,26 @@ define void @cmpxchg_i16_acq_rel_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ;
 ; RV32IA-LABEL: cmpxchg_i16_acq_rel_acquire:
 ; RV32IA: # %bb.0:
-; RV32IA-NEXT: andi a3, a0, -4
-; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a0, a0, 24
-; RV32IA-NEXT: lui a4, 16
-; RV32IA-NEXT: addi a4, a4, -1
-; RV32IA-NEXT: sll a5, a4, a0
-; RV32IA-NEXT: and a1, a1, a4
-; RV32IA-NEXT: sll a1, a1, a0
-; RV32IA-NEXT: and a2, a2, a4
-; RV32IA-NEXT: sll a0, a2, a0
+; RV32IA-NEXT: lui a3, 16
+; RV32IA-NEXT: addi a3, a3, -1
+; RV32IA-NEXT: and a1, a1, a3
+; RV32IA-NEXT: and a2, a2, a3
+; RV32IA-NEXT: slli a4, a0, 3
+; RV32IA-NEXT: andi a4, a4, 24
+; RV32IA-NEXT: sll a3, a3, a4
+; RV32IA-NEXT: sll a2, a2, a4
+; RV32IA-NEXT: sll a1, a1, a4
+; RV32IA-NEXT: andi a0, a0, -4
 ; RV32IA-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w.aq a2, (a3)
-; RV32IA-NEXT: and a4, a2, a5
-; RV32IA-NEXT: bne a4, a1, .LBB16_3
+; RV32IA-NEXT: lr.w.aq a4, (a0)
+; RV32IA-NEXT: and a5, a4, a3
+; RV32IA-NEXT: bne a5, a1, .LBB16_3
 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB16_1 Depth=1
-; RV32IA-NEXT: xor a4, a2, a0
-; RV32IA-NEXT: and a4, a4, a5
-; RV32IA-NEXT: xor a4, a2, a4
-; RV32IA-NEXT: sc.w.rl a4, a4, (a3)
-; RV32IA-NEXT: bnez a4, .LBB16_1
+; RV32IA-NEXT: xor a5, a4, a2
+; RV32IA-NEXT: and a5, a5, a3
+; RV32IA-NEXT: xor a5, a4, a5
+; RV32IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV32IA-NEXT: bnez a5, .LBB16_1
 ; RV32IA-NEXT: .LBB16_3:
 ; RV32IA-NEXT: ret
 ;
@@ -1322,26 +1322,26 @@ define void @cmpxchg_i16_acq_rel_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind
 ;
 ; RV64IA-LABEL: cmpxchg_i16_acq_rel_acquire:
 ; RV64IA: # %bb.0:
-; RV64IA-NEXT: andi a3, a0, -4
-; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a0, a0, 24
-; RV64IA-NEXT: lui a4, 16
-; RV64IA-NEXT: addiw a4, a4, -1
-; RV64IA-NEXT: sllw a5, a4, a0
-; RV64IA-NEXT: and a1, a1, a4
-; RV64IA-NEXT: sllw a1, a1, a0
-; RV64IA-NEXT: and a2, a2, a4
-; RV64IA-NEXT: sllw a0, a2, a0
+; RV64IA-NEXT: lui a3, 16
+; RV64IA-NEXT: addiw a3, a3, -1
+; RV64IA-NEXT: and a1, a1, a3
+; RV64IA-NEXT: and a2, a2, a3
+; RV64IA-NEXT: slli a4, a0, 3
+; RV64IA-NEXT: andi a4, a4, 24
+; RV64IA-NEXT: sllw a3, a3, a4
+; RV64IA-NEXT: sllw a2, a2, a4
+; RV64IA-NEXT: sllw a1, a1, a4
+; RV64IA-NEXT: andi a0, a0, -4
 ; RV64IA-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1
-; RV64IA-NEXT: lr.w.aq a2, (a3)
-; RV64IA-NEXT: and a4, a2, a5
-; RV64IA-NEXT: bne a4, a1, .LBB16_3
+; RV64IA-NEXT: lr.w.aq a4, (a0)
+; RV64IA-NEXT: and a5, a4, a3
+; RV64IA-NEXT: bne a5, a1, .LBB16_3
 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB16_1 Depth=1
-; RV64IA-NEXT: xor a4, a2, a0
-; RV64IA-NEXT: and a4, a4, a5
-; RV64IA-NEXT: xor a4, a2, a4
-; RV64IA-NEXT: sc.w.rl a4, a4, (a3)
-; RV64IA-NEXT: bnez a4, .LBB16_1
+; RV64IA-NEXT: xor a5, a4, a2
+; RV64IA-NEXT: and a5, a5, a3
+; RV64IA-NEXT: xor a5, a4, a5
+; RV64IA-NEXT: sc.w.rl a5, a5, (a0)
+; RV64IA-NEXT: bnez a5, .LBB16_1
 ; RV64IA-NEXT: .LBB16_3:
 ; RV64IA-NEXT: ret
   %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel acquire
@@ -1364,26 +1364,26 @@ define void @cmpxchg_i16_seq_cst_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ;
 ; RV32IA-LABEL: cmpxchg_i16_seq_cst_monotonic:
 ; RV32IA: # %bb.0:
-; RV32IA-NEXT: andi a3, a0, -4
-; RV32IA-NEXT: slli a0, a0, 3
-; RV32IA-NEXT: andi a0, a0, 24
-; RV32IA-NEXT: lui a4, 16
-; RV32IA-NEXT: addi a4, a4, -1
-; RV32IA-NEXT: sll a5, a4, a0
-; RV32IA-NEXT: and a1, a1, a4
-; RV32IA-NEXT: sll a1, a1, a0
-; RV32IA-NEXT: and a2, a2, a4
-; RV32IA-NEXT: sll a0, a2, a0
+; RV32IA-NEXT: lui a3, 16
+; RV32IA-NEXT: addi a3, a3, -1
+; RV32IA-NEXT: and a1, a1, a3
+; RV32IA-NEXT: and a2, a2, a3
+; RV32IA-NEXT: slli a4, a0, 3
+; RV32IA-NEXT: andi a4, a4, 24
+; RV32IA-NEXT: sll a3, a3, a4
+; RV32IA-NEXT: sll a2, a2, a4
+; RV32IA-NEXT: sll a1, a1, a4
+; RV32IA-NEXT: andi a0, a0, -4
 ; RV32IA-NEXT: .LBB17_1: # =>This Inner Loop Header: Depth=1
-; RV32IA-NEXT: lr.w.aqrl a2, (a3)
-; RV32IA-NEXT: and a4, a2, a5
-; RV32IA-NEXT: bne a4, a1, .LBB17_3
+; RV32IA-NEXT: lr.w.aqrl a4, (a0)
+; RV32IA-NEXT: and a5, a4, a3
+; RV32IA-NEXT: bne a5, a1, .LBB17_3
 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB17_1 Depth=1
-; RV32IA-NEXT: xor a4, a2, a0
-; RV32IA-NEXT: and a4, a4, a5
-; RV32IA-NEXT: xor a4, a2, a4
-; RV32IA-NEXT: sc.w.aqrl a4, a4, (a3)
-; RV32IA-NEXT: bnez a4, .LBB17_1
+; RV32IA-NEXT: xor a5, a4, a2
+; RV32IA-NEXT: and a5, a5, a3
+; RV32IA-NEXT: xor a5, a4, a5
+; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0)
+; RV32IA-NEXT: bnez a5, .LBB17_1
 ; RV32IA-NEXT: .LBB17_3:
 ; RV32IA-NEXT: ret
 ;
@@ -1402,26 +1402,26 @@ define void @cmpxchg_i16_seq_cst_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwi
 ;
 ; RV64IA-LABEL: cmpxchg_i16_seq_cst_monotonic:
 ; RV64IA: # %bb.0:
-; RV64IA-NEXT: andi a3, a0, -4
-; RV64IA-NEXT: slli a0, a0, 3
-; RV64IA-NEXT: andi a0, a0, 24
-; RV64IA-NEXT: lui a4, 16
RV64IA-NEXT: addiw a4, a4, -1 -; RV64IA-NEXT: sllw a5, a4, a0 -; RV64IA-NEXT: and a1, a1, a4 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: and a2, a2, a4 -; RV64IA-NEXT: sllw a0, a2, a0 +; RV64IA-NEXT: lui a3, 16 +; RV64IA-NEXT: addiw a3, a3, -1 +; RV64IA-NEXT: and a1, a1, a3 +; RV64IA-NEXT: and a2, a2, a3 +; RV64IA-NEXT: slli a4, a0, 3 +; RV64IA-NEXT: andi a4, a4, 24 +; RV64IA-NEXT: sllw a3, a3, a4 +; RV64IA-NEXT: sllw a2, a2, a4 +; RV64IA-NEXT: sllw a1, a1, a4 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB17_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a2, (a3) -; RV64IA-NEXT: and a4, a2, a5 -; RV64IA-NEXT: bne a4, a1, .LBB17_3 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a5, a4, a3 +; RV64IA-NEXT: bne a5, a1, .LBB17_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB17_1 Depth=1 -; RV64IA-NEXT: xor a4, a2, a0 -; RV64IA-NEXT: and a4, a4, a5 -; RV64IA-NEXT: xor a4, a2, a4 -; RV64IA-NEXT: sc.w.aqrl a4, a4, (a3) -; RV64IA-NEXT: bnez a4, .LBB17_1 +; RV64IA-NEXT: xor a5, a4, a2 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB17_1 ; RV64IA-NEXT: .LBB17_3: ; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst monotonic @@ -1444,26 +1444,26 @@ define void @cmpxchg_i16_seq_cst_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind ; ; RV32IA-LABEL: cmpxchg_i16_seq_cst_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a3, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a4, 16 -; RV32IA-NEXT: addi a4, a4, -1 -; RV32IA-NEXT: sll a5, a4, a0 -; RV32IA-NEXT: and a1, a1, a4 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: and a2, a2, a4 -; RV32IA-NEXT: sll a0, a2, a0 +; RV32IA-NEXT: lui a3, 16 +; RV32IA-NEXT: addi a3, a3, -1 +; RV32IA-NEXT: and a1, a1, a3 +; RV32IA-NEXT: and a2, a2, a3 +; RV32IA-NEXT: slli a4, a0, 3 +; RV32IA-NEXT: andi a4, a4, 24 +; RV32IA-NEXT: sll a3, a3, a4 +; RV32IA-NEXT: sll a2, a2, a4 +; RV32IA-NEXT: sll a1, a1, a4 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a2, (a3) -; RV32IA-NEXT: and a4, a2, a5 -; RV32IA-NEXT: bne a4, a1, .LBB18_3 +; RV32IA-NEXT: lr.w.aqrl a4, (a0) +; RV32IA-NEXT: and a5, a4, a3 +; RV32IA-NEXT: bne a5, a1, .LBB18_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB18_1 Depth=1 -; RV32IA-NEXT: xor a4, a2, a0 -; RV32IA-NEXT: and a4, a4, a5 -; RV32IA-NEXT: xor a4, a2, a4 -; RV32IA-NEXT: sc.w.aqrl a4, a4, (a3) -; RV32IA-NEXT: bnez a4, .LBB18_1 +; RV32IA-NEXT: xor a5, a4, a2 +; RV32IA-NEXT: and a5, a5, a3 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV32IA-NEXT: bnez a5, .LBB18_1 ; RV32IA-NEXT: .LBB18_3: ; RV32IA-NEXT: ret ; @@ -1482,26 +1482,26 @@ define void @cmpxchg_i16_seq_cst_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind ; ; RV64IA-LABEL: cmpxchg_i16_seq_cst_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a4, 16 -; RV64IA-NEXT: addiw a4, a4, -1 -; RV64IA-NEXT: sllw a5, a4, a0 -; RV64IA-NEXT: and a1, a1, a4 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: and a2, a2, a4 -; RV64IA-NEXT: sllw a0, a2, a0 +; RV64IA-NEXT: lui a3, 16 +; RV64IA-NEXT: addiw a3, a3, -1 +; RV64IA-NEXT: and a1, a1, a3 +; RV64IA-NEXT: and a2, a2, a3 +; RV64IA-NEXT: slli a4, a0, 3 +; RV64IA-NEXT: andi a4, a4, 24 +; RV64IA-NEXT: sllw a3, a3, a4 +; RV64IA-NEXT: sllw a2, a2, a4 +; RV64IA-NEXT: sllw a1, a1, a4 +; RV64IA-NEXT: andi a0, a0, -4 ; 
RV64IA-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a2, (a3) -; RV64IA-NEXT: and a4, a2, a5 -; RV64IA-NEXT: bne a4, a1, .LBB18_3 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a5, a4, a3 +; RV64IA-NEXT: bne a5, a1, .LBB18_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB18_1 Depth=1 -; RV64IA-NEXT: xor a4, a2, a0 -; RV64IA-NEXT: and a4, a4, a5 -; RV64IA-NEXT: xor a4, a2, a4 -; RV64IA-NEXT: sc.w.aqrl a4, a4, (a3) -; RV64IA-NEXT: bnez a4, .LBB18_1 +; RV64IA-NEXT: xor a5, a4, a2 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB18_1 ; RV64IA-NEXT: .LBB18_3: ; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst acquire @@ -1524,26 +1524,26 @@ define void @cmpxchg_i16_seq_cst_seq_cst(i16* %ptr, i16 %cmp, i16 %val) nounwind ; ; RV32IA-LABEL: cmpxchg_i16_seq_cst_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a3, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a4, 16 -; RV32IA-NEXT: addi a4, a4, -1 -; RV32IA-NEXT: sll a5, a4, a0 -; RV32IA-NEXT: and a1, a1, a4 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: and a2, a2, a4 -; RV32IA-NEXT: sll a0, a2, a0 +; RV32IA-NEXT: lui a3, 16 +; RV32IA-NEXT: addi a3, a3, -1 +; RV32IA-NEXT: and a1, a1, a3 +; RV32IA-NEXT: and a2, a2, a3 +; RV32IA-NEXT: slli a4, a0, 3 +; RV32IA-NEXT: andi a4, a4, 24 +; RV32IA-NEXT: sll a3, a3, a4 +; RV32IA-NEXT: sll a2, a2, a4 +; RV32IA-NEXT: sll a1, a1, a4 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB19_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a2, (a3) -; RV32IA-NEXT: and a4, a2, a5 -; RV32IA-NEXT: bne a4, a1, .LBB19_3 +; RV32IA-NEXT: lr.w.aqrl a4, (a0) +; RV32IA-NEXT: and a5, a4, a3 +; RV32IA-NEXT: bne a5, a1, .LBB19_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB19_1 Depth=1 -; RV32IA-NEXT: xor a4, a2, a0 -; RV32IA-NEXT: and a4, a4, a5 -; RV32IA-NEXT: xor a4, a2, a4 -; RV32IA-NEXT: sc.w.aqrl a4, a4, (a3) -; RV32IA-NEXT: bnez a4, .LBB19_1 +; RV32IA-NEXT: xor a5, a4, a2 +; RV32IA-NEXT: and a5, a5, a3 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV32IA-NEXT: bnez a5, .LBB19_1 ; RV32IA-NEXT: .LBB19_3: ; RV32IA-NEXT: ret ; @@ -1562,26 +1562,26 @@ define void @cmpxchg_i16_seq_cst_seq_cst(i16* %ptr, i16 %cmp, i16 %val) nounwind ; ; RV64IA-LABEL: cmpxchg_i16_seq_cst_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a3, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a4, 16 -; RV64IA-NEXT: addiw a4, a4, -1 -; RV64IA-NEXT: sllw a5, a4, a0 -; RV64IA-NEXT: and a1, a1, a4 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: and a2, a2, a4 -; RV64IA-NEXT: sllw a0, a2, a0 +; RV64IA-NEXT: lui a3, 16 +; RV64IA-NEXT: addiw a3, a3, -1 +; RV64IA-NEXT: and a1, a1, a3 +; RV64IA-NEXT: and a2, a2, a3 +; RV64IA-NEXT: slli a4, a0, 3 +; RV64IA-NEXT: andi a4, a4, 24 +; RV64IA-NEXT: sllw a3, a3, a4 +; RV64IA-NEXT: sllw a2, a2, a4 +; RV64IA-NEXT: sllw a1, a1, a4 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB19_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a2, (a3) -; RV64IA-NEXT: and a4, a2, a5 -; RV64IA-NEXT: bne a4, a1, .LBB19_3 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a5, a4, a3 +; RV64IA-NEXT: bne a5, a1, .LBB19_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB19_1 Depth=1 -; RV64IA-NEXT: xor a4, a2, a0 -; RV64IA-NEXT: and a4, a4, a5 -; RV64IA-NEXT: xor a4, a2, a4 -; RV64IA-NEXT: sc.w.aqrl a4, a4, (a3) -; RV64IA-NEXT: bnez a4, .LBB19_1 +; RV64IA-NEXT: 
xor a5, a4, a2 +; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) +; RV64IA-NEXT: bnez a5, .LBB19_1 ; RV64IA-NEXT: .LBB19_3: ; RV64IA-NEXT: ret %res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst seq_cst @@ -2173,13 +2173,12 @@ define void @cmpxchg_i64_acquire_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) -; RV32I-NEXT: mv a5, a4 ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: mv a3, a4 +; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: mv a5, zero ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) @@ -2190,13 +2189,12 @@ define void @cmpxchg_i64_acquire_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) -; RV32IA-NEXT: mv a5, a4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp -; RV32IA-NEXT: addi a4, zero, 2 ; RV32IA-NEXT: mv a2, a3 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: mv a3, a4 +; RV32IA-NEXT: addi a4, zero, 2 ; RV32IA-NEXT: mv a5, zero ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) @@ -2235,14 +2233,13 @@ define void @cmpxchg_i64_acquire_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) -; RV32I-NEXT: mv a6, a4 ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp +; RV32I-NEXT: mv a2, a3 +; RV32I-NEXT: mv a3, a4 ; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: addi a5, zero, 2 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: mv a3, a6 ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 @@ -2252,14 +2249,13 @@ define void @cmpxchg_i64_acquire_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) -; RV32IA-NEXT: mv a6, a4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp +; RV32IA-NEXT: mv a2, a3 +; RV32IA-NEXT: mv a3, a4 ; RV32IA-NEXT: addi a4, zero, 2 ; RV32IA-NEXT: addi a5, zero, 2 -; RV32IA-NEXT: mv a2, a3 -; RV32IA-NEXT: mv a3, a6 ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 @@ -2297,13 +2293,12 @@ define void @cmpxchg_i64_release_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) -; RV32I-NEXT: mv a5, a4 ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a4, zero, 3 ; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: mv a3, a4 +; RV32I-NEXT: addi a4, zero, 3 ; RV32I-NEXT: mv a5, zero ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) @@ -2314,13 +2309,12 @@ define void @cmpxchg_i64_release_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) -; RV32IA-NEXT: mv a5, a4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp -; RV32IA-NEXT: addi a4, zero, 3 ; RV32IA-NEXT: mv a2, a3 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: mv a3, a4 +; RV32IA-NEXT: addi a4, zero, 3 ; RV32IA-NEXT: mv a5, zero ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) @@ -2359,14 +2353,13 @@ define void 
@cmpxchg_i64_release_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) -; RV32I-NEXT: mv a6, a4 ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp +; RV32I-NEXT: mv a2, a3 +; RV32I-NEXT: mv a3, a4 ; RV32I-NEXT: addi a4, zero, 3 ; RV32I-NEXT: addi a5, zero, 2 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: mv a3, a6 ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 @@ -2376,14 +2369,13 @@ define void @cmpxchg_i64_release_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) -; RV32IA-NEXT: mv a6, a4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp +; RV32IA-NEXT: mv a2, a3 +; RV32IA-NEXT: mv a3, a4 ; RV32IA-NEXT: addi a4, zero, 3 ; RV32IA-NEXT: addi a5, zero, 2 -; RV32IA-NEXT: mv a2, a3 -; RV32IA-NEXT: mv a3, a6 ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 @@ -2421,13 +2413,12 @@ define void @cmpxchg_i64_acq_rel_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) -; RV32I-NEXT: mv a5, a4 ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp -; RV32I-NEXT: addi a4, zero, 4 ; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: mv a3, a4 +; RV32I-NEXT: addi a4, zero, 4 ; RV32I-NEXT: mv a5, zero ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) @@ -2438,13 +2429,12 @@ define void @cmpxchg_i64_acq_rel_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) -; RV32IA-NEXT: mv a5, a4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp -; RV32IA-NEXT: addi a4, zero, 4 ; RV32IA-NEXT: mv a2, a3 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: mv a3, a4 +; RV32IA-NEXT: addi a4, zero, 4 ; RV32IA-NEXT: mv a5, zero ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) @@ -2483,14 +2473,13 @@ define void @cmpxchg_i64_acq_rel_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) -; RV32I-NEXT: mv a6, a4 ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp +; RV32I-NEXT: mv a2, a3 +; RV32I-NEXT: mv a3, a4 ; RV32I-NEXT: addi a4, zero, 4 ; RV32I-NEXT: addi a5, zero, 2 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: mv a3, a6 ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 @@ -2500,14 +2489,13 @@ define void @cmpxchg_i64_acq_rel_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) -; RV32IA-NEXT: mv a6, a4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp +; RV32IA-NEXT: mv a2, a3 +; RV32IA-NEXT: mv a3, a4 ; RV32IA-NEXT: addi a4, zero, 4 ; RV32IA-NEXT: addi a5, zero, 2 -; RV32IA-NEXT: mv a2, a3 -; RV32IA-NEXT: mv a3, a6 ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 @@ -2545,13 +2533,12 @@ define void @cmpxchg_i64_seq_cst_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) -; RV32I-NEXT: mv a5, a4 ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp -; 
RV32I-NEXT: addi a4, zero, 5 ; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: mv a3, a4 +; RV32I-NEXT: addi a4, zero, 5 ; RV32I-NEXT: mv a5, zero ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) @@ -2562,13 +2549,12 @@ define void @cmpxchg_i64_seq_cst_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwi ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) -; RV32IA-NEXT: mv a5, a4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp -; RV32IA-NEXT: addi a4, zero, 5 ; RV32IA-NEXT: mv a2, a3 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: mv a3, a4 +; RV32IA-NEXT: addi a4, zero, 5 ; RV32IA-NEXT: mv a5, zero ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) @@ -2607,14 +2593,13 @@ define void @cmpxchg_i64_seq_cst_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) -; RV32I-NEXT: mv a6, a4 ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp +; RV32I-NEXT: mv a2, a3 +; RV32I-NEXT: mv a3, a4 ; RV32I-NEXT: addi a4, zero, 5 ; RV32I-NEXT: addi a5, zero, 2 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: mv a3, a6 ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 @@ -2624,14 +2609,13 @@ define void @cmpxchg_i64_seq_cst_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) -; RV32IA-NEXT: mv a6, a4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp +; RV32IA-NEXT: mv a2, a3 +; RV32IA-NEXT: mv a3, a4 ; RV32IA-NEXT: addi a4, zero, 5 ; RV32IA-NEXT: addi a5, zero, 2 -; RV32IA-NEXT: mv a2, a3 -; RV32IA-NEXT: mv a3, a6 ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 @@ -2669,14 +2653,13 @@ define void @cmpxchg_i64_seq_cst_seq_cst(i64* %ptr, i64 %cmp, i64 %val) nounwind ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) -; RV32I-NEXT: mv a6, a4 ; RV32I-NEXT: sw a2, 4(sp) ; RV32I-NEXT: sw a1, 0(sp) ; RV32I-NEXT: mv a1, sp +; RV32I-NEXT: mv a2, a3 +; RV32I-NEXT: mv a3, a4 ; RV32I-NEXT: addi a4, zero, 5 ; RV32I-NEXT: addi a5, zero, 5 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: mv a3, a6 ; RV32I-NEXT: call __atomic_compare_exchange_8 ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 @@ -2686,14 +2669,13 @@ define void @cmpxchg_i64_seq_cst_seq_cst(i64* %ptr, i64 %cmp, i64 %val) nounwind ; RV32IA: # %bb.0: ; RV32IA-NEXT: addi sp, sp, -16 ; RV32IA-NEXT: sw ra, 12(sp) -; RV32IA-NEXT: mv a6, a4 ; RV32IA-NEXT: sw a2, 4(sp) ; RV32IA-NEXT: sw a1, 0(sp) ; RV32IA-NEXT: mv a1, sp +; RV32IA-NEXT: mv a2, a3 +; RV32IA-NEXT: mv a3, a4 ; RV32IA-NEXT: addi a4, zero, 5 ; RV32IA-NEXT: addi a5, zero, 5 -; RV32IA-NEXT: mv a2, a3 -; RV32IA-NEXT: mv a3, a6 ; RV32IA-NEXT: call __atomic_compare_exchange_8 ; RV32IA-NEXT: lw ra, 12(sp) ; RV32IA-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll index dc396ef..c12a298 100644 --- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll +++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll @@ -21,23 +21,23 @@ define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xchg_i8_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi 
a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a4, (a2) +; RV32IA-NEXT: lr.w a4, (a0) ; RV32IA-NEXT: add a5, zero, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w a5, a5, (a2) +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB0_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i8_monotonic: @@ -52,23 +52,23 @@ define i8 @atomicrmw_xchg_i8_monotonic(i8* %a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xchg_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a4, (a2) +; RV64IA-NEXT: lr.w a4, (a0) ; RV64IA-NEXT: add a5, zero, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w a5, a5, (a2) +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB0_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b monotonic ret i8 %1 @@ -87,23 +87,23 @@ define i8 @atomicrmw_xchg_i8_acquire(i8* %a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xchg_i8_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a4, (a2) +; RV32IA-NEXT: lr.w.aq a4, (a0) ; RV32IA-NEXT: add a5, zero, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w a5, a5, (a2) +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB1_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i8_acquire: @@ -118,23 +118,23 @@ define i8 @atomicrmw_xchg_i8_acquire(i8* %a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xchg_i8_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a4, (a2) +; RV64IA-NEXT: lr.w.aq a4, (a0) ; RV64IA-NEXT: add a5, zero, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: 
sc.w a5, a5, (a2) +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB1_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b acquire ret i8 %1 @@ -153,23 +153,23 @@ define i8 @atomicrmw_xchg_i8_release(i8* %a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xchg_i8_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a4, (a2) +; RV32IA-NEXT: lr.w a4, (a0) ; RV32IA-NEXT: add a5, zero, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w.rl a5, a5, (a2) +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB2_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i8_release: @@ -184,23 +184,23 @@ define i8 @atomicrmw_xchg_i8_release(i8* %a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xchg_i8_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a4, (a2) +; RV64IA-NEXT: lr.w a4, (a0) ; RV64IA-NEXT: add a5, zero, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w.rl a5, a5, (a2) +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB2_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b release ret i8 %1 @@ -219,23 +219,23 @@ define i8 @atomicrmw_xchg_i8_acq_rel(i8* %a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xchg_i8_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a4, (a2) +; RV32IA-NEXT: lr.w.aq a4, (a0) ; RV32IA-NEXT: add a5, zero, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w.rl a5, a5, (a2) +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB3_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i8_acq_rel: @@ -250,23 +250,23 @@ define i8 @atomicrmw_xchg_i8_acq_rel(i8* %a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xchg_i8_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 
-; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a4, (a2) +; RV64IA-NEXT: lr.w.aq a4, (a0) ; RV64IA-NEXT: add a5, zero, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w.rl a5, a5, (a2) +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB3_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b acq_rel ret i8 %1 @@ -285,23 +285,23 @@ define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xchg_i8_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a4, (a2) +; RV32IA-NEXT: lr.w.aqrl a4, (a0) ; RV32IA-NEXT: add a5, zero, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2) +; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB4_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i8_seq_cst: @@ -316,23 +316,23 @@ define i8 @atomicrmw_xchg_i8_seq_cst(i8* %a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xchg_i8_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a4, (a2) +; RV64IA-NEXT: lr.w.aqrl a4, (a0) ; RV64IA-NEXT: add a5, zero, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2) +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB4_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i8* %a, i8 %b seq_cst ret i8 %1 @@ -351,23 +351,23 @@ define i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_add_i8_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a4, (a2) +; 
RV32IA-NEXT: lr.w a4, (a0) ; RV32IA-NEXT: add a5, a4, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w a5, a5, (a2) +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB5_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_add_i8_monotonic: @@ -382,23 +382,23 @@ define i8 @atomicrmw_add_i8_monotonic(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_add_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a4, (a2) +; RV64IA-NEXT: lr.w a4, (a0) ; RV64IA-NEXT: add a5, a4, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w a5, a5, (a2) +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB5_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw add i8* %a, i8 %b monotonic ret i8 %1 @@ -417,23 +417,23 @@ define i8 @atomicrmw_add_i8_acquire(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_add_i8_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a4, (a2) +; RV32IA-NEXT: lr.w.aq a4, (a0) ; RV32IA-NEXT: add a5, a4, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w a5, a5, (a2) +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB6_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_add_i8_acquire: @@ -448,23 +448,23 @@ define i8 @atomicrmw_add_i8_acquire(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_add_i8_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a4, (a2) +; RV64IA-NEXT: lr.w.aq a4, (a0) ; RV64IA-NEXT: add a5, a4, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w a5, a5, (a2) +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB6_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw add i8* %a, i8 %b acquire ret i8 %1 @@ -483,23 +483,23 @@ define i8 @atomicrmw_add_i8_release(i8 *%a, 
i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_add_i8_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a4, (a2) +; RV32IA-NEXT: lr.w a4, (a0) ; RV32IA-NEXT: add a5, a4, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w.rl a5, a5, (a2) +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB7_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_add_i8_release: @@ -514,23 +514,23 @@ define i8 @atomicrmw_add_i8_release(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_add_i8_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a4, (a2) +; RV64IA-NEXT: lr.w a4, (a0) ; RV64IA-NEXT: add a5, a4, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w.rl a5, a5, (a2) +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB7_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw add i8* %a, i8 %b release ret i8 %1 @@ -549,23 +549,23 @@ define i8 @atomicrmw_add_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_add_i8_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a4, (a2) +; RV32IA-NEXT: lr.w.aq a4, (a0) ; RV32IA-NEXT: add a5, a4, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w.rl a5, a5, (a2) +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB8_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_add_i8_acq_rel: @@ -580,23 +580,23 @@ define i8 @atomicrmw_add_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_add_i8_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; 
RV64IA-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a4, (a2) +; RV64IA-NEXT: lr.w.aq a4, (a0) ; RV64IA-NEXT: add a5, a4, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w.rl a5, a5, (a2) +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB8_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw add i8* %a, i8 %b acq_rel ret i8 %1 @@ -615,23 +615,23 @@ define i8 @atomicrmw_add_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_add_i8_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a4, (a2) +; RV32IA-NEXT: lr.w.aqrl a4, (a0) ; RV32IA-NEXT: add a5, a4, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2) +; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB9_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_add_i8_seq_cst: @@ -646,23 +646,23 @@ define i8 @atomicrmw_add_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_add_i8_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a4, (a2) +; RV64IA-NEXT: lr.w.aqrl a4, (a0) ; RV64IA-NEXT: add a5, a4, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2) +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB9_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw add i8* %a, i8 %b seq_cst ret i8 %1 @@ -681,23 +681,23 @@ define i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_sub_i8_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a4, (a2) +; RV32IA-NEXT: lr.w a4, (a0) ; RV32IA-NEXT: sub a5, a4, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w a5, a5, (a2) +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB10_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; 
RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i8_monotonic: @@ -712,23 +712,23 @@ define i8 @atomicrmw_sub_i8_monotonic(i8* %a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_sub_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a4, (a2) +; RV64IA-NEXT: lr.w a4, (a0) ; RV64IA-NEXT: sub a5, a4, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w a5, a5, (a2) +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB10_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b monotonic ret i8 %1 @@ -747,23 +747,23 @@ define i8 @atomicrmw_sub_i8_acquire(i8* %a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_sub_i8_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a4, (a2) +; RV32IA-NEXT: lr.w.aq a4, (a0) ; RV32IA-NEXT: sub a5, a4, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w a5, a5, (a2) +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB11_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i8_acquire: @@ -778,23 +778,23 @@ define i8 @atomicrmw_sub_i8_acquire(i8* %a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_sub_i8_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a4, (a2) +; RV64IA-NEXT: lr.w.aq a4, (a0) ; RV64IA-NEXT: sub a5, a4, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w a5, a5, (a2) +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB11_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b acquire ret i8 %1 @@ -813,23 +813,23 @@ define i8 @atomicrmw_sub_i8_release(i8* %a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_sub_i8_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, 
a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a4, (a2) +; RV32IA-NEXT: lr.w a4, (a0) ; RV32IA-NEXT: sub a5, a4, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w.rl a5, a5, (a2) +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB12_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i8_release: @@ -844,23 +844,23 @@ define i8 @atomicrmw_sub_i8_release(i8* %a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_sub_i8_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a4, (a2) +; RV64IA-NEXT: lr.w a4, (a0) ; RV64IA-NEXT: sub a5, a4, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w.rl a5, a5, (a2) +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB12_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b release ret i8 %1 @@ -879,23 +879,23 @@ define i8 @atomicrmw_sub_i8_acq_rel(i8* %a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_sub_i8_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a4, (a2) +; RV32IA-NEXT: lr.w.aq a4, (a0) ; RV32IA-NEXT: sub a5, a4, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w.rl a5, a5, (a2) +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB13_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i8_acq_rel: @@ -910,23 +910,23 @@ define i8 @atomicrmw_sub_i8_acq_rel(i8* %a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_sub_i8_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a4, (a2) +; RV64IA-NEXT: lr.w.aq a4, (a0) ; RV64IA-NEXT: sub a5, a4, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w.rl a5, a5, (a2) +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; 
RV64IA-NEXT: bnez a5, .LBB13_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b acq_rel ret i8 %1 @@ -945,23 +945,23 @@ define i8 @atomicrmw_sub_i8_seq_cst(i8* %a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_sub_i8_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a4, (a2) +; RV32IA-NEXT: lr.w.aqrl a4, (a0) ; RV32IA-NEXT: sub a5, a4, a1 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2) +; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB14_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i8_seq_cst: @@ -976,23 +976,23 @@ define i8 @atomicrmw_sub_i8_seq_cst(i8* %a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_sub_i8_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a4, (a2) +; RV64IA-NEXT: lr.w.aqrl a4, (a0) ; RV64IA-NEXT: sub a5, a4, a1 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2) +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB14_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw sub i8* %a, i8 %b seq_cst ret i8 %1 @@ -1011,17 +1011,17 @@ define i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_and_i8_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: andi a1, a1, 255 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: not a3, a3 -; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: or a1, a3, a1 -; RV32IA-NEXT: amoand.w a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoand.w a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_and_i8_monotonic: @@ -1036,17 +1036,17 @@ define i8 @atomicrmw_and_i8_monotonic(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_and_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; 
RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: not a3, a3 -; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: or a1, a3, a1 -; RV64IA-NEXT: amoand.w a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw and i8* %a, i8 %b monotonic ret i8 %1 @@ -1065,17 +1065,17 @@ define i8 @atomicrmw_and_i8_acquire(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_and_i8_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: andi a1, a1, 255 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: not a3, a3 -; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: or a1, a3, a1 -; RV32IA-NEXT: amoand.w.aq a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoand.w.aq a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_and_i8_acquire: @@ -1090,17 +1090,17 @@ define i8 @atomicrmw_and_i8_acquire(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_and_i8_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: not a3, a3 -; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: or a1, a3, a1 -; RV64IA-NEXT: amoand.w.aq a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.aq a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw and i8* %a, i8 %b acquire ret i8 %1 @@ -1119,17 +1119,17 @@ define i8 @atomicrmw_and_i8_release(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_and_i8_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: andi a1, a1, 255 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: not a3, a3 -; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: or a1, a3, a1 -; RV32IA-NEXT: amoand.w.rl a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoand.w.rl a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_and_i8_release: @@ -1144,17 +1144,17 @@ define i8 @atomicrmw_and_i8_release(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_and_i8_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: not a3, a3 -; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: or a1, a3, a1 -; RV64IA-NEXT: amoand.w.rl a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; 
RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.rl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw and i8* %a, i8 %b release ret i8 %1 @@ -1173,17 +1173,17 @@ define i8 @atomicrmw_and_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_and_i8_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: andi a1, a1, 255 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: not a3, a3 -; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: or a1, a3, a1 -; RV32IA-NEXT: amoand.w.aqrl a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_and_i8_acq_rel: @@ -1198,17 +1198,17 @@ define i8 @atomicrmw_and_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_and_i8_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: not a3, a3 -; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: or a1, a3, a1 -; RV64IA-NEXT: amoand.w.aqrl a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw and i8* %a, i8 %b acq_rel ret i8 %1 @@ -1227,17 +1227,17 @@ define i8 @atomicrmw_and_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_and_i8_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: andi a1, a1, 255 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: not a3, a3 -; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 ; RV32IA-NEXT: or a1, a3, a1 -; RV32IA-NEXT: amoand.w.aqrl a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_and_i8_seq_cst: @@ -1252,17 +1252,17 @@ define i8 @atomicrmw_and_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_and_i8_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: andi a1, a1, 255 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: not a3, a3 -; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 ; RV64IA-NEXT: or a1, a3, a1 -; RV64IA-NEXT: amoand.w.aqrl a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw and i8* %a, i8 %b seq_cst ret i8 %1 @@ -1281,24 +1281,24 @@ define i8 
@atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_nand_i8_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB20_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a4, (a2) +; RV32IA-NEXT: lr.w a4, (a0) ; RV32IA-NEXT: and a5, a4, a1 ; RV32IA-NEXT: not a5, a5 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w a5, a5, (a2) +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB20_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i8_monotonic: @@ -1313,24 +1313,24 @@ define i8 @atomicrmw_nand_i8_monotonic(i8* %a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_nand_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB20_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a4, (a2) +; RV64IA-NEXT: lr.w a4, (a0) ; RV64IA-NEXT: and a5, a4, a1 ; RV64IA-NEXT: not a5, a5 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w a5, a5, (a2) +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB20_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b monotonic ret i8 %1 @@ -1349,24 +1349,24 @@ define i8 @atomicrmw_nand_i8_acquire(i8* %a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_nand_i8_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a4, (a2) +; RV32IA-NEXT: lr.w.aq a4, (a0) ; RV32IA-NEXT: and a5, a4, a1 ; RV32IA-NEXT: not a5, a5 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w a5, a5, (a2) +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB21_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i8_acquire: @@ -1381,24 +1381,24 @@ define i8 @atomicrmw_nand_i8_acquire(i8* %a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_nand_i8_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; 
RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a4, (a2) +; RV64IA-NEXT: lr.w.aq a4, (a0) ; RV64IA-NEXT: and a5, a4, a1 ; RV64IA-NEXT: not a5, a5 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w a5, a5, (a2) +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB21_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b acquire ret i8 %1 @@ -1417,24 +1417,24 @@ define i8 @atomicrmw_nand_i8_release(i8* %a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_nand_i8_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a4, (a2) +; RV32IA-NEXT: lr.w a4, (a0) ; RV32IA-NEXT: and a5, a4, a1 ; RV32IA-NEXT: not a5, a5 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w.rl a5, a5, (a2) +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB22_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i8_release: @@ -1449,24 +1449,24 @@ define i8 @atomicrmw_nand_i8_release(i8* %a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_nand_i8_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a4, (a2) +; RV64IA-NEXT: lr.w a4, (a0) ; RV64IA-NEXT: and a5, a4, a1 ; RV64IA-NEXT: not a5, a5 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w.rl a5, a5, (a2) +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB22_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b release ret i8 %1 @@ -1485,24 +1485,24 @@ define i8 @atomicrmw_nand_i8_acq_rel(i8* %a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_nand_i8_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a4, (a2) +; RV32IA-NEXT: lr.w.aq a4, (a0) ; RV32IA-NEXT: and a5, a4, a1 ; RV32IA-NEXT: not a5, a5 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 
; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w.rl a5, a5, (a2) +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB23_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i8_acq_rel: @@ -1517,24 +1517,24 @@ define i8 @atomicrmw_nand_i8_acq_rel(i8* %a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_nand_i8_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB23_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a4, (a2) +; RV64IA-NEXT: lr.w.aq a4, (a0) ; RV64IA-NEXT: and a5, a4, a1 ; RV64IA-NEXT: not a5, a5 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w.rl a5, a5, (a2) +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB23_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b acq_rel ret i8 %1 @@ -1553,24 +1553,24 @@ define i8 @atomicrmw_nand_i8_seq_cst(i8* %a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_nand_i8_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a3, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB24_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a4, (a2) +; RV32IA-NEXT: lr.w.aqrl a4, (a0) ; RV32IA-NEXT: and a5, a4, a1 ; RV32IA-NEXT: not a5, a5 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: and a5, a5, a3 ; RV32IA-NEXT: xor a5, a4, a5 -; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2) +; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB24_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i8_seq_cst: @@ -1585,24 +1585,24 @@ define i8 @atomicrmw_nand_i8_seq_cst(i8* %a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_nand_i8_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a3, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB24_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a4, (a2) +; RV64IA-NEXT: lr.w.aqrl a4, (a0) ; RV64IA-NEXT: and a5, a4, a1 ; RV64IA-NEXT: not a5, a5 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: and a5, a5, a3 ; RV64IA-NEXT: xor a5, a4, a5 -; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2) +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB24_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw nand i8* %a, i8 %b seq_cst ret i8 %1 @@ -1621,13 +1621,13 @@ define i8 
@atomicrmw_or_i8_monotonic(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_or_i8_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoor.w a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoor.w a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_or_i8_monotonic: @@ -1642,13 +1642,13 @@ define i8 @atomicrmw_or_i8_monotonic(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_or_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoor.w a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw or i8* %a, i8 %b monotonic ret i8 %1 @@ -1667,13 +1667,13 @@ define i8 @atomicrmw_or_i8_acquire(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_or_i8_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoor.w.aq a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoor.w.aq a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_or_i8_acquire: @@ -1688,13 +1688,13 @@ define i8 @atomicrmw_or_i8_acquire(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_or_i8_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoor.w.aq a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.aq a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw or i8* %a, i8 %b acquire ret i8 %1 @@ -1713,13 +1713,13 @@ define i8 @atomicrmw_or_i8_release(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_or_i8_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoor.w.rl a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoor.w.rl a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_or_i8_release: @@ -1734,13 +1734,13 @@ define i8 @atomicrmw_or_i8_release(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_or_i8_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoor.w.rl a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: 
slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.rl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw or i8* %a, i8 %b release ret i8 %1 @@ -1759,13 +1759,13 @@ define i8 @atomicrmw_or_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_or_i8_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoor.w.aqrl a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_or_i8_acq_rel: @@ -1780,13 +1780,13 @@ define i8 @atomicrmw_or_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_or_i8_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoor.w.aqrl a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw or i8* %a, i8 %b acq_rel ret i8 %1 @@ -1805,13 +1805,13 @@ define i8 @atomicrmw_or_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_or_i8_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoor.w.aqrl a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_or_i8_seq_cst: @@ -1826,13 +1826,13 @@ define i8 @atomicrmw_or_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_or_i8_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoor.w.aqrl a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw or i8* %a, i8 %b seq_cst ret i8 %1 @@ -1851,13 +1851,13 @@ define i8 @atomicrmw_xor_i8_monotonic(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xor_i8_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoxor.w a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoxor.w a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xor_i8_monotonic: @@ -1872,13 +1872,13 @@ define i8 @atomicrmw_xor_i8_monotonic(i8 *%a, i8 %b) nounwind { 
; ; RV64IA-LABEL: atomicrmw_xor_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoxor.w a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw xor i8* %a, i8 %b monotonic ret i8 %1 @@ -1897,13 +1897,13 @@ define i8 @atomicrmw_xor_i8_acquire(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xor_i8_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoxor.w.aq a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoxor.w.aq a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xor_i8_acquire: @@ -1918,13 +1918,13 @@ define i8 @atomicrmw_xor_i8_acquire(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xor_i8_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoxor.w.aq a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.aq a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw xor i8* %a, i8 %b acquire ret i8 %1 @@ -1943,13 +1943,13 @@ define i8 @atomicrmw_xor_i8_release(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xor_i8_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoxor.w.rl a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoxor.w.rl a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xor_i8_release: @@ -1964,13 +1964,13 @@ define i8 @atomicrmw_xor_i8_release(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xor_i8_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoxor.w.rl a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.rl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw xor i8* %a, i8 %b release ret i8 %1 @@ -1989,13 +1989,13 @@ define i8 @atomicrmw_xor_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xor_i8_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoxor.w.aqrl a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: slli a2, a0, 3 
+; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xor_i8_acq_rel: @@ -2010,13 +2010,13 @@ define i8 @atomicrmw_xor_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xor_i8_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoxor.w.aqrl a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw xor i8* %a, i8 %b acq_rel ret i8 %1 @@ -2035,13 +2035,13 @@ define i8 @atomicrmw_xor_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xor_i8_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoxor.w.aqrl a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xor_i8_seq_cst: @@ -2056,13 +2056,13 @@ define i8 @atomicrmw_xor_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xor_i8_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoxor.w.aqrl a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw xor i8* %a, i8 %b seq_cst ret i8 %1 @@ -2079,33 +2079,33 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lbu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 24 -; RV32I-NEXT: srai s0, a0, 24 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 24 +; RV32I-NEXT: srai s0, a1, 24 ; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB35_2 ; RV32I-NEXT: .LBB35_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB35_2 Depth=1 -; RV32I-NEXT: sb a1, 11(sp) +; RV32I-NEXT: sb a0, 11(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: mv a3, zero ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a1, 11(sp) -; RV32I-NEXT: bnez a0, .LBB35_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB35_4 ; RV32I-NEXT: .LBB35_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 24 -; RV32I-NEXT: srai a0, a0, 24 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: blt s0, a0, .LBB35_1 +; RV32I-NEXT: slli a1, a0, 24 +; RV32I-NEXT: srai a1, a1, 24 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: blt s0, a1, .LBB35_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB35_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB35_1 ; RV32I-NEXT: .LBB35_4: # %atomicrmw.end -; 
RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -2116,32 +2116,32 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_max_i8_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 24 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 24 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB35_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a3, a1, .LBB35_3 +; RV32IA-NEXT: lr.w a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a4, a1, .LBB35_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB35_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB35_3: # in Loop: Header=BB35_1 Depth=1 -; RV32IA-NEXT: sc.w a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB35_1 +; RV32IA-NEXT: sc.w a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB35_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_max_i8_monotonic: @@ -2154,33 +2154,33 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lbu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 56 -; RV64I-NEXT: srai s0, a0, 56 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 56 +; RV64I-NEXT: srai s0, a1, 56 ; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB35_2 ; RV64I-NEXT: .LBB35_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB35_2 Depth=1 -; RV64I-NEXT: sb a1, 7(sp) +; RV64I-NEXT: sb a0, 7(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: mv a3, zero ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a1, 7(sp) -; RV64I-NEXT: bnez a0, .LBB35_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB35_4 ; RV64I-NEXT: .LBB35_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 56 -; RV64I-NEXT: srai a0, a0, 56 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: blt s0, a0, .LBB35_1 +; RV64I-NEXT: slli a1, a0, 56 +; RV64I-NEXT: srai a1, a1, 56 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: blt s0, a1, .LBB35_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB35_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB35_1 ; RV64I-NEXT: .LBB35_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -2191,32 +2191,32 @@ define i8 @atomicrmw_max_i8_monotonic(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_max_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: 
andi a0, a0, 24 -; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 56 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB35_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a3, a1, .LBB35_3 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB35_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB35_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB35_3: # in Loop: Header=BB35_1 Depth=1 -; RV64IA-NEXT: sc.w a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB35_1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB35_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw max i8* %a, i8 %b monotonic ret i8 %1 @@ -2233,33 +2233,33 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lbu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 24 -; RV32I-NEXT: srai s0, a0, 24 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 24 +; RV32I-NEXT: srai s0, a1, 24 ; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB36_2 ; RV32I-NEXT: .LBB36_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB36_2 Depth=1 -; RV32I-NEXT: sb a1, 11(sp) -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: sb a0, 11(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 2 +; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a1, 11(sp) -; RV32I-NEXT: bnez a0, .LBB36_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB36_4 ; RV32I-NEXT: .LBB36_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 24 -; RV32I-NEXT: srai a0, a0, 24 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: blt s0, a0, .LBB36_1 +; RV32I-NEXT: slli a1, a0, 24 +; RV32I-NEXT: srai a1, a1, 24 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: blt s0, a1, .LBB36_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB36_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB36_1 ; RV32I-NEXT: .LBB36_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -2270,32 +2270,32 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_max_i8_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 24 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: addi a4, zero, 255 +; 
RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 24 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB36_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a3, a1, .LBB36_3 +; RV32IA-NEXT: lr.w.aq a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a4, a1, .LBB36_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB36_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB36_3: # in Loop: Header=BB36_1 Depth=1 -; RV32IA-NEXT: sc.w a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB36_1 +; RV32IA-NEXT: sc.w a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB36_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_max_i8_acquire: @@ -2308,33 +2308,33 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lbu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 56 -; RV64I-NEXT: srai s0, a0, 56 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 56 +; RV64I-NEXT: srai s0, a1, 56 ; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB36_2 ; RV64I-NEXT: .LBB36_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB36_2 Depth=1 -; RV64I-NEXT: sb a1, 7(sp) -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: sb a0, 7(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 2 +; RV64I-NEXT: addi a4, zero, 2 ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a1, 7(sp) -; RV64I-NEXT: bnez a0, .LBB36_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB36_4 ; RV64I-NEXT: .LBB36_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 56 -; RV64I-NEXT: srai a0, a0, 56 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: blt s0, a0, .LBB36_1 +; RV64I-NEXT: slli a1, a0, 56 +; RV64I-NEXT: srai a1, a1, 56 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: blt s0, a1, .LBB36_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB36_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB36_1 ; RV64I-NEXT: .LBB36_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -2345,32 +2345,32 @@ define i8 @atomicrmw_max_i8_acquire(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_max_i8_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 56 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; 
RV64IA-NEXT: .LBB36_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a3, a1, .LBB36_3 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB36_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB36_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB36_3: # in Loop: Header=BB36_1 Depth=1 -; RV64IA-NEXT: sc.w a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB36_1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB36_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw max i8* %a, i8 %b acquire ret i8 %1 @@ -2387,33 +2387,33 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lbu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 24 -; RV32I-NEXT: srai s0, a0, 24 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 24 +; RV32I-NEXT: srai s0, a1, 24 ; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB37_2 ; RV32I-NEXT: .LBB37_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB37_2 Depth=1 -; RV32I-NEXT: sb a1, 11(sp) -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: sb a0, 11(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 3 ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a1, 11(sp) -; RV32I-NEXT: bnez a0, .LBB37_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB37_4 ; RV32I-NEXT: .LBB37_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 24 -; RV32I-NEXT: srai a0, a0, 24 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: blt s0, a0, .LBB37_1 +; RV32I-NEXT: slli a1, a0, 24 +; RV32I-NEXT: srai a1, a1, 24 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: blt s0, a1, .LBB37_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB37_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB37_1 ; RV32I-NEXT: .LBB37_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -2424,32 +2424,32 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_max_i8_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 24 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 24 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB37_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a3, a1, .LBB37_3 +; RV32IA-NEXT: lr.w a5, (a0) +; 
RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a4, a1, .LBB37_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB37_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB37_3: # in Loop: Header=BB37_1 Depth=1 -; RV32IA-NEXT: sc.w.rl a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB37_1 +; RV32IA-NEXT: sc.w.rl a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB37_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_max_i8_release: @@ -2462,33 +2462,33 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lbu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 56 -; RV64I-NEXT: srai s0, a0, 56 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 56 +; RV64I-NEXT: srai s0, a1, 56 ; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB37_2 ; RV64I-NEXT: .LBB37_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB37_2 Depth=1 -; RV64I-NEXT: sb a1, 7(sp) -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: sb a0, 7(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 3 ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a1, 7(sp) -; RV64I-NEXT: bnez a0, .LBB37_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB37_4 ; RV64I-NEXT: .LBB37_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 56 -; RV64I-NEXT: srai a0, a0, 56 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: blt s0, a0, .LBB37_1 +; RV64I-NEXT: slli a1, a0, 56 +; RV64I-NEXT: srai a1, a1, 56 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: blt s0, a1, .LBB37_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB37_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB37_1 ; RV64I-NEXT: .LBB37_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -2499,32 +2499,32 @@ define i8 @atomicrmw_max_i8_release(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_max_i8_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 56 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB37_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a3, a1, .LBB37_3 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB37_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB37_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, 
a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB37_3: # in Loop: Header=BB37_1 Depth=1 -; RV64IA-NEXT: sc.w.rl a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB37_1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB37_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw max i8* %a, i8 %b release ret i8 %1 @@ -2541,33 +2541,33 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lbu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 24 -; RV32I-NEXT: srai s0, a0, 24 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 24 +; RV32I-NEXT: srai s0, a1, 24 ; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB38_2 ; RV32I-NEXT: .LBB38_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB38_2 Depth=1 -; RV32I-NEXT: sb a1, 11(sp) -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: sb a0, 11(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 4 +; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a1, 11(sp) -; RV32I-NEXT: bnez a0, .LBB38_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB38_4 ; RV32I-NEXT: .LBB38_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 24 -; RV32I-NEXT: srai a0, a0, 24 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: blt s0, a0, .LBB38_1 +; RV32I-NEXT: slli a1, a0, 24 +; RV32I-NEXT: srai a1, a1, 24 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: blt s0, a1, .LBB38_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB38_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB38_1 ; RV32I-NEXT: .LBB38_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -2578,32 +2578,32 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_max_i8_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 24 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 24 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB38_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a3, a1, .LBB38_3 +; RV32IA-NEXT: lr.w.aq a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a4, a1, .LBB38_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB38_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB38_3: # in Loop: Header=BB38_1 Depth=1 -; RV32IA-NEXT: sc.w.rl a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB38_1 +; RV32IA-NEXT: 
sc.w.rl a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB38_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_max_i8_acq_rel: @@ -2616,33 +2616,33 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lbu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 56 -; RV64I-NEXT: srai s0, a0, 56 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 56 +; RV64I-NEXT: srai s0, a1, 56 ; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB38_2 ; RV64I-NEXT: .LBB38_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB38_2 Depth=1 -; RV64I-NEXT: sb a1, 7(sp) -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: sb a0, 7(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 4 +; RV64I-NEXT: addi a4, zero, 2 ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a1, 7(sp) -; RV64I-NEXT: bnez a0, .LBB38_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB38_4 ; RV64I-NEXT: .LBB38_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 56 -; RV64I-NEXT: srai a0, a0, 56 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: blt s0, a0, .LBB38_1 +; RV64I-NEXT: slli a1, a0, 56 +; RV64I-NEXT: srai a1, a1, 56 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: blt s0, a1, .LBB38_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB38_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB38_1 ; RV64I-NEXT: .LBB38_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -2653,32 +2653,32 @@ define i8 @atomicrmw_max_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_max_i8_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 56 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB38_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a3, a1, .LBB38_3 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB38_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB38_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB38_3: # in Loop: Header=BB38_1 Depth=1 -; RV64IA-NEXT: sc.w.rl a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB38_1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB38_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw max i8* %a, i8 %b acq_rel ret i8 %1 @@ -2695,33 +2695,33 @@ define i8 
@atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lbu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 24 -; RV32I-NEXT: srai s0, a0, 24 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 24 +; RV32I-NEXT: srai s0, a1, 24 ; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB39_2 ; RV32I-NEXT: .LBB39_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB39_2 Depth=1 -; RV32I-NEXT: sb a1, 11(sp) -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: sb a0, 11(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 5 +; RV32I-NEXT: addi a4, zero, 5 ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a1, 11(sp) -; RV32I-NEXT: bnez a0, .LBB39_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB39_4 ; RV32I-NEXT: .LBB39_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 24 -; RV32I-NEXT: srai a0, a0, 24 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: blt s0, a0, .LBB39_1 +; RV32I-NEXT: slli a1, a0, 24 +; RV32I-NEXT: srai a1, a1, 24 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: blt s0, a1, .LBB39_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB39_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB39_1 ; RV32I-NEXT: .LBB39_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -2732,32 +2732,32 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_max_i8_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 24 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 24 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB39_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a3, a1, .LBB39_3 +; RV32IA-NEXT: lr.w.aqrl a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a4, a1, .LBB39_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB39_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB39_3: # in Loop: Header=BB39_1 Depth=1 -; RV32IA-NEXT: sc.w.aqrl a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB39_1 +; RV32IA-NEXT: sc.w.aqrl a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB39_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_max_i8_seq_cst: @@ -2770,33 +2770,33 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lbu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 56 -; RV64I-NEXT: srai s0, a0, 56 +; RV64I-NEXT: lbu a0, 
0(a0) +; RV64I-NEXT: slli a1, a1, 56 +; RV64I-NEXT: srai s0, a1, 56 ; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB39_2 ; RV64I-NEXT: .LBB39_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB39_2 Depth=1 -; RV64I-NEXT: sb a1, 7(sp) -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: sb a0, 7(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 5 +; RV64I-NEXT: addi a4, zero, 5 ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a1, 7(sp) -; RV64I-NEXT: bnez a0, .LBB39_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB39_4 ; RV64I-NEXT: .LBB39_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 56 -; RV64I-NEXT: srai a0, a0, 56 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: blt s0, a0, .LBB39_1 +; RV64I-NEXT: slli a1, a0, 56 +; RV64I-NEXT: srai a1, a1, 56 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: blt s0, a1, .LBB39_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB39_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB39_1 ; RV64I-NEXT: .LBB39_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -2807,32 +2807,32 @@ define i8 @atomicrmw_max_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_max_i8_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 56 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB39_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a3, a1, .LBB39_3 +; RV64IA-NEXT: lr.w.aqrl a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB39_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB39_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB39_3: # in Loop: Header=BB39_1 Depth=1 -; RV64IA-NEXT: sc.w.aqrl a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB39_1 +; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB39_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw max i8* %a, i8 %b seq_cst ret i8 %1 @@ -2849,33 +2849,33 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lbu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 24 -; RV32I-NEXT: srai s0, a0, 24 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 24 +; RV32I-NEXT: srai s0, a1, 24 ; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB40_2 ; RV32I-NEXT: .LBB40_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB40_2 Depth=1 -; RV32I-NEXT: sb a1, 
11(sp) +; RV32I-NEXT: sb a0, 11(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: mv a3, zero ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a1, 11(sp) -; RV32I-NEXT: bnez a0, .LBB40_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB40_4 ; RV32I-NEXT: .LBB40_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 24 -; RV32I-NEXT: srai a0, a0, 24 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bge s0, a0, .LBB40_1 +; RV32I-NEXT: slli a1, a0, 24 +; RV32I-NEXT: srai a1, a1, 24 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bge s0, a1, .LBB40_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB40_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB40_1 ; RV32I-NEXT: .LBB40_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -2886,32 +2886,32 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_min_i8_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 24 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 24 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB40_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a1, a3, .LBB40_3 +; RV32IA-NEXT: lr.w a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a1, a4, .LBB40_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB40_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB40_3: # in Loop: Header=BB40_1 Depth=1 -; RV32IA-NEXT: sc.w a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB40_1 +; RV32IA-NEXT: sc.w a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB40_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_min_i8_monotonic: @@ -2924,33 +2924,33 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lbu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 56 -; RV64I-NEXT: srai s0, a0, 56 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 56 +; RV64I-NEXT: srai s0, a1, 56 ; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB40_2 ; RV64I-NEXT: .LBB40_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB40_2 Depth=1 -; RV64I-NEXT: sb a1, 7(sp) +; RV64I-NEXT: sb a0, 7(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: mv a3, zero ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a1, 7(sp) -; RV64I-NEXT: bnez a0, .LBB40_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB40_4 ; 
RV64I-NEXT: .LBB40_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 56 -; RV64I-NEXT: srai a0, a0, 56 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bge s0, a0, .LBB40_1 +; RV64I-NEXT: slli a1, a0, 56 +; RV64I-NEXT: srai a1, a1, 56 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bge s0, a1, .LBB40_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB40_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB40_1 ; RV64I-NEXT: .LBB40_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -2961,32 +2961,32 @@ define i8 @atomicrmw_min_i8_monotonic(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_min_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 56 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB40_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a1, a3, .LBB40_3 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB40_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB40_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB40_3: # in Loop: Header=BB40_1 Depth=1 -; RV64IA-NEXT: sc.w a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB40_1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB40_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw min i8* %a, i8 %b monotonic ret i8 %1 @@ -3003,33 +3003,33 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lbu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 24 -; RV32I-NEXT: srai s0, a0, 24 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 24 +; RV32I-NEXT: srai s0, a1, 24 ; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB41_2 ; RV32I-NEXT: .LBB41_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB41_2 Depth=1 -; RV32I-NEXT: sb a1, 11(sp) -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: sb a0, 11(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 2 +; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a1, 11(sp) -; RV32I-NEXT: bnez a0, .LBB41_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB41_4 ; RV32I-NEXT: .LBB41_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 24 -; RV32I-NEXT: srai a0, a0, 24 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bge s0, a0, .LBB41_1 +; RV32I-NEXT: slli a1, a0, 24 
+; RV32I-NEXT: srai a1, a1, 24 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bge s0, a1, .LBB41_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB41_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB41_1 ; RV32I-NEXT: .LBB41_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -3040,32 +3040,32 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_min_i8_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 24 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 24 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB41_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a1, a3, .LBB41_3 +; RV32IA-NEXT: lr.w.aq a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a1, a4, .LBB41_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB41_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB41_3: # in Loop: Header=BB41_1 Depth=1 -; RV32IA-NEXT: sc.w a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB41_1 +; RV32IA-NEXT: sc.w a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB41_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_min_i8_acquire: @@ -3078,33 +3078,33 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lbu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 56 -; RV64I-NEXT: srai s0, a0, 56 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 56 +; RV64I-NEXT: srai s0, a1, 56 ; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB41_2 ; RV64I-NEXT: .LBB41_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB41_2 Depth=1 -; RV64I-NEXT: sb a1, 7(sp) -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: sb a0, 7(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 2 +; RV64I-NEXT: addi a4, zero, 2 ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a1, 7(sp) -; RV64I-NEXT: bnez a0, .LBB41_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB41_4 ; RV64I-NEXT: .LBB41_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 56 -; RV64I-NEXT: srai a0, a0, 56 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bge s0, a0, .LBB41_1 +; RV64I-NEXT: slli a1, a0, 56 +; RV64I-NEXT: srai a1, a1, 56 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bge s0, a1, .LBB41_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB41_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB41_1 ; RV64I-NEXT: .LBB41_4: # 
%atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -3115,32 +3115,32 @@ define i8 @atomicrmw_min_i8_acquire(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_min_i8_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 56 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB41_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a1, a3, .LBB41_3 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB41_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB41_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB41_3: # in Loop: Header=BB41_1 Depth=1 -; RV64IA-NEXT: sc.w a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB41_1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB41_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw min i8* %a, i8 %b acquire ret i8 %1 @@ -3157,33 +3157,33 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lbu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 24 -; RV32I-NEXT: srai s0, a0, 24 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 24 +; RV32I-NEXT: srai s0, a1, 24 ; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB42_2 ; RV32I-NEXT: .LBB42_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB42_2 Depth=1 -; RV32I-NEXT: sb a1, 11(sp) -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: sb a0, 11(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 3 ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a1, 11(sp) -; RV32I-NEXT: bnez a0, .LBB42_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB42_4 ; RV32I-NEXT: .LBB42_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 24 -; RV32I-NEXT: srai a0, a0, 24 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bge s0, a0, .LBB42_1 +; RV32I-NEXT: slli a1, a0, 24 +; RV32I-NEXT: srai a1, a1, 24 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bge s0, a1, .LBB42_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB42_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB42_1 ; RV32I-NEXT: .LBB42_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -3194,32 +3194,32 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_min_i8_release: ; RV32IA: # %bb.0: -; 
RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 24 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 24 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB42_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a1, a3, .LBB42_3 +; RV32IA-NEXT: lr.w a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a1, a4, .LBB42_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB42_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB42_3: # in Loop: Header=BB42_1 Depth=1 -; RV32IA-NEXT: sc.w.rl a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB42_1 +; RV32IA-NEXT: sc.w.rl a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB42_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_min_i8_release: @@ -3232,33 +3232,33 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lbu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 56 -; RV64I-NEXT: srai s0, a0, 56 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 56 +; RV64I-NEXT: srai s0, a1, 56 ; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB42_2 ; RV64I-NEXT: .LBB42_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB42_2 Depth=1 -; RV64I-NEXT: sb a1, 7(sp) -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: sb a0, 7(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 3 ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a1, 7(sp) -; RV64I-NEXT: bnez a0, .LBB42_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB42_4 ; RV64I-NEXT: .LBB42_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 56 -; RV64I-NEXT: srai a0, a0, 56 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bge s0, a0, .LBB42_1 +; RV64I-NEXT: slli a1, a0, 56 +; RV64I-NEXT: srai a1, a1, 56 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bge s0, a1, .LBB42_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB42_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB42_1 ; RV64I-NEXT: .LBB42_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -3269,32 +3269,32 @@ define i8 @atomicrmw_min_i8_release(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_min_i8_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: 
addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 56 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB42_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a1, a3, .LBB42_3 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB42_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB42_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB42_3: # in Loop: Header=BB42_1 Depth=1 -; RV64IA-NEXT: sc.w.rl a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB42_1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB42_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw min i8* %a, i8 %b release ret i8 %1 @@ -3311,33 +3311,33 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lbu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 24 -; RV32I-NEXT: srai s0, a0, 24 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 24 +; RV32I-NEXT: srai s0, a1, 24 ; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB43_2 ; RV32I-NEXT: .LBB43_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB43_2 Depth=1 -; RV32I-NEXT: sb a1, 11(sp) -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: sb a0, 11(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 4 +; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a1, 11(sp) -; RV32I-NEXT: bnez a0, .LBB43_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB43_4 ; RV32I-NEXT: .LBB43_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 24 -; RV32I-NEXT: srai a0, a0, 24 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bge s0, a0, .LBB43_1 +; RV32I-NEXT: slli a1, a0, 24 +; RV32I-NEXT: srai a1, a1, 24 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bge s0, a1, .LBB43_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB43_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB43_1 ; RV32I-NEXT: .LBB43_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -3348,32 +3348,32 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_min_i8_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 24 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 24 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; 
RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB43_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a1, a3, .LBB43_3 +; RV32IA-NEXT: lr.w.aq a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a1, a4, .LBB43_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB43_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB43_3: # in Loop: Header=BB43_1 Depth=1 -; RV32IA-NEXT: sc.w.rl a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB43_1 +; RV32IA-NEXT: sc.w.rl a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB43_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_min_i8_acq_rel: @@ -3386,33 +3386,33 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lbu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 56 -; RV64I-NEXT: srai s0, a0, 56 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 56 +; RV64I-NEXT: srai s0, a1, 56 ; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB43_2 ; RV64I-NEXT: .LBB43_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB43_2 Depth=1 -; RV64I-NEXT: sb a1, 7(sp) -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: sb a0, 7(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 4 +; RV64I-NEXT: addi a4, zero, 2 ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a1, 7(sp) -; RV64I-NEXT: bnez a0, .LBB43_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB43_4 ; RV64I-NEXT: .LBB43_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 56 -; RV64I-NEXT: srai a0, a0, 56 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bge s0, a0, .LBB43_1 +; RV64I-NEXT: slli a1, a0, 56 +; RV64I-NEXT: srai a1, a1, 56 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bge s0, a1, .LBB43_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB43_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB43_1 ; RV64I-NEXT: .LBB43_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -3423,32 +3423,32 @@ define i8 @atomicrmw_min_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_min_i8_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 56 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB43_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; 
RV64IA-NEXT: bge a1, a3, .LBB43_3 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB43_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB43_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB43_3: # in Loop: Header=BB43_1 Depth=1 -; RV64IA-NEXT: sc.w.rl a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB43_1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB43_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw min i8* %a, i8 %b acq_rel ret i8 %1 @@ -3465,33 +3465,33 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lbu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 24 -; RV32I-NEXT: srai s0, a0, 24 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 24 +; RV32I-NEXT: srai s0, a1, 24 ; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB44_2 ; RV32I-NEXT: .LBB44_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB44_2 Depth=1 -; RV32I-NEXT: sb a1, 11(sp) -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: sb a0, 11(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 5 +; RV32I-NEXT: addi a4, zero, 5 ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a1, 11(sp) -; RV32I-NEXT: bnez a0, .LBB44_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB44_4 ; RV32I-NEXT: .LBB44_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 24 -; RV32I-NEXT: srai a0, a0, 24 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bge s0, a0, .LBB44_1 +; RV32I-NEXT: slli a1, a0, 24 +; RV32I-NEXT: srai a1, a1, 24 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bge s0, a1, .LBB44_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB44_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB44_1 ; RV32I-NEXT: .LBB44_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -3502,32 +3502,32 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_min_i8_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 24 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: addi a4, zero, 255 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 24 ; RV32IA-NEXT: srai a1, a1, 24 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 24 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB44_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a1, a3, .LBB44_3 +; RV32IA-NEXT: lr.w.aqrl a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a1, a4, .LBB44_3 ; RV32IA-NEXT: # 
%bb.2: # in Loop: Header=BB44_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB44_3: # in Loop: Header=BB44_1 Depth=1 -; RV32IA-NEXT: sc.w.aqrl a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB44_1 +; RV32IA-NEXT: sc.w.aqrl a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB44_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_min_i8_seq_cst: @@ -3540,33 +3540,33 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lbu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 56 -; RV64I-NEXT: srai s0, a0, 56 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 56 +; RV64I-NEXT: srai s0, a1, 56 ; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB44_2 ; RV64I-NEXT: .LBB44_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB44_2 Depth=1 -; RV64I-NEXT: sb a1, 7(sp) -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: sb a0, 7(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 5 +; RV64I-NEXT: addi a4, zero, 5 ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a1, 7(sp) -; RV64I-NEXT: bnez a0, .LBB44_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB44_4 ; RV64I-NEXT: .LBB44_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 56 -; RV64I-NEXT: srai a0, a0, 56 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bge s0, a0, .LBB44_1 +; RV64I-NEXT: slli a1, a0, 56 +; RV64I-NEXT: srai a1, a1, 56 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bge s0, a1, .LBB44_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB44_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB44_1 ; RV64I-NEXT: .LBB44_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -3577,32 +3577,32 @@ define i8 @atomicrmw_min_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_min_i8_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 56 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: addi a4, zero, 255 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 56 ; RV64IA-NEXT: srai a1, a1, 56 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 56 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB44_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a1, a3, .LBB44_3 +; RV64IA-NEXT: lr.w.aqrl a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB44_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB44_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: 
.LBB44_3: # in Loop: Header=BB44_1 Depth=1 -; RV64IA-NEXT: sc.w.aqrl a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB44_1 +; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB44_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw min i8* %a, i8 %b seq_cst ret i8 %1 @@ -3617,33 +3617,33 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lbu a3, 0(a0) -; RV32I-NEXT: mv s3, a1 -; RV32I-NEXT: andi s1, a1, 255 -; RV32I-NEXT: addi s2, sp, 11 +; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: andi s0, a1, 255 +; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB45_2 ; RV32I-NEXT: .LBB45_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB45_2 Depth=1 -; RV32I-NEXT: sb a3, 11(sp) -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 +; RV32I-NEXT: sb a0, 11(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: mv a3, zero ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a3, 11(sp) -; RV32I-NEXT: bnez a0, .LBB45_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB45_4 ; RV32I-NEXT: .LBB45_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: andi a0, a3, 255 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bltu s1, a0, .LBB45_1 +; RV32I-NEXT: andi a1, a0, 255 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bltu s0, a1, .LBB45_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB45_2 Depth=1 -; RV32I-NEXT: mv a2, s3 +; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB45_1 ; RV32I-NEXT: .LBB45_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -3654,27 +3654,27 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umax_i8_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a6, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB45_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a4, (a6) -; RV32IA-NEXT: and a2, a4, a3 +; RV32IA-NEXT: lr.w a4, (a0) +; RV32IA-NEXT: and a3, a4, a6 ; RV32IA-NEXT: mv a5, a4 -; RV32IA-NEXT: bgeu a2, a1, .LBB45_3 +; RV32IA-NEXT: bgeu a3, a1, .LBB45_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB45_1 Depth=1 ; RV32IA-NEXT: xor a5, a4, a1 -; RV32IA-NEXT: and a5, a5, a3 +; RV32IA-NEXT: and a5, a5, a6 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB45_3: # in Loop: Header=BB45_1 Depth=1 -; RV32IA-NEXT: sc.w a5, a5, (a6) +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB45_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umax_i8_monotonic: @@ -3685,33 +3685,33 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lbu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: andi s1, a1, 255 -; RV64I-NEXT: addi s2, sp, 7 +; 
RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: andi s0, a1, 255 +; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB45_2 ; RV64I-NEXT: .LBB45_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB45_2 Depth=1 -; RV64I-NEXT: sb a3, 7(sp) -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 +; RV64I-NEXT: sb a0, 7(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: mv a3, zero ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a3, 7(sp) -; RV64I-NEXT: bnez a0, .LBB45_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB45_4 ; RV64I-NEXT: .LBB45_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: andi a0, a3, 255 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a0, .LBB45_1 +; RV64I-NEXT: andi a1, a0, 255 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bltu s0, a1, .LBB45_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB45_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB45_1 ; RV64I-NEXT: .LBB45_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -3722,27 +3722,27 @@ define i8 @atomicrmw_umax_i8_monotonic(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umax_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB45_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a4, (a6) -; RV64IA-NEXT: and a2, a4, a3 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 ; RV64IA-NEXT: mv a5, a4 -; RV64IA-NEXT: bgeu a2, a1, .LBB45_3 +; RV64IA-NEXT: bgeu a3, a1, .LBB45_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB45_1 Depth=1 ; RV64IA-NEXT: xor a5, a4, a1 -; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: and a5, a5, a6 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB45_3: # in Loop: Header=BB45_1 Depth=1 -; RV64IA-NEXT: sc.w a5, a5, (a6) +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB45_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw umax i8* %a, i8 %b monotonic ret i8 %1 @@ -3757,33 +3757,33 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lbu a3, 0(a0) -; RV32I-NEXT: mv s3, a1 -; RV32I-NEXT: andi s1, a1, 255 -; RV32I-NEXT: addi s2, sp, 11 +; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: andi s0, a1, 255 +; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB46_2 ; RV32I-NEXT: .LBB46_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB46_2 Depth=1 -; RV32I-NEXT: sb a3, 11(sp) +; RV32I-NEXT: sb a0, 11(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a3, zero, 2 ; RV32I-NEXT: addi a4, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a3, 11(sp) -; RV32I-NEXT: bnez a0, .LBB46_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, 
.LBB46_4 ; RV32I-NEXT: .LBB46_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: andi a0, a3, 255 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bltu s1, a0, .LBB46_1 +; RV32I-NEXT: andi a1, a0, 255 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bltu s0, a1, .LBB46_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB46_2 Depth=1 -; RV32I-NEXT: mv a2, s3 +; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB46_1 ; RV32I-NEXT: .LBB46_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -3794,27 +3794,27 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umax_i8_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a6, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB46_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a4, (a6) -; RV32IA-NEXT: and a2, a4, a3 +; RV32IA-NEXT: lr.w.aq a4, (a0) +; RV32IA-NEXT: and a3, a4, a6 ; RV32IA-NEXT: mv a5, a4 -; RV32IA-NEXT: bgeu a2, a1, .LBB46_3 +; RV32IA-NEXT: bgeu a3, a1, .LBB46_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB46_1 Depth=1 ; RV32IA-NEXT: xor a5, a4, a1 -; RV32IA-NEXT: and a5, a5, a3 +; RV32IA-NEXT: and a5, a5, a6 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB46_3: # in Loop: Header=BB46_1 Depth=1 -; RV32IA-NEXT: sc.w a5, a5, (a6) +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB46_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umax_i8_acquire: @@ -3825,33 +3825,33 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lbu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: andi s1, a1, 255 -; RV64I-NEXT: addi s2, sp, 7 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: andi s0, a1, 255 +; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB46_2 ; RV64I-NEXT: .LBB46_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB46_2 Depth=1 -; RV64I-NEXT: sb a3, 7(sp) +; RV64I-NEXT: sb a0, 7(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 2 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a3, 7(sp) -; RV64I-NEXT: bnez a0, .LBB46_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB46_4 ; RV64I-NEXT: .LBB46_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: andi a0, a3, 255 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a0, .LBB46_1 +; RV64I-NEXT: andi a1, a0, 255 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bltu s0, a1, .LBB46_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB46_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB46_1 ; RV64I-NEXT: .LBB46_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -3862,27 +3862,27 @@ define i8 @atomicrmw_umax_i8_acquire(i8 *%a, 
i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umax_i8_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB46_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a4, (a6) -; RV64IA-NEXT: and a2, a4, a3 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 ; RV64IA-NEXT: mv a5, a4 -; RV64IA-NEXT: bgeu a2, a1, .LBB46_3 +; RV64IA-NEXT: bgeu a3, a1, .LBB46_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB46_1 Depth=1 ; RV64IA-NEXT: xor a5, a4, a1 -; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: and a5, a5, a6 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB46_3: # in Loop: Header=BB46_1 Depth=1 -; RV64IA-NEXT: sc.w a5, a5, (a6) +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB46_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw umax i8* %a, i8 %b acquire ret i8 %1 @@ -3897,33 +3897,33 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lbu a3, 0(a0) -; RV32I-NEXT: mv s3, a1 -; RV32I-NEXT: andi s1, a1, 255 -; RV32I-NEXT: addi s2, sp, 11 +; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: andi s0, a1, 255 +; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB47_2 ; RV32I-NEXT: .LBB47_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB47_2 Depth=1 -; RV32I-NEXT: sb a3, 11(sp) +; RV32I-NEXT: sb a0, 11(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a3, zero, 3 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a3, 11(sp) -; RV32I-NEXT: bnez a0, .LBB47_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB47_4 ; RV32I-NEXT: .LBB47_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: andi a0, a3, 255 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bltu s1, a0, .LBB47_1 +; RV32I-NEXT: andi a1, a0, 255 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bltu s0, a1, .LBB47_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB47_2 Depth=1 -; RV32I-NEXT: mv a2, s3 +; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB47_1 ; RV32I-NEXT: .LBB47_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -3934,27 +3934,27 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umax_i8_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a6, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB47_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a4, (a6) -; RV32IA-NEXT: and a2, a4, a3 +; RV32IA-NEXT: lr.w a4, (a0) +; RV32IA-NEXT: and a3, a4, a6 ; 
RV32IA-NEXT: mv a5, a4 -; RV32IA-NEXT: bgeu a2, a1, .LBB47_3 +; RV32IA-NEXT: bgeu a3, a1, .LBB47_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB47_1 Depth=1 ; RV32IA-NEXT: xor a5, a4, a1 -; RV32IA-NEXT: and a5, a5, a3 +; RV32IA-NEXT: and a5, a5, a6 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB47_3: # in Loop: Header=BB47_1 Depth=1 -; RV32IA-NEXT: sc.w.rl a5, a5, (a6) +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB47_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umax_i8_release: @@ -3965,33 +3965,33 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lbu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: andi s1, a1, 255 -; RV64I-NEXT: addi s2, sp, 7 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: andi s0, a1, 255 +; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB47_2 ; RV64I-NEXT: .LBB47_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB47_2 Depth=1 -; RV64I-NEXT: sb a3, 7(sp) +; RV64I-NEXT: sb a0, 7(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 3 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a3, 7(sp) -; RV64I-NEXT: bnez a0, .LBB47_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB47_4 ; RV64I-NEXT: .LBB47_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: andi a0, a3, 255 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a0, .LBB47_1 +; RV64I-NEXT: andi a1, a0, 255 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bltu s0, a1, .LBB47_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB47_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB47_1 ; RV64I-NEXT: .LBB47_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -4002,27 +4002,27 @@ define i8 @atomicrmw_umax_i8_release(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umax_i8_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB47_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a4, (a6) -; RV64IA-NEXT: and a2, a4, a3 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 ; RV64IA-NEXT: mv a5, a4 -; RV64IA-NEXT: bgeu a2, a1, .LBB47_3 +; RV64IA-NEXT: bgeu a3, a1, .LBB47_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB47_1 Depth=1 ; RV64IA-NEXT: xor a5, a4, a1 -; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: and a5, a5, a6 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB47_3: # in Loop: Header=BB47_1 Depth=1 -; RV64IA-NEXT: sc.w.rl a5, a5, (a6) +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB47_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw umax i8* %a, i8 %b release ret i8 %1 @@ -4037,33 +4037,33 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 
%b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lbu a3, 0(a0) -; RV32I-NEXT: mv s3, a1 -; RV32I-NEXT: andi s1, a1, 255 -; RV32I-NEXT: addi s2, sp, 11 +; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: andi s0, a1, 255 +; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB48_2 ; RV32I-NEXT: .LBB48_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB48_2 Depth=1 -; RV32I-NEXT: sb a3, 11(sp) +; RV32I-NEXT: sb a0, 11(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a3, zero, 4 ; RV32I-NEXT: addi a4, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a3, 11(sp) -; RV32I-NEXT: bnez a0, .LBB48_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB48_4 ; RV32I-NEXT: .LBB48_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: andi a0, a3, 255 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bltu s1, a0, .LBB48_1 +; RV32I-NEXT: andi a1, a0, 255 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bltu s0, a1, .LBB48_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB48_2 Depth=1 -; RV32I-NEXT: mv a2, s3 +; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB48_1 ; RV32I-NEXT: .LBB48_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -4074,27 +4074,27 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umax_i8_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a6, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB48_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a4, (a6) -; RV32IA-NEXT: and a2, a4, a3 +; RV32IA-NEXT: lr.w.aq a4, (a0) +; RV32IA-NEXT: and a3, a4, a6 ; RV32IA-NEXT: mv a5, a4 -; RV32IA-NEXT: bgeu a2, a1, .LBB48_3 +; RV32IA-NEXT: bgeu a3, a1, .LBB48_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB48_1 Depth=1 ; RV32IA-NEXT: xor a5, a4, a1 -; RV32IA-NEXT: and a5, a5, a3 +; RV32IA-NEXT: and a5, a5, a6 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB48_3: # in Loop: Header=BB48_1 Depth=1 -; RV32IA-NEXT: sc.w.rl a5, a5, (a6) +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB48_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umax_i8_acq_rel: @@ -4105,33 +4105,33 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lbu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: andi s1, a1, 255 -; RV64I-NEXT: addi s2, sp, 7 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: andi s0, a1, 255 +; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB48_2 ; RV64I-NEXT: .LBB48_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB48_2 Depth=1 -; RV64I-NEXT: sb a3, 7(sp) +; RV64I-NEXT: sb a0, 7(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 4 ; RV64I-NEXT: addi a4, 
zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a3, 7(sp) -; RV64I-NEXT: bnez a0, .LBB48_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB48_4 ; RV64I-NEXT: .LBB48_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: andi a0, a3, 255 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a0, .LBB48_1 +; RV64I-NEXT: andi a1, a0, 255 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bltu s0, a1, .LBB48_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB48_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB48_1 ; RV64I-NEXT: .LBB48_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -4142,27 +4142,27 @@ define i8 @atomicrmw_umax_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umax_i8_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB48_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a4, (a6) -; RV64IA-NEXT: and a2, a4, a3 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 ; RV64IA-NEXT: mv a5, a4 -; RV64IA-NEXT: bgeu a2, a1, .LBB48_3 +; RV64IA-NEXT: bgeu a3, a1, .LBB48_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB48_1 Depth=1 ; RV64IA-NEXT: xor a5, a4, a1 -; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: and a5, a5, a6 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB48_3: # in Loop: Header=BB48_1 Depth=1 -; RV64IA-NEXT: sc.w.rl a5, a5, (a6) +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB48_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw umax i8* %a, i8 %b acq_rel ret i8 %1 @@ -4177,33 +4177,33 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lbu a3, 0(a0) -; RV32I-NEXT: mv s3, a1 -; RV32I-NEXT: andi s1, a1, 255 -; RV32I-NEXT: addi s2, sp, 11 +; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: andi s0, a1, 255 +; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB49_2 ; RV32I-NEXT: .LBB49_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB49_2 Depth=1 -; RV32I-NEXT: sb a3, 11(sp) +; RV32I-NEXT: sb a0, 11(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a3, zero, 5 ; RV32I-NEXT: addi a4, zero, 5 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a3, 11(sp) -; RV32I-NEXT: bnez a0, .LBB49_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB49_4 ; RV32I-NEXT: .LBB49_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: andi a0, a3, 255 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bltu s1, a0, .LBB49_1 +; RV32I-NEXT: andi a1, a0, 255 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bltu s0, a1, .LBB49_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB49_2 Depth=1 -; RV32I-NEXT: mv a2, s3 
+; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB49_1 ; RV32I-NEXT: .LBB49_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -4214,27 +4214,27 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umax_i8_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a6, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB49_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a4, (a6) -; RV32IA-NEXT: and a2, a4, a3 +; RV32IA-NEXT: lr.w.aqrl a4, (a0) +; RV32IA-NEXT: and a3, a4, a6 ; RV32IA-NEXT: mv a5, a4 -; RV32IA-NEXT: bgeu a2, a1, .LBB49_3 +; RV32IA-NEXT: bgeu a3, a1, .LBB49_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB49_1 Depth=1 ; RV32IA-NEXT: xor a5, a4, a1 -; RV32IA-NEXT: and a5, a5, a3 +; RV32IA-NEXT: and a5, a5, a6 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB49_3: # in Loop: Header=BB49_1 Depth=1 -; RV32IA-NEXT: sc.w.aqrl a5, a5, (a6) +; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB49_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umax_i8_seq_cst: @@ -4245,33 +4245,33 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lbu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: andi s1, a1, 255 -; RV64I-NEXT: addi s2, sp, 7 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: andi s0, a1, 255 +; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB49_2 ; RV64I-NEXT: .LBB49_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB49_2 Depth=1 -; RV64I-NEXT: sb a3, 7(sp) +; RV64I-NEXT: sb a0, 7(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 5 ; RV64I-NEXT: addi a4, zero, 5 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a3, 7(sp) -; RV64I-NEXT: bnez a0, .LBB49_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB49_4 ; RV64I-NEXT: .LBB49_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: andi a0, a3, 255 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a0, .LBB49_1 +; RV64I-NEXT: andi a1, a0, 255 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bltu s0, a1, .LBB49_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB49_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB49_1 ; RV64I-NEXT: .LBB49_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -4282,27 +4282,27 @@ define i8 @atomicrmw_umax_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umax_i8_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw 
a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB49_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a4, (a6) -; RV64IA-NEXT: and a2, a4, a3 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 ; RV64IA-NEXT: mv a5, a4 -; RV64IA-NEXT: bgeu a2, a1, .LBB49_3 +; RV64IA-NEXT: bgeu a3, a1, .LBB49_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB49_1 Depth=1 ; RV64IA-NEXT: xor a5, a4, a1 -; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: and a5, a5, a6 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB49_3: # in Loop: Header=BB49_1 Depth=1 -; RV64IA-NEXT: sc.w.aqrl a5, a5, (a6) +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB49_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw umax i8* %a, i8 %b seq_cst ret i8 %1 @@ -4317,33 +4317,33 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lbu a3, 0(a0) -; RV32I-NEXT: mv s3, a1 -; RV32I-NEXT: andi s1, a1, 255 -; RV32I-NEXT: addi s2, sp, 11 +; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: andi s0, a1, 255 +; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB50_2 ; RV32I-NEXT: .LBB50_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB50_2 Depth=1 -; RV32I-NEXT: sb a3, 11(sp) -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 +; RV32I-NEXT: sb a0, 11(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: mv a3, zero ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a3, 11(sp) -; RV32I-NEXT: bnez a0, .LBB50_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB50_4 ; RV32I-NEXT: .LBB50_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: andi a0, a3, 255 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bgeu s1, a0, .LBB50_1 +; RV32I-NEXT: andi a1, a0, 255 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bgeu s0, a1, .LBB50_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB50_2 Depth=1 -; RV32I-NEXT: mv a2, s3 +; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB50_1 ; RV32I-NEXT: .LBB50_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -4354,27 +4354,27 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umin_i8_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a6, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB50_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a4, (a6) -; RV32IA-NEXT: and a2, a4, a3 +; RV32IA-NEXT: lr.w a4, (a0) +; RV32IA-NEXT: and a3, a4, a6 ; RV32IA-NEXT: mv a5, a4 -; RV32IA-NEXT: bgeu a1, a2, .LBB50_3 +; RV32IA-NEXT: bgeu a1, a3, .LBB50_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB50_1 Depth=1 ; RV32IA-NEXT: xor a5, a4, a1 -; RV32IA-NEXT: and a5, a5, a3 +; RV32IA-NEXT: and a5, a5, a6 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB50_3: # in Loop: Header=BB50_1 Depth=1 -; RV32IA-NEXT: sc.w a5, a5, (a6) +; RV32IA-NEXT: 
sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB50_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umin_i8_monotonic: @@ -4385,33 +4385,33 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lbu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: andi s1, a1, 255 -; RV64I-NEXT: addi s2, sp, 7 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: andi s0, a1, 255 +; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB50_2 ; RV64I-NEXT: .LBB50_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB50_2 Depth=1 -; RV64I-NEXT: sb a3, 7(sp) -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 +; RV64I-NEXT: sb a0, 7(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: mv a3, zero ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a3, 7(sp) -; RV64I-NEXT: bnez a0, .LBB50_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB50_4 ; RV64I-NEXT: .LBB50_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: andi a0, a3, 255 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a0, .LBB50_1 +; RV64I-NEXT: andi a1, a0, 255 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bgeu s0, a1, .LBB50_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB50_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB50_1 ; RV64I-NEXT: .LBB50_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -4422,27 +4422,27 @@ define i8 @atomicrmw_umin_i8_monotonic(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umin_i8_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB50_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a4, (a6) -; RV64IA-NEXT: and a2, a4, a3 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 ; RV64IA-NEXT: mv a5, a4 -; RV64IA-NEXT: bgeu a1, a2, .LBB50_3 +; RV64IA-NEXT: bgeu a1, a3, .LBB50_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB50_1 Depth=1 ; RV64IA-NEXT: xor a5, a4, a1 -; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: and a5, a5, a6 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB50_3: # in Loop: Header=BB50_1 Depth=1 -; RV64IA-NEXT: sc.w a5, a5, (a6) +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB50_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw umin i8* %a, i8 %b monotonic ret i8 %1 @@ -4457,33 +4457,33 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lbu a3, 0(a0) -; RV32I-NEXT: mv s3, a1 -; RV32I-NEXT: andi s1, a1, 255 -; RV32I-NEXT: addi s2, sp, 11 +; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: andi s0, a1, 255 +; RV32I-NEXT: addi s3, sp, 11 ; 
RV32I-NEXT: j .LBB51_2 ; RV32I-NEXT: .LBB51_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB51_2 Depth=1 -; RV32I-NEXT: sb a3, 11(sp) +; RV32I-NEXT: sb a0, 11(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a3, zero, 2 ; RV32I-NEXT: addi a4, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a3, 11(sp) -; RV32I-NEXT: bnez a0, .LBB51_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB51_4 ; RV32I-NEXT: .LBB51_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: andi a0, a3, 255 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bgeu s1, a0, .LBB51_1 +; RV32I-NEXT: andi a1, a0, 255 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bgeu s0, a1, .LBB51_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB51_2 Depth=1 -; RV32I-NEXT: mv a2, s3 +; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB51_1 ; RV32I-NEXT: .LBB51_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -4494,27 +4494,27 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umin_i8_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a6, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB51_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a4, (a6) -; RV32IA-NEXT: and a2, a4, a3 +; RV32IA-NEXT: lr.w.aq a4, (a0) +; RV32IA-NEXT: and a3, a4, a6 ; RV32IA-NEXT: mv a5, a4 -; RV32IA-NEXT: bgeu a1, a2, .LBB51_3 +; RV32IA-NEXT: bgeu a1, a3, .LBB51_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB51_1 Depth=1 ; RV32IA-NEXT: xor a5, a4, a1 -; RV32IA-NEXT: and a5, a5, a3 +; RV32IA-NEXT: and a5, a5, a6 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB51_3: # in Loop: Header=BB51_1 Depth=1 -; RV32IA-NEXT: sc.w a5, a5, (a6) +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB51_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umin_i8_acquire: @@ -4525,33 +4525,33 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lbu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: andi s1, a1, 255 -; RV64I-NEXT: addi s2, sp, 7 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: andi s0, a1, 255 +; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB51_2 ; RV64I-NEXT: .LBB51_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB51_2 Depth=1 -; RV64I-NEXT: sb a3, 7(sp) +; RV64I-NEXT: sb a0, 7(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 2 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a3, 7(sp) -; RV64I-NEXT: bnez a0, .LBB51_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB51_4 ; RV64I-NEXT: .LBB51_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: andi a0, a3, 255 -; RV64I-NEXT: mv 
a2, a3 -; RV64I-NEXT: bgeu s1, a0, .LBB51_1 +; RV64I-NEXT: andi a1, a0, 255 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bgeu s0, a1, .LBB51_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB51_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB51_1 ; RV64I-NEXT: .LBB51_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -4562,27 +4562,27 @@ define i8 @atomicrmw_umin_i8_acquire(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umin_i8_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB51_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a4, (a6) -; RV64IA-NEXT: and a2, a4, a3 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 ; RV64IA-NEXT: mv a5, a4 -; RV64IA-NEXT: bgeu a1, a2, .LBB51_3 +; RV64IA-NEXT: bgeu a1, a3, .LBB51_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB51_1 Depth=1 ; RV64IA-NEXT: xor a5, a4, a1 -; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: and a5, a5, a6 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB51_3: # in Loop: Header=BB51_1 Depth=1 -; RV64IA-NEXT: sc.w a5, a5, (a6) +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB51_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw umin i8* %a, i8 %b acquire ret i8 %1 @@ -4597,33 +4597,33 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lbu a3, 0(a0) -; RV32I-NEXT: mv s3, a1 -; RV32I-NEXT: andi s1, a1, 255 -; RV32I-NEXT: addi s2, sp, 11 +; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: andi s0, a1, 255 +; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB52_2 ; RV32I-NEXT: .LBB52_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB52_2 Depth=1 -; RV32I-NEXT: sb a3, 11(sp) +; RV32I-NEXT: sb a0, 11(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a3, zero, 3 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a3, 11(sp) -; RV32I-NEXT: bnez a0, .LBB52_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB52_4 ; RV32I-NEXT: .LBB52_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: andi a0, a3, 255 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bgeu s1, a0, .LBB52_1 +; RV32I-NEXT: andi a1, a0, 255 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bgeu s0, a1, .LBB52_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB52_2 Depth=1 -; RV32I-NEXT: mv a2, s3 +; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB52_1 ; RV32I-NEXT: .LBB52_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -4634,27 +4634,27 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umin_i8_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli 
a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a6, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB52_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a4, (a6) -; RV32IA-NEXT: and a2, a4, a3 +; RV32IA-NEXT: lr.w a4, (a0) +; RV32IA-NEXT: and a3, a4, a6 ; RV32IA-NEXT: mv a5, a4 -; RV32IA-NEXT: bgeu a1, a2, .LBB52_3 +; RV32IA-NEXT: bgeu a1, a3, .LBB52_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB52_1 Depth=1 ; RV32IA-NEXT: xor a5, a4, a1 -; RV32IA-NEXT: and a5, a5, a3 +; RV32IA-NEXT: and a5, a5, a6 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB52_3: # in Loop: Header=BB52_1 Depth=1 -; RV32IA-NEXT: sc.w.rl a5, a5, (a6) +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB52_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umin_i8_release: @@ -4665,33 +4665,33 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lbu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: andi s1, a1, 255 -; RV64I-NEXT: addi s2, sp, 7 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: andi s0, a1, 255 +; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB52_2 ; RV64I-NEXT: .LBB52_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB52_2 Depth=1 -; RV64I-NEXT: sb a3, 7(sp) +; RV64I-NEXT: sb a0, 7(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 3 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a3, 7(sp) -; RV64I-NEXT: bnez a0, .LBB52_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB52_4 ; RV64I-NEXT: .LBB52_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: andi a0, a3, 255 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a0, .LBB52_1 +; RV64I-NEXT: andi a1, a0, 255 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bgeu s0, a1, .LBB52_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB52_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB52_1 ; RV64I-NEXT: .LBB52_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -4702,27 +4702,27 @@ define i8 @atomicrmw_umin_i8_release(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umin_i8_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB52_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a4, (a6) -; RV64IA-NEXT: and a2, a4, a3 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 ; RV64IA-NEXT: mv a5, a4 -; RV64IA-NEXT: bgeu a1, a2, .LBB52_3 +; RV64IA-NEXT: bgeu a1, a3, .LBB52_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB52_1 Depth=1 ; 
RV64IA-NEXT: xor a5, a4, a1 -; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: and a5, a5, a6 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB52_3: # in Loop: Header=BB52_1 Depth=1 -; RV64IA-NEXT: sc.w.rl a5, a5, (a6) +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB52_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw umin i8* %a, i8 %b release ret i8 %1 @@ -4737,33 +4737,33 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lbu a3, 0(a0) -; RV32I-NEXT: mv s3, a1 -; RV32I-NEXT: andi s1, a1, 255 -; RV32I-NEXT: addi s2, sp, 11 +; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: andi s0, a1, 255 +; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB53_2 ; RV32I-NEXT: .LBB53_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB53_2 Depth=1 -; RV32I-NEXT: sb a3, 11(sp) +; RV32I-NEXT: sb a0, 11(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a3, zero, 4 ; RV32I-NEXT: addi a4, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a3, 11(sp) -; RV32I-NEXT: bnez a0, .LBB53_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB53_4 ; RV32I-NEXT: .LBB53_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: andi a0, a3, 255 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bgeu s1, a0, .LBB53_1 +; RV32I-NEXT: andi a1, a0, 255 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bgeu s0, a1, .LBB53_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB53_2 Depth=1 -; RV32I-NEXT: mv a2, s3 +; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB53_1 ; RV32I-NEXT: .LBB53_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -4774,27 +4774,27 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umin_i8_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a6, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB53_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a4, (a6) -; RV32IA-NEXT: and a2, a4, a3 +; RV32IA-NEXT: lr.w.aq a4, (a0) +; RV32IA-NEXT: and a3, a4, a6 ; RV32IA-NEXT: mv a5, a4 -; RV32IA-NEXT: bgeu a1, a2, .LBB53_3 +; RV32IA-NEXT: bgeu a1, a3, .LBB53_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB53_1 Depth=1 ; RV32IA-NEXT: xor a5, a4, a1 -; RV32IA-NEXT: and a5, a5, a3 +; RV32IA-NEXT: and a5, a5, a6 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB53_3: # in Loop: Header=BB53_1 Depth=1 -; RV32IA-NEXT: sc.w.rl a5, a5, (a6) +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB53_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umin_i8_acq_rel: @@ -4805,33 +4805,33 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lbu 
a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: andi s1, a1, 255 -; RV64I-NEXT: addi s2, sp, 7 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: andi s0, a1, 255 +; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB53_2 ; RV64I-NEXT: .LBB53_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB53_2 Depth=1 -; RV64I-NEXT: sb a3, 7(sp) +; RV64I-NEXT: sb a0, 7(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 4 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a3, 7(sp) -; RV64I-NEXT: bnez a0, .LBB53_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB53_4 ; RV64I-NEXT: .LBB53_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: andi a0, a3, 255 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a0, .LBB53_1 +; RV64I-NEXT: andi a1, a0, 255 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bgeu s0, a1, .LBB53_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB53_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB53_1 ; RV64I-NEXT: .LBB53_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -4842,27 +4842,27 @@ define i8 @atomicrmw_umin_i8_acq_rel(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umin_i8_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB53_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a4, (a6) -; RV64IA-NEXT: and a2, a4, a3 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 ; RV64IA-NEXT: mv a5, a4 -; RV64IA-NEXT: bgeu a1, a2, .LBB53_3 +; RV64IA-NEXT: bgeu a1, a3, .LBB53_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB53_1 Depth=1 ; RV64IA-NEXT: xor a5, a4, a1 -; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: and a5, a5, a6 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB53_3: # in Loop: Header=BB53_1 Depth=1 -; RV64IA-NEXT: sc.w.rl a5, a5, (a6) +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB53_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw umin i8* %a, i8 %b acq_rel ret i8 %1 @@ -4877,33 +4877,33 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lbu a3, 0(a0) -; RV32I-NEXT: mv s3, a1 -; RV32I-NEXT: andi s1, a1, 255 -; RV32I-NEXT: addi s2, sp, 11 +; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lbu a0, 0(a0) +; RV32I-NEXT: andi s0, a1, 255 +; RV32I-NEXT: addi s3, sp, 11 ; RV32I-NEXT: j .LBB54_2 ; RV32I-NEXT: .LBB54_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB54_2 Depth=1 -; RV32I-NEXT: sb a3, 11(sp) +; RV32I-NEXT: sb a0, 11(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a3, zero, 5 ; RV32I-NEXT: addi a4, zero, 5 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __atomic_compare_exchange_1 -; RV32I-NEXT: lb a3, 11(sp) 
-; RV32I-NEXT: bnez a0, .LBB54_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lb a0, 11(sp) +; RV32I-NEXT: bnez a1, .LBB54_4 ; RV32I-NEXT: .LBB54_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: andi a0, a3, 255 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bgeu s1, a0, .LBB54_1 +; RV32I-NEXT: andi a1, a0, 255 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bgeu s0, a1, .LBB54_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB54_2 Depth=1 -; RV32I-NEXT: mv a2, s3 +; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB54_1 ; RV32I-NEXT: .LBB54_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -4914,27 +4914,27 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umin_i8_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 ; RV32IA-NEXT: addi a3, zero, 255 -; RV32IA-NEXT: sll a3, a3, a0 +; RV32IA-NEXT: sll a6, a3, a2 ; RV32IA-NEXT: andi a1, a1, 255 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB54_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a4, (a6) -; RV32IA-NEXT: and a2, a4, a3 +; RV32IA-NEXT: lr.w.aqrl a4, (a0) +; RV32IA-NEXT: and a3, a4, a6 ; RV32IA-NEXT: mv a5, a4 -; RV32IA-NEXT: bgeu a1, a2, .LBB54_3 +; RV32IA-NEXT: bgeu a1, a3, .LBB54_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB54_1 Depth=1 ; RV32IA-NEXT: xor a5, a4, a1 -; RV32IA-NEXT: and a5, a5, a3 +; RV32IA-NEXT: and a5, a5, a6 ; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB54_3: # in Loop: Header=BB54_1 Depth=1 -; RV32IA-NEXT: sc.w.aqrl a5, a5, (a6) +; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB54_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a4, a0 +; RV32IA-NEXT: srl a0, a4, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umin_i8_seq_cst: @@ -4945,33 +4945,33 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lbu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: andi s1, a1, 255 -; RV64I-NEXT: addi s2, sp, 7 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lbu a0, 0(a0) +; RV64I-NEXT: andi s0, a1, 255 +; RV64I-NEXT: addi s3, sp, 7 ; RV64I-NEXT: j .LBB54_2 ; RV64I-NEXT: .LBB54_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB54_2 Depth=1 -; RV64I-NEXT: sb a3, 7(sp) +; RV64I-NEXT: sb a0, 7(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 5 ; RV64I-NEXT: addi a4, zero, 5 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_1 -; RV64I-NEXT: lb a3, 7(sp) -; RV64I-NEXT: bnez a0, .LBB54_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lb a0, 7(sp) +; RV64I-NEXT: bnez a1, .LBB54_4 ; RV64I-NEXT: .LBB54_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: andi a0, a3, 255 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a0, .LBB54_1 +; RV64I-NEXT: andi a1, a0, 255 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bgeu s0, a1, .LBB54_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB54_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB54_1 ; RV64I-NEXT: .LBB54_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) 
; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -4982,27 +4982,27 @@ define i8 @atomicrmw_umin_i8_seq_cst(i8 *%a, i8 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umin_i8_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 ; RV64IA-NEXT: addi a3, zero, 255 -; RV64IA-NEXT: sllw a3, a3, a0 +; RV64IA-NEXT: sllw a6, a3, a2 ; RV64IA-NEXT: andi a1, a1, 255 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB54_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a4, (a6) -; RV64IA-NEXT: and a2, a4, a3 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a3, a4, a6 ; RV64IA-NEXT: mv a5, a4 -; RV64IA-NEXT: bgeu a1, a2, .LBB54_3 +; RV64IA-NEXT: bgeu a1, a3, .LBB54_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB54_1 Depth=1 ; RV64IA-NEXT: xor a5, a4, a1 -; RV64IA-NEXT: and a5, a5, a3 +; RV64IA-NEXT: and a5, a5, a6 ; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB54_3: # in Loop: Header=BB54_1 Depth=1 -; RV64IA-NEXT: sc.w.aqrl a5, a5, (a6) +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB54_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a4, a0 +; RV64IA-NEXT: srlw a0, a4, a2 ; RV64IA-NEXT: ret %1 = atomicrmw umin i8* %a, i8 %b seq_cst ret i8 %1 @@ -5021,24 +5021,24 @@ define i16 @atomicrmw_xchg_i16_monotonic(i16* %a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xchg_i16_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB55_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a3, (a2) +; RV32IA-NEXT: lr.w a4, (a0) ; RV32IA-NEXT: add a5, zero, a1 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w a5, a5, (a2) +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB55_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i16_monotonic: @@ -5053,24 +5053,24 @@ define i16 @atomicrmw_xchg_i16_monotonic(i16* %a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xchg_i16_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB55_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a3, (a2) +; RV64IA-NEXT: lr.w a4, (a0) ; RV64IA-NEXT: add a5, zero, a1 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; 
RV64IA-NEXT: sc.w a5, a5, (a2) +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB55_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b monotonic ret i16 %1 @@ -5089,24 +5089,24 @@ define i16 @atomicrmw_xchg_i16_acquire(i16* %a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xchg_i16_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB56_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a3, (a2) +; RV32IA-NEXT: lr.w.aq a4, (a0) ; RV32IA-NEXT: add a5, zero, a1 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w a5, a5, (a2) +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB56_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i16_acquire: @@ -5121,24 +5121,24 @@ define i16 @atomicrmw_xchg_i16_acquire(i16* %a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xchg_i16_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB56_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a3, (a2) +; RV64IA-NEXT: lr.w.aq a4, (a0) ; RV64IA-NEXT: add a5, zero, a1 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w a5, a5, (a2) +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB56_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b acquire ret i16 %1 @@ -5157,24 +5157,24 @@ define i16 @atomicrmw_xchg_i16_release(i16* %a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xchg_i16_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB57_1: # =>This Inner 
Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a3, (a2) +; RV32IA-NEXT: lr.w a4, (a0) ; RV32IA-NEXT: add a5, zero, a1 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w.rl a5, a5, (a2) +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB57_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i16_release: @@ -5189,24 +5189,24 @@ define i16 @atomicrmw_xchg_i16_release(i16* %a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xchg_i16_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB57_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a3, (a2) +; RV64IA-NEXT: lr.w a4, (a0) ; RV64IA-NEXT: add a5, zero, a1 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w.rl a5, a5, (a2) +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB57_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b release ret i16 %1 @@ -5225,24 +5225,24 @@ define i16 @atomicrmw_xchg_i16_acq_rel(i16* %a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xchg_i16_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB58_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a3, (a2) +; RV32IA-NEXT: lr.w.aq a4, (a0) ; RV32IA-NEXT: add a5, zero, a1 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w.rl a5, a5, (a2) +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB58_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i16_acq_rel: @@ -5257,24 +5257,24 @@ define i16 @atomicrmw_xchg_i16_acq_rel(i16* %a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xchg_i16_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: 
and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB58_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a3, (a2) +; RV64IA-NEXT: lr.w.aq a4, (a0) ; RV64IA-NEXT: add a5, zero, a1 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w.rl a5, a5, (a2) +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB58_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b acq_rel ret i16 %1 @@ -5293,24 +5293,24 @@ define i16 @atomicrmw_xchg_i16_seq_cst(i16* %a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xchg_i16_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB59_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a3, (a2) +; RV32IA-NEXT: lr.w.aqrl a4, (a0) ; RV32IA-NEXT: add a5, zero, a1 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2) +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB59_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xchg_i16_seq_cst: @@ -5325,24 +5325,24 @@ define i16 @atomicrmw_xchg_i16_seq_cst(i16* %a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xchg_i16_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB59_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a3, (a2) +; RV64IA-NEXT: lr.w.aqrl a4, (a0) ; RV64IA-NEXT: add a5, zero, a1 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2) +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB59_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw xchg i16* %a, i16 %b seq_cst ret i16 %1 @@ -5361,24 +5361,24 @@ define i16 @atomicrmw_add_i16_monotonic(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_add_i16_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; 
RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB60_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a3, (a2) -; RV32IA-NEXT: add a5, a3, a1 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w a5, a5, (a2) +; RV32IA-NEXT: lr.w a4, (a0) +; RV32IA-NEXT: add a5, a4, a1 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB60_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_add_i16_monotonic: @@ -5393,24 +5393,24 @@ define i16 @atomicrmw_add_i16_monotonic(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_add_i16_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB60_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a3, (a2) -; RV64IA-NEXT: add a5, a3, a1 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w a5, a5, (a2) +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB60_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw add i16* %a, i16 %b monotonic ret i16 %1 @@ -5429,24 +5429,24 @@ define i16 @atomicrmw_add_i16_acquire(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_add_i16_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB61_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a3, (a2) -; RV32IA-NEXT: add a5, a3, a1 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w a5, a5, (a2) +; RV32IA-NEXT: lr.w.aq a4, (a0) +; RV32IA-NEXT: add a5, a4, a1 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB61_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; 
RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_add_i16_acquire: @@ -5461,24 +5461,24 @@ define i16 @atomicrmw_add_i16_acquire(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_add_i16_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB61_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a3, (a2) -; RV64IA-NEXT: add a5, a3, a1 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w a5, a5, (a2) +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB61_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw add i16* %a, i16 %b acquire ret i16 %1 @@ -5497,24 +5497,24 @@ define i16 @atomicrmw_add_i16_release(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_add_i16_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB62_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a3, (a2) -; RV32IA-NEXT: add a5, a3, a1 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w.rl a5, a5, (a2) +; RV32IA-NEXT: lr.w a4, (a0) +; RV32IA-NEXT: add a5, a4, a1 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB62_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_add_i16_release: @@ -5529,24 +5529,24 @@ define i16 @atomicrmw_add_i16_release(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_add_i16_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB62_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a3, (a2) -; RV64IA-NEXT: add a5, a3, a1 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w.rl a5, a5, (a2) +; RV64IA-NEXT: lr.w 
a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB62_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw add i16* %a, i16 %b release ret i16 %1 @@ -5565,24 +5565,24 @@ define i16 @atomicrmw_add_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_add_i16_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB63_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a3, (a2) -; RV32IA-NEXT: add a5, a3, a1 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w.rl a5, a5, (a2) +; RV32IA-NEXT: lr.w.aq a4, (a0) +; RV32IA-NEXT: add a5, a4, a1 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB63_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_add_i16_acq_rel: @@ -5597,24 +5597,24 @@ define i16 @atomicrmw_add_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_add_i16_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB63_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a3, (a2) -; RV64IA-NEXT: add a5, a3, a1 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w.rl a5, a5, (a2) +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB63_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw add i16* %a, i16 %b acq_rel ret i16 %1 @@ -5633,24 +5633,24 @@ define i16 @atomicrmw_add_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_add_i16_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, 
a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB64_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a3, (a2) -; RV32IA-NEXT: add a5, a3, a1 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2) +; RV32IA-NEXT: lr.w.aqrl a4, (a0) +; RV32IA-NEXT: add a5, a4, a1 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB64_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_add_i16_seq_cst: @@ -5665,24 +5665,24 @@ define i16 @atomicrmw_add_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_add_i16_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB64_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a3, (a2) -; RV64IA-NEXT: add a5, a3, a1 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2) +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: add a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB64_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw add i16* %a, i16 %b seq_cst ret i16 %1 @@ -5701,24 +5701,24 @@ define i16 @atomicrmw_sub_i16_monotonic(i16* %a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_sub_i16_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB65_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a3, (a2) -; RV32IA-NEXT: sub a5, a3, a1 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w a5, a5, (a2) +; RV32IA-NEXT: lr.w a4, (a0) +; RV32IA-NEXT: sub a5, a4, a1 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB65_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i16_monotonic: @@ -5733,24 +5733,24 @@ define i16 @atomicrmw_sub_i16_monotonic(i16* %a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_sub_i16_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw 
a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB65_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a3, (a2) -; RV64IA-NEXT: sub a5, a3, a1 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w a5, a5, (a2) +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB65_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b monotonic ret i16 %1 @@ -5769,24 +5769,24 @@ define i16 @atomicrmw_sub_i16_acquire(i16* %a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_sub_i16_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB66_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a3, (a2) -; RV32IA-NEXT: sub a5, a3, a1 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w a5, a5, (a2) +; RV32IA-NEXT: lr.w.aq a4, (a0) +; RV32IA-NEXT: sub a5, a4, a1 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB66_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i16_acquire: @@ -5801,24 +5801,24 @@ define i16 @atomicrmw_sub_i16_acquire(i16* %a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_sub_i16_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB66_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a3, (a2) -; RV64IA-NEXT: sub a5, a3, a1 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w a5, a5, (a2) +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB66_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b acquire ret i16 %1 
@@ -5837,24 +5837,24 @@ define i16 @atomicrmw_sub_i16_release(i16* %a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_sub_i16_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB67_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a3, (a2) -; RV32IA-NEXT: sub a5, a3, a1 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w.rl a5, a5, (a2) +; RV32IA-NEXT: lr.w a4, (a0) +; RV32IA-NEXT: sub a5, a4, a1 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB67_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i16_release: @@ -5869,24 +5869,24 @@ define i16 @atomicrmw_sub_i16_release(i16* %a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_sub_i16_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB67_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a3, (a2) -; RV64IA-NEXT: sub a5, a3, a1 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w.rl a5, a5, (a2) +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB67_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b release ret i16 %1 @@ -5905,24 +5905,24 @@ define i16 @atomicrmw_sub_i16_acq_rel(i16* %a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_sub_i16_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB68_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a3, (a2) -; RV32IA-NEXT: sub a5, a3, a1 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w.rl a5, a5, (a2) +; RV32IA-NEXT: lr.w.aq a4, (a0) +; RV32IA-NEXT: sub a5, a4, a1 +; RV32IA-NEXT: xor a5, 
a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB68_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i16_acq_rel: @@ -5937,24 +5937,24 @@ define i16 @atomicrmw_sub_i16_acq_rel(i16* %a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_sub_i16_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB68_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a3, (a2) -; RV64IA-NEXT: sub a5, a3, a1 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w.rl a5, a5, (a2) +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB68_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b acq_rel ret i16 %1 @@ -5973,24 +5973,24 @@ define i16 @atomicrmw_sub_i16_seq_cst(i16* %a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_sub_i16_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB69_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a3, (a2) -; RV32IA-NEXT: sub a5, a3, a1 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2) +; RV32IA-NEXT: lr.w.aqrl a4, (a0) +; RV32IA-NEXT: sub a5, a4, a1 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB69_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_sub_i16_seq_cst: @@ -6005,24 +6005,24 @@ define i16 @atomicrmw_sub_i16_seq_cst(i16* %a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_sub_i16_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB69_1: # 
=>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a3, (a2) -; RV64IA-NEXT: sub a5, a3, a1 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2) +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: sub a5, a4, a1 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB69_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw sub i16* %a, i16 %b seq_cst ret i16 %1 @@ -6041,18 +6041,18 @@ define i16 @atomicrmw_and_i16_monotonic(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_and_i16_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: not a4, a4 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: or a1, a4, a1 -; RV32IA-NEXT: amoand.w a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: not a2, a2 +; RV32IA-NEXT: or a1, a2, a1 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoand.w a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_and_i16_monotonic: @@ -6067,18 +6067,18 @@ define i16 @atomicrmw_and_i16_monotonic(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_and_i16_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: not a4, a4 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: or a1, a4, a1 -; RV64IA-NEXT: amoand.w a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: not a2, a2 +; RV64IA-NEXT: or a1, a2, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a3 ; RV64IA-NEXT: ret %1 = atomicrmw and i16* %a, i16 %b monotonic ret i16 %1 @@ -6097,18 +6097,18 @@ define i16 @atomicrmw_and_i16_acquire(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_and_i16_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: not a4, a4 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: or a1, a4, a1 -; RV32IA-NEXT: amoand.w.aq a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: not a2, a2 +; RV32IA-NEXT: or a1, a2, a1 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoand.w.aq a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_and_i16_acquire: @@ 
-6123,18 +6123,18 @@ define i16 @atomicrmw_and_i16_acquire(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_and_i16_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: not a4, a4 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: or a1, a4, a1 -; RV64IA-NEXT: amoand.w.aq a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: not a2, a2 +; RV64IA-NEXT: or a1, a2, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.aq a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a3 ; RV64IA-NEXT: ret %1 = atomicrmw and i16* %a, i16 %b acquire ret i16 %1 @@ -6153,18 +6153,18 @@ define i16 @atomicrmw_and_i16_release(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_and_i16_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: not a4, a4 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: or a1, a4, a1 -; RV32IA-NEXT: amoand.w.rl a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: not a2, a2 +; RV32IA-NEXT: or a1, a2, a1 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoand.w.rl a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_and_i16_release: @@ -6179,18 +6179,18 @@ define i16 @atomicrmw_and_i16_release(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_and_i16_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: not a4, a4 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: or a1, a4, a1 -; RV64IA-NEXT: amoand.w.rl a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: not a2, a2 +; RV64IA-NEXT: or a1, a2, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.rl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a3 ; RV64IA-NEXT: ret %1 = atomicrmw and i16* %a, i16 %b release ret i16 %1 @@ -6209,18 +6209,18 @@ define i16 @atomicrmw_and_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_and_i16_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: not a4, a4 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: or a1, a4, a1 -; RV32IA-NEXT: amoand.w.aqrl a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, 
a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: not a2, a2 +; RV32IA-NEXT: or a1, a2, a1 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_and_i16_acq_rel: @@ -6235,18 +6235,18 @@ define i16 @atomicrmw_and_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_and_i16_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: not a4, a4 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: or a1, a4, a1 -; RV64IA-NEXT: amoand.w.aqrl a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: not a2, a2 +; RV64IA-NEXT: or a1, a2, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a3 ; RV64IA-NEXT: ret %1 = atomicrmw and i16* %a, i16 %b acq_rel ret i16 %1 @@ -6265,18 +6265,18 @@ define i16 @atomicrmw_and_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_and_i16_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: not a4, a4 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: or a1, a4, a1 -; RV32IA-NEXT: amoand.w.aqrl a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: not a2, a2 +; RV32IA-NEXT: or a1, a2, a1 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_and_i16_seq_cst: @@ -6291,18 +6291,18 @@ define i16 @atomicrmw_and_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_and_i16_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: not a4, a4 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: or a1, a4, a1 -; RV64IA-NEXT: amoand.w.aqrl a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: not a2, a2 +; RV64IA-NEXT: or a1, a2, a1 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoand.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a3 ; RV64IA-NEXT: ret %1 = atomicrmw and i16* %a, i16 %b seq_cst ret i16 %1 @@ -6321,25 +6321,25 @@ define i16 @atomicrmw_nand_i16_monotonic(i16* %a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_nand_i16_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: 
andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB75_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a3, (a2) -; RV32IA-NEXT: and a5, a3, a1 +; RV32IA-NEXT: lr.w a4, (a0) +; RV32IA-NEXT: and a5, a4, a1 ; RV32IA-NEXT: not a5, a5 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w a5, a5, (a2) +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB75_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i16_monotonic: @@ -6354,25 +6354,25 @@ define i16 @atomicrmw_nand_i16_monotonic(i16* %a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_nand_i16_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB75_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a3, (a2) -; RV64IA-NEXT: and a5, a3, a1 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 ; RV64IA-NEXT: not a5, a5 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w a5, a5, (a2) +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB75_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b monotonic ret i16 %1 @@ -6391,25 +6391,25 @@ define i16 @atomicrmw_nand_i16_acquire(i16* %a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_nand_i16_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB76_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a3, (a2) -; RV32IA-NEXT: and a5, a3, a1 +; RV32IA-NEXT: lr.w.aq a4, (a0) +; RV32IA-NEXT: and a5, a4, a1 ; RV32IA-NEXT: not a5, a5 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w a5, a5, (a2) +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB76_1 ; RV32IA-NEXT: # 
%bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i16_acquire: @@ -6424,25 +6424,25 @@ define i16 @atomicrmw_nand_i16_acquire(i16* %a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_nand_i16_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB76_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a3, (a2) -; RV64IA-NEXT: and a5, a3, a1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 ; RV64IA-NEXT: not a5, a5 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w a5, a5, (a2) +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB76_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b acquire ret i16 %1 @@ -6461,25 +6461,25 @@ define i16 @atomicrmw_nand_i16_release(i16* %a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_nand_i16_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB77_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a3, (a2) -; RV32IA-NEXT: and a5, a3, a1 +; RV32IA-NEXT: lr.w a4, (a0) +; RV32IA-NEXT: and a5, a4, a1 ; RV32IA-NEXT: not a5, a5 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w.rl a5, a5, (a2) +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB77_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i16_release: @@ -6494,25 +6494,25 @@ define i16 @atomicrmw_nand_i16_release(i16* %a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_nand_i16_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB77_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a3, (a2) -; RV64IA-NEXT: and a5, a3, a1 +; RV64IA-NEXT: lr.w 
a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 ; RV64IA-NEXT: not a5, a5 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w.rl a5, a5, (a2) +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB77_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b release ret i16 %1 @@ -6531,25 +6531,25 @@ define i16 @atomicrmw_nand_i16_acq_rel(i16* %a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_nand_i16_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB78_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a3, (a2) -; RV32IA-NEXT: and a5, a3, a1 +; RV32IA-NEXT: lr.w.aq a4, (a0) +; RV32IA-NEXT: and a5, a4, a1 ; RV32IA-NEXT: not a5, a5 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w.rl a5, a5, (a2) +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB78_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i16_acq_rel: @@ -6564,25 +6564,25 @@ define i16 @atomicrmw_nand_i16_acq_rel(i16* %a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_nand_i16_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB78_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a3, (a2) -; RV64IA-NEXT: and a5, a3, a1 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 ; RV64IA-NEXT: not a5, a5 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w.rl a5, a5, (a2) +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB78_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b acq_rel ret i16 %1 @@ -6601,25 +6601,25 @@ define i16 @atomicrmw_nand_i16_seq_cst(i16* %a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_nand_i16_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, 
a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a2, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB79_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a3, (a2) -; RV32IA-NEXT: and a5, a3, a1 +; RV32IA-NEXT: lr.w.aqrl a4, (a0) +; RV32IA-NEXT: and a5, a4, a1 ; RV32IA-NEXT: not a5, a5 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 -; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2) +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: and a5, a5, a2 +; RV32IA-NEXT: xor a5, a4, a5 +; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB79_1 ; RV32IA-NEXT: # %bb.2: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_nand_i16_seq_cst: @@ -6634,25 +6634,25 @@ define i16 @atomicrmw_nand_i16_seq_cst(i16* %a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_nand_i16_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a2, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB79_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a3, (a2) -; RV64IA-NEXT: and a5, a3, a1 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a5, a4, a1 ; RV64IA-NEXT: not a5, a5 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 -; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2) +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: and a5, a5, a2 +; RV64IA-NEXT: xor a5, a4, a5 +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB79_1 ; RV64IA-NEXT: # %bb.2: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw nand i16* %a, i16 %b seq_cst ret i16 %1 @@ -6671,15 +6671,15 @@ define i16 @atomicrmw_or_i16_monotonic(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_or_i16_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoor.w a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoor.w a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_or_i16_monotonic: @@ -6694,15 +6694,15 @@ define i16 @atomicrmw_or_i16_monotonic(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_or_i16_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoor.w a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; 
RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw or i16* %a, i16 %b monotonic ret i16 %1 @@ -6721,15 +6721,15 @@ define i16 @atomicrmw_or_i16_acquire(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_or_i16_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoor.w.aq a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoor.w.aq a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_or_i16_acquire: @@ -6744,15 +6744,15 @@ define i16 @atomicrmw_or_i16_acquire(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_or_i16_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoor.w.aq a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.aq a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw or i16* %a, i16 %b acquire ret i16 %1 @@ -6771,15 +6771,15 @@ define i16 @atomicrmw_or_i16_release(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_or_i16_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoor.w.rl a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoor.w.rl a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_or_i16_release: @@ -6794,15 +6794,15 @@ define i16 @atomicrmw_or_i16_release(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_or_i16_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoor.w.rl a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.rl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw or i16* %a, i16 %b release ret i16 %1 @@ -6821,15 +6821,15 @@ define i16 @atomicrmw_or_i16_acq_rel(i16 *%a, 
i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_or_i16_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoor.w.aqrl a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_or_i16_acq_rel: @@ -6844,15 +6844,15 @@ define i16 @atomicrmw_or_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_or_i16_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoor.w.aqrl a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw or i16* %a, i16 %b acq_rel ret i16 %1 @@ -6871,15 +6871,15 @@ define i16 @atomicrmw_or_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_or_i16_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoor.w.aqrl a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_or_i16_seq_cst: @@ -6894,15 +6894,15 @@ define i16 @atomicrmw_or_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_or_i16_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoor.w.aqrl a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw or i16* %a, i16 %b seq_cst ret i16 %1 @@ -6921,15 +6921,15 @@ define i16 @atomicrmw_xor_i16_monotonic(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xor_i16_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoxor.w a1, a1, (a2) -; 
RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoxor.w a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xor_i16_monotonic: @@ -6944,15 +6944,15 @@ define i16 @atomicrmw_xor_i16_monotonic(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xor_i16_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoxor.w a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw xor i16* %a, i16 %b monotonic ret i16 %1 @@ -6971,15 +6971,15 @@ define i16 @atomicrmw_xor_i16_acquire(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xor_i16_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoxor.w.aq a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoxor.w.aq a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xor_i16_acquire: @@ -6994,15 +6994,15 @@ define i16 @atomicrmw_xor_i16_acquire(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xor_i16_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoxor.w.aq a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.aq a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw xor i16* %a, i16 %b acquire ret i16 %1 @@ -7021,15 +7021,15 @@ define i16 @atomicrmw_xor_i16_release(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xor_i16_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoxor.w.rl a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoxor.w.rl a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: 
atomicrmw_xor_i16_release: @@ -7044,15 +7044,15 @@ define i16 @atomicrmw_xor_i16_release(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xor_i16_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoxor.w.rl a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.rl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw xor i16* %a, i16 %b release ret i16 %1 @@ -7071,15 +7071,15 @@ define i16 @atomicrmw_xor_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xor_i16_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoxor.w.aqrl a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xor_i16_acq_rel: @@ -7094,15 +7094,15 @@ define i16 @atomicrmw_xor_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xor_i16_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoxor.w.aqrl a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw xor i16* %a, i16 %b acq_rel ret i16 %1 @@ -7121,15 +7121,15 @@ define i16 @atomicrmw_xor_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_xor_i16_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a2, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: amoxor.w.aqrl a1, a1, (a2) -; RV32IA-NEXT: srl a0, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 +; RV32IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV32IA-NEXT: srl a0, a0, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_xor_i16_seq_cst: @@ -7144,15 +7144,15 @@ define i16 @atomicrmw_xor_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_xor_i16_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a2, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; 
RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: amoxor.w.aqrl a1, a1, (a2) -; RV64IA-NEXT: srlw a0, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 +; RV64IA-NEXT: amoxor.w.aqrl a0, a1, (a0) +; RV64IA-NEXT: srlw a0, a0, a2 ; RV64IA-NEXT: ret %1 = atomicrmw xor i16* %a, i16 %b seq_cst ret i16 %1 @@ -7169,33 +7169,33 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 16 -; RV32I-NEXT: srai s0, a0, 16 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 16 +; RV32I-NEXT: srai s0, a1, 16 ; RV32I-NEXT: addi s3, sp, 10 ; RV32I-NEXT: j .LBB90_2 ; RV32I-NEXT: .LBB90_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB90_2 Depth=1 -; RV32I-NEXT: sh a1, 10(sp) +; RV32I-NEXT: sh a0, 10(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: mv a3, zero ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 10(sp) -; RV32I-NEXT: bnez a0, .LBB90_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 10(sp) +; RV32I-NEXT: bnez a1, .LBB90_4 ; RV32I-NEXT: .LBB90_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 16 -; RV32I-NEXT: srai a0, a0, 16 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: blt s0, a0, .LBB90_1 +; RV32I-NEXT: slli a1, a0, 16 +; RV32I-NEXT: srai a1, a1, 16 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: blt s0, a1, .LBB90_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB90_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB90_1 ; RV32I-NEXT: .LBB90_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -7206,33 +7206,33 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_max_i16_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 16 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: lui a4, 16 +; RV32IA-NEXT: addi a4, a4, -1 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 16 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB90_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a3, a1, .LBB90_3 +; RV32IA-NEXT: lr.w a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a4, a1, .LBB90_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB90_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB90_3: # in Loop: Header=BB90_1 Depth=1 -; RV32IA-NEXT: sc.w a2, a2, (a6) -; RV32IA-NEXT: bnez a2, 
.LBB90_1 +; RV32IA-NEXT: sc.w a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB90_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_max_i16_monotonic: @@ -7245,33 +7245,33 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 48 -; RV64I-NEXT: srai s0, a0, 48 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 48 +; RV64I-NEXT: srai s0, a1, 48 ; RV64I-NEXT: addi s3, sp, 6 ; RV64I-NEXT: j .LBB90_2 ; RV64I-NEXT: .LBB90_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB90_2 Depth=1 -; RV64I-NEXT: sh a1, 6(sp) +; RV64I-NEXT: sh a0, 6(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: mv a3, zero ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 6(sp) -; RV64I-NEXT: bnez a0, .LBB90_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 6(sp) +; RV64I-NEXT: bnez a1, .LBB90_4 ; RV64I-NEXT: .LBB90_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 48 -; RV64I-NEXT: srai a0, a0, 48 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: blt s0, a0, .LBB90_1 +; RV64I-NEXT: slli a1, a0, 48 +; RV64I-NEXT: srai a1, a1, 48 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: blt s0, a1, .LBB90_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB90_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB90_1 ; RV64I-NEXT: .LBB90_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -7282,33 +7282,33 @@ define i16 @atomicrmw_max_i16_monotonic(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_max_i16_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 48 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB90_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a3, a1, .LBB90_3 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB90_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB90_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB90_3: # in Loop: Header=BB90_1 Depth=1 -; RV64IA-NEXT: sc.w a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB90_1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB90_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b monotonic ret i16 %1 @@ -7325,33 +7325,33 @@ define i16 
@atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 16 -; RV32I-NEXT: srai s0, a0, 16 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 16 +; RV32I-NEXT: srai s0, a1, 16 ; RV32I-NEXT: addi s3, sp, 10 ; RV32I-NEXT: j .LBB91_2 ; RV32I-NEXT: .LBB91_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB91_2 Depth=1 -; RV32I-NEXT: sh a1, 10(sp) -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: sh a0, 10(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 2 +; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 10(sp) -; RV32I-NEXT: bnez a0, .LBB91_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 10(sp) +; RV32I-NEXT: bnez a1, .LBB91_4 ; RV32I-NEXT: .LBB91_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 16 -; RV32I-NEXT: srai a0, a0, 16 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: blt s0, a0, .LBB91_1 +; RV32I-NEXT: slli a1, a0, 16 +; RV32I-NEXT: srai a1, a1, 16 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: blt s0, a1, .LBB91_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB91_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB91_1 ; RV32I-NEXT: .LBB91_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -7362,33 +7362,33 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_max_i16_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 16 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: lui a4, 16 +; RV32IA-NEXT: addi a4, a4, -1 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 16 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB91_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a3, a1, .LBB91_3 +; RV32IA-NEXT: lr.w.aq a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a4, a1, .LBB91_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB91_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB91_3: # in Loop: Header=BB91_1 Depth=1 -; RV32IA-NEXT: sc.w a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB91_1 +; RV32IA-NEXT: sc.w a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB91_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_max_i16_acquire: @@ -7401,33 +7401,33 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 48 -; 
RV64I-NEXT: srai s0, a0, 48 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 48 +; RV64I-NEXT: srai s0, a1, 48 ; RV64I-NEXT: addi s3, sp, 6 ; RV64I-NEXT: j .LBB91_2 ; RV64I-NEXT: .LBB91_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB91_2 Depth=1 -; RV64I-NEXT: sh a1, 6(sp) -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: sh a0, 6(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 2 +; RV64I-NEXT: addi a4, zero, 2 ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 6(sp) -; RV64I-NEXT: bnez a0, .LBB91_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 6(sp) +; RV64I-NEXT: bnez a1, .LBB91_4 ; RV64I-NEXT: .LBB91_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 48 -; RV64I-NEXT: srai a0, a0, 48 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: blt s0, a0, .LBB91_1 +; RV64I-NEXT: slli a1, a0, 48 +; RV64I-NEXT: srai a1, a1, 48 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: blt s0, a1, .LBB91_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB91_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB91_1 ; RV64I-NEXT: .LBB91_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -7438,33 +7438,33 @@ define i16 @atomicrmw_max_i16_acquire(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_max_i16_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 48 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB91_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a3, a1, .LBB91_3 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB91_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB91_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB91_3: # in Loop: Header=BB91_1 Depth=1 -; RV64IA-NEXT: sc.w a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB91_1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB91_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b acquire ret i16 %1 @@ -7481,33 +7481,33 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 16 -; RV32I-NEXT: srai s0, a0, 16 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 16 +; RV32I-NEXT: srai s0, a1, 16 ; RV32I-NEXT: addi s3, sp, 10 ; RV32I-NEXT: j .LBB92_2 ; RV32I-NEXT: 
.LBB92_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB92_2 Depth=1 -; RV32I-NEXT: sh a1, 10(sp) -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: sh a0, 10(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 3 ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 10(sp) -; RV32I-NEXT: bnez a0, .LBB92_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 10(sp) +; RV32I-NEXT: bnez a1, .LBB92_4 ; RV32I-NEXT: .LBB92_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 16 -; RV32I-NEXT: srai a0, a0, 16 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: blt s0, a0, .LBB92_1 +; RV32I-NEXT: slli a1, a0, 16 +; RV32I-NEXT: srai a1, a1, 16 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: blt s0, a1, .LBB92_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB92_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB92_1 ; RV32I-NEXT: .LBB92_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -7518,33 +7518,33 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_max_i16_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 16 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: lui a4, 16 +; RV32IA-NEXT: addi a4, a4, -1 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 16 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB92_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a3, a1, .LBB92_3 +; RV32IA-NEXT: lr.w a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a4, a1, .LBB92_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB92_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB92_3: # in Loop: Header=BB92_1 Depth=1 -; RV32IA-NEXT: sc.w.rl a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB92_1 +; RV32IA-NEXT: sc.w.rl a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB92_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_max_i16_release: @@ -7557,33 +7557,33 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 48 -; RV64I-NEXT: srai s0, a0, 48 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 48 +; RV64I-NEXT: srai s0, a1, 48 ; RV64I-NEXT: addi s3, sp, 6 ; RV64I-NEXT: j .LBB92_2 ; RV64I-NEXT: .LBB92_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB92_2 Depth=1 -; RV64I-NEXT: sh a1, 6(sp) -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: sh a0, 6(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 
3 ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 6(sp) -; RV64I-NEXT: bnez a0, .LBB92_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 6(sp) +; RV64I-NEXT: bnez a1, .LBB92_4 ; RV64I-NEXT: .LBB92_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 48 -; RV64I-NEXT: srai a0, a0, 48 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: blt s0, a0, .LBB92_1 +; RV64I-NEXT: slli a1, a0, 48 +; RV64I-NEXT: srai a1, a1, 48 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: blt s0, a1, .LBB92_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB92_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB92_1 ; RV64I-NEXT: .LBB92_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -7594,33 +7594,33 @@ define i16 @atomicrmw_max_i16_release(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_max_i16_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 48 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB92_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a3, a1, .LBB92_3 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB92_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB92_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB92_3: # in Loop: Header=BB92_1 Depth=1 -; RV64IA-NEXT: sc.w.rl a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB92_1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB92_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b release ret i16 %1 @@ -7637,33 +7637,33 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 16 -; RV32I-NEXT: srai s0, a0, 16 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 16 +; RV32I-NEXT: srai s0, a1, 16 ; RV32I-NEXT: addi s3, sp, 10 ; RV32I-NEXT: j .LBB93_2 ; RV32I-NEXT: .LBB93_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB93_2 Depth=1 -; RV32I-NEXT: sh a1, 10(sp) -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: sh a0, 10(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 4 +; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 10(sp) -; RV32I-NEXT: bnez a0, .LBB93_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 
10(sp) +; RV32I-NEXT: bnez a1, .LBB93_4 ; RV32I-NEXT: .LBB93_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 16 -; RV32I-NEXT: srai a0, a0, 16 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: blt s0, a0, .LBB93_1 +; RV32I-NEXT: slli a1, a0, 16 +; RV32I-NEXT: srai a1, a1, 16 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: blt s0, a1, .LBB93_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB93_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB93_1 ; RV32I-NEXT: .LBB93_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -7674,33 +7674,33 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_max_i16_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 16 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: lui a4, 16 +; RV32IA-NEXT: addi a4, a4, -1 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 16 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB93_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a3, a1, .LBB93_3 +; RV32IA-NEXT: lr.w.aq a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a4, a1, .LBB93_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB93_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB93_3: # in Loop: Header=BB93_1 Depth=1 -; RV32IA-NEXT: sc.w.rl a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB93_1 +; RV32IA-NEXT: sc.w.rl a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB93_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_max_i16_acq_rel: @@ -7713,33 +7713,33 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 48 -; RV64I-NEXT: srai s0, a0, 48 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 48 +; RV64I-NEXT: srai s0, a1, 48 ; RV64I-NEXT: addi s3, sp, 6 ; RV64I-NEXT: j .LBB93_2 ; RV64I-NEXT: .LBB93_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB93_2 Depth=1 -; RV64I-NEXT: sh a1, 6(sp) -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: sh a0, 6(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 4 +; RV64I-NEXT: addi a4, zero, 2 ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 6(sp) -; RV64I-NEXT: bnez a0, .LBB93_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 6(sp) +; RV64I-NEXT: bnez a1, .LBB93_4 ; RV64I-NEXT: .LBB93_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 48 -; RV64I-NEXT: srai a0, a0, 48 -; 
RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: blt s0, a0, .LBB93_1 +; RV64I-NEXT: slli a1, a0, 48 +; RV64I-NEXT: srai a1, a1, 48 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: blt s0, a1, .LBB93_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB93_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB93_1 ; RV64I-NEXT: .LBB93_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -7750,33 +7750,33 @@ define i16 @atomicrmw_max_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_max_i16_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 48 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB93_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a3, a1, .LBB93_3 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB93_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB93_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB93_3: # in Loop: Header=BB93_1 Depth=1 -; RV64IA-NEXT: sc.w.rl a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB93_1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB93_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b acq_rel ret i16 %1 @@ -7793,33 +7793,33 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 16 -; RV32I-NEXT: srai s0, a0, 16 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 16 +; RV32I-NEXT: srai s0, a1, 16 ; RV32I-NEXT: addi s3, sp, 10 ; RV32I-NEXT: j .LBB94_2 ; RV32I-NEXT: .LBB94_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB94_2 Depth=1 -; RV32I-NEXT: sh a1, 10(sp) -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: sh a0, 10(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 5 +; RV32I-NEXT: addi a4, zero, 5 ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 10(sp) -; RV32I-NEXT: bnez a0, .LBB94_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 10(sp) +; RV32I-NEXT: bnez a1, .LBB94_4 ; RV32I-NEXT: .LBB94_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 16 -; RV32I-NEXT: srai a0, a0, 16 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: blt s0, a0, .LBB94_1 +; RV32I-NEXT: slli a1, a0, 16 +; RV32I-NEXT: srai a1, a1, 16 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: blt s0, a1, 
.LBB94_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB94_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB94_1 ; RV32I-NEXT: .LBB94_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -7830,33 +7830,33 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_max_i16_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 16 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: lui a4, 16 +; RV32IA-NEXT: addi a4, a4, -1 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 16 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB94_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a3, a1, .LBB94_3 +; RV32IA-NEXT: lr.w.aqrl a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a4, a1, .LBB94_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB94_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB94_3: # in Loop: Header=BB94_1 Depth=1 -; RV32IA-NEXT: sc.w.aqrl a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB94_1 +; RV32IA-NEXT: sc.w.aqrl a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB94_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_max_i16_seq_cst: @@ -7869,33 +7869,33 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 48 -; RV64I-NEXT: srai s0, a0, 48 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 48 +; RV64I-NEXT: srai s0, a1, 48 ; RV64I-NEXT: addi s3, sp, 6 ; RV64I-NEXT: j .LBB94_2 ; RV64I-NEXT: .LBB94_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB94_2 Depth=1 -; RV64I-NEXT: sh a1, 6(sp) -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: sh a0, 6(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 5 +; RV64I-NEXT: addi a4, zero, 5 ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 6(sp) -; RV64I-NEXT: bnez a0, .LBB94_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 6(sp) +; RV64I-NEXT: bnez a1, .LBB94_4 ; RV64I-NEXT: .LBB94_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 48 -; RV64I-NEXT: srai a0, a0, 48 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: blt s0, a0, .LBB94_1 +; RV64I-NEXT: slli a1, a0, 48 +; RV64I-NEXT: srai a1, a1, 48 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: blt s0, a1, .LBB94_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB94_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB94_1 ; RV64I-NEXT: .LBB94_4: # %atomicrmw.end -; 
RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -7906,33 +7906,33 @@ define i16 @atomicrmw_max_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_max_i16_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 48 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB94_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a3, a1, .LBB94_3 +; RV64IA-NEXT: lr.w.aqrl a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a4, a1, .LBB94_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB94_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB94_3: # in Loop: Header=BB94_1 Depth=1 -; RV64IA-NEXT: sc.w.aqrl a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB94_1 +; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB94_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw max i16* %a, i16 %b seq_cst ret i16 %1 @@ -7949,33 +7949,33 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 16 -; RV32I-NEXT: srai s0, a0, 16 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 16 +; RV32I-NEXT: srai s0, a1, 16 ; RV32I-NEXT: addi s3, sp, 10 ; RV32I-NEXT: j .LBB95_2 ; RV32I-NEXT: .LBB95_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB95_2 Depth=1 -; RV32I-NEXT: sh a1, 10(sp) +; RV32I-NEXT: sh a0, 10(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: mv a3, zero ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 10(sp) -; RV32I-NEXT: bnez a0, .LBB95_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 10(sp) +; RV32I-NEXT: bnez a1, .LBB95_4 ; RV32I-NEXT: .LBB95_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 16 -; RV32I-NEXT: srai a0, a0, 16 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bge s0, a0, .LBB95_1 +; RV32I-NEXT: slli a1, a0, 16 +; RV32I-NEXT: srai a1, a1, 16 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bge s0, a1, .LBB95_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB95_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB95_1 ; RV32I-NEXT: .LBB95_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -7986,33 +7986,33 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_min_i16_monotonic: 
; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 16 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: lui a4, 16 +; RV32IA-NEXT: addi a4, a4, -1 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 16 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB95_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a1, a3, .LBB95_3 +; RV32IA-NEXT: lr.w a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a1, a4, .LBB95_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB95_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB95_3: # in Loop: Header=BB95_1 Depth=1 -; RV32IA-NEXT: sc.w a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB95_1 +; RV32IA-NEXT: sc.w a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB95_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_min_i16_monotonic: @@ -8025,33 +8025,33 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 48 -; RV64I-NEXT: srai s0, a0, 48 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 48 +; RV64I-NEXT: srai s0, a1, 48 ; RV64I-NEXT: addi s3, sp, 6 ; RV64I-NEXT: j .LBB95_2 ; RV64I-NEXT: .LBB95_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB95_2 Depth=1 -; RV64I-NEXT: sh a1, 6(sp) +; RV64I-NEXT: sh a0, 6(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: mv a3, zero ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 6(sp) -; RV64I-NEXT: bnez a0, .LBB95_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 6(sp) +; RV64I-NEXT: bnez a1, .LBB95_4 ; RV64I-NEXT: .LBB95_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 48 -; RV64I-NEXT: srai a0, a0, 48 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bge s0, a0, .LBB95_1 +; RV64I-NEXT: slli a1, a0, 48 +; RV64I-NEXT: srai a1, a1, 48 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bge s0, a1, .LBB95_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB95_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB95_1 ; RV64I-NEXT: .LBB95_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -8062,33 +8062,33 @@ define i16 @atomicrmw_min_i16_monotonic(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_min_i16_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; 
RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 48 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB95_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a1, a3, .LBB95_3 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB95_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB95_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB95_3: # in Loop: Header=BB95_1 Depth=1 -; RV64IA-NEXT: sc.w a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB95_1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB95_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b monotonic ret i16 %1 @@ -8105,33 +8105,33 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 16 -; RV32I-NEXT: srai s0, a0, 16 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 16 +; RV32I-NEXT: srai s0, a1, 16 ; RV32I-NEXT: addi s3, sp, 10 ; RV32I-NEXT: j .LBB96_2 ; RV32I-NEXT: .LBB96_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB96_2 Depth=1 -; RV32I-NEXT: sh a1, 10(sp) -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: sh a0, 10(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 2 +; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 10(sp) -; RV32I-NEXT: bnez a0, .LBB96_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 10(sp) +; RV32I-NEXT: bnez a1, .LBB96_4 ; RV32I-NEXT: .LBB96_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 16 -; RV32I-NEXT: srai a0, a0, 16 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bge s0, a0, .LBB96_1 +; RV32I-NEXT: slli a1, a0, 16 +; RV32I-NEXT: srai a1, a1, 16 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bge s0, a1, .LBB96_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB96_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB96_1 ; RV32I-NEXT: .LBB96_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -8142,33 +8142,33 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_min_i16_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 16 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: lui a4, 16 +; RV32IA-NEXT: addi a4, a4, -1 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 16 ; 
RV32IA-NEXT: srai a1, a1, 16 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 16 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB96_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a1, a3, .LBB96_3 +; RV32IA-NEXT: lr.w.aq a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a1, a4, .LBB96_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB96_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB96_3: # in Loop: Header=BB96_1 Depth=1 -; RV32IA-NEXT: sc.w a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB96_1 +; RV32IA-NEXT: sc.w a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB96_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_min_i16_acquire: @@ -8181,33 +8181,33 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 48 -; RV64I-NEXT: srai s0, a0, 48 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 48 +; RV64I-NEXT: srai s0, a1, 48 ; RV64I-NEXT: addi s3, sp, 6 ; RV64I-NEXT: j .LBB96_2 ; RV64I-NEXT: .LBB96_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB96_2 Depth=1 -; RV64I-NEXT: sh a1, 6(sp) -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: sh a0, 6(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 2 +; RV64I-NEXT: addi a4, zero, 2 ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 6(sp) -; RV64I-NEXT: bnez a0, .LBB96_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 6(sp) +; RV64I-NEXT: bnez a1, .LBB96_4 ; RV64I-NEXT: .LBB96_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 48 -; RV64I-NEXT: srai a0, a0, 48 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bge s0, a0, .LBB96_1 +; RV64I-NEXT: slli a1, a0, 48 +; RV64I-NEXT: srai a1, a1, 48 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bge s0, a1, .LBB96_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB96_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB96_1 ; RV64I-NEXT: .LBB96_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -8218,33 +8218,33 @@ define i16 @atomicrmw_min_i16_acquire(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_min_i16_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 48 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: 
.LBB96_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a1, a3, .LBB96_3 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB96_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB96_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB96_3: # in Loop: Header=BB96_1 Depth=1 -; RV64IA-NEXT: sc.w a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB96_1 +; RV64IA-NEXT: sc.w a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB96_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b acquire ret i16 %1 @@ -8261,33 +8261,33 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 16 -; RV32I-NEXT: srai s0, a0, 16 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 16 +; RV32I-NEXT: srai s0, a1, 16 ; RV32I-NEXT: addi s3, sp, 10 ; RV32I-NEXT: j .LBB97_2 ; RV32I-NEXT: .LBB97_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB97_2 Depth=1 -; RV32I-NEXT: sh a1, 10(sp) -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: sh a0, 10(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 3 ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 10(sp) -; RV32I-NEXT: bnez a0, .LBB97_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 10(sp) +; RV32I-NEXT: bnez a1, .LBB97_4 ; RV32I-NEXT: .LBB97_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 16 -; RV32I-NEXT: srai a0, a0, 16 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bge s0, a0, .LBB97_1 +; RV32I-NEXT: slli a1, a0, 16 +; RV32I-NEXT: srai a1, a1, 16 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bge s0, a1, .LBB97_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB97_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB97_1 ; RV32I-NEXT: .LBB97_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -8298,33 +8298,33 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_min_i16_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 16 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: lui a4, 16 +; RV32IA-NEXT: addi a4, a4, -1 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 16 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB97_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a1, a3, 
.LBB97_3 +; RV32IA-NEXT: lr.w a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a1, a4, .LBB97_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB97_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB97_3: # in Loop: Header=BB97_1 Depth=1 -; RV32IA-NEXT: sc.w.rl a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB97_1 +; RV32IA-NEXT: sc.w.rl a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB97_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_min_i16_release: @@ -8337,33 +8337,33 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 48 -; RV64I-NEXT: srai s0, a0, 48 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 48 +; RV64I-NEXT: srai s0, a1, 48 ; RV64I-NEXT: addi s3, sp, 6 ; RV64I-NEXT: j .LBB97_2 ; RV64I-NEXT: .LBB97_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB97_2 Depth=1 -; RV64I-NEXT: sh a1, 6(sp) -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: sh a0, 6(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 3 ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 6(sp) -; RV64I-NEXT: bnez a0, .LBB97_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 6(sp) +; RV64I-NEXT: bnez a1, .LBB97_4 ; RV64I-NEXT: .LBB97_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 48 -; RV64I-NEXT: srai a0, a0, 48 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bge s0, a0, .LBB97_1 +; RV64I-NEXT: slli a1, a0, 48 +; RV64I-NEXT: srai a1, a1, 48 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bge s0, a1, .LBB97_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB97_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB97_1 ; RV64I-NEXT: .LBB97_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -8374,33 +8374,33 @@ define i16 @atomicrmw_min_i16_release(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_min_i16_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 48 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB97_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a1, a3, .LBB97_3 +; RV64IA-NEXT: lr.w a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB97_3 ; RV64IA-NEXT: # %bb.2: # in Loop: 
Header=BB97_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB97_3: # in Loop: Header=BB97_1 Depth=1 -; RV64IA-NEXT: sc.w.rl a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB97_1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB97_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b release ret i16 %1 @@ -8417,33 +8417,33 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 16 -; RV32I-NEXT: srai s0, a0, 16 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 16 +; RV32I-NEXT: srai s0, a1, 16 ; RV32I-NEXT: addi s3, sp, 10 ; RV32I-NEXT: j .LBB98_2 ; RV32I-NEXT: .LBB98_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB98_2 Depth=1 -; RV32I-NEXT: sh a1, 10(sp) -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: sh a0, 10(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 4 +; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 10(sp) -; RV32I-NEXT: bnez a0, .LBB98_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 10(sp) +; RV32I-NEXT: bnez a1, .LBB98_4 ; RV32I-NEXT: .LBB98_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 16 -; RV32I-NEXT: srai a0, a0, 16 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bge s0, a0, .LBB98_1 +; RV32I-NEXT: slli a1, a0, 16 +; RV32I-NEXT: srai a1, a1, 16 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bge s0, a1, .LBB98_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB98_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB98_1 ; RV32I-NEXT: .LBB98_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -8454,33 +8454,33 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_min_i16_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 16 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: lui a4, 16 +; RV32IA-NEXT: addi a4, a4, -1 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 16 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB98_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a1, a3, .LBB98_3 +; RV32IA-NEXT: lr.w.aq a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a1, a4, .LBB98_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB98_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; 
RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB98_3: # in Loop: Header=BB98_1 Depth=1 -; RV32IA-NEXT: sc.w.rl a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB98_1 +; RV32IA-NEXT: sc.w.rl a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB98_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_min_i16_acq_rel: @@ -8493,33 +8493,33 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 48 -; RV64I-NEXT: srai s0, a0, 48 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 48 +; RV64I-NEXT: srai s0, a1, 48 ; RV64I-NEXT: addi s3, sp, 6 ; RV64I-NEXT: j .LBB98_2 ; RV64I-NEXT: .LBB98_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB98_2 Depth=1 -; RV64I-NEXT: sh a1, 6(sp) -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: sh a0, 6(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 4 +; RV64I-NEXT: addi a4, zero, 2 ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 6(sp) -; RV64I-NEXT: bnez a0, .LBB98_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 6(sp) +; RV64I-NEXT: bnez a1, .LBB98_4 ; RV64I-NEXT: .LBB98_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 48 -; RV64I-NEXT: srai a0, a0, 48 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bge s0, a0, .LBB98_1 +; RV64I-NEXT: slli a1, a0, 48 +; RV64I-NEXT: srai a1, a1, 48 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bge s0, a1, .LBB98_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB98_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB98_1 ; RV64I-NEXT: .LBB98_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -8530,33 +8530,33 @@ define i16 @atomicrmw_min_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_min_i16_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 48 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB98_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a1, a3, .LBB98_3 +; RV64IA-NEXT: lr.w.aq a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB98_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB98_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB98_3: # in Loop: Header=BB98_1 Depth=1 -; RV64IA-NEXT: sc.w.rl a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB98_1 +; RV64IA-NEXT: sc.w.rl a3, a3, (a0) +; 
RV64IA-NEXT: bnez a3, .LBB98_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b acq_rel ret i16 %1 @@ -8573,33 +8573,33 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s3, 12(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s1, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: slli a0, s2, 16 -; RV32I-NEXT: srai s0, a0, 16 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: slli a1, a1, 16 +; RV32I-NEXT: srai s0, a1, 16 ; RV32I-NEXT: addi s3, sp, 10 ; RV32I-NEXT: j .LBB99_2 ; RV32I-NEXT: .LBB99_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB99_2 Depth=1 -; RV32I-NEXT: sh a1, 10(sp) -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: sh a0, 10(sp) ; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 5 +; RV32I-NEXT: addi a4, zero, 5 ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 10(sp) -; RV32I-NEXT: bnez a0, .LBB99_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 10(sp) +; RV32I-NEXT: bnez a1, .LBB99_4 ; RV32I-NEXT: .LBB99_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: slli a0, a1, 16 -; RV32I-NEXT: srai a0, a0, 16 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bge s0, a0, .LBB99_1 +; RV32I-NEXT: slli a1, a0, 16 +; RV32I-NEXT: srai a1, a1, 16 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bge s0, a1, .LBB99_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB99_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB99_1 ; RV32I-NEXT: .LBB99_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -8610,33 +8610,33 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_min_i16_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a7, a3, a0 +; RV32IA-NEXT: slli a2, a0, 3 +; RV32IA-NEXT: andi a2, a2, 24 +; RV32IA-NEXT: addi a3, zero, 16 +; RV32IA-NEXT: sub a6, a3, a2 +; RV32IA-NEXT: lui a4, 16 +; RV32IA-NEXT: addi a4, a4, -1 +; RV32IA-NEXT: sll a7, a4, a2 ; RV32IA-NEXT: slli a1, a1, 16 ; RV32IA-NEXT: srai a1, a1, 16 -; RV32IA-NEXT: sll a1, a1, a0 -; RV32IA-NEXT: addi a4, zero, 16 -; RV32IA-NEXT: sub a4, a4, a0 +; RV32IA-NEXT: sll a1, a1, a2 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB99_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a5, (a6) -; RV32IA-NEXT: and a3, a5, a7 -; RV32IA-NEXT: mv a2, a5 -; RV32IA-NEXT: sll a3, a3, a4 -; RV32IA-NEXT: sra a3, a3, a4 -; RV32IA-NEXT: bge a1, a3, .LBB99_3 +; RV32IA-NEXT: lr.w.aqrl a5, (a0) +; RV32IA-NEXT: and a4, a5, a7 +; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sll a4, a4, a6 +; RV32IA-NEXT: sra a4, a4, a6 +; RV32IA-NEXT: bge a1, a4, .LBB99_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB99_1 Depth=1 -; RV32IA-NEXT: xor a2, a5, a1 -; RV32IA-NEXT: and a2, a2, a7 -; RV32IA-NEXT: xor a2, a5, a2 +; RV32IA-NEXT: xor a3, a5, a1 +; RV32IA-NEXT: and a3, a3, a7 +; RV32IA-NEXT: xor a3, a5, a3 ; RV32IA-NEXT: .LBB99_3: # in Loop: Header=BB99_1 Depth=1 -; RV32IA-NEXT: sc.w.aqrl a2, a2, (a6) -; RV32IA-NEXT: bnez a2, .LBB99_1 +; RV32IA-NEXT: sc.w.aqrl a3, a3, (a0) +; RV32IA-NEXT: bnez a3, .LBB99_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a5, a0 +; RV32IA-NEXT: srl a0, a5, a2 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: 
atomicrmw_min_i16_seq_cst: @@ -8649,33 +8649,33 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s3, 8(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s1, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: slli a0, s2, 48 -; RV64I-NEXT: srai s0, a0, 48 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: slli a1, a1, 48 +; RV64I-NEXT: srai s0, a1, 48 ; RV64I-NEXT: addi s3, sp, 6 ; RV64I-NEXT: j .LBB99_2 ; RV64I-NEXT: .LBB99_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB99_2 Depth=1 -; RV64I-NEXT: sh a1, 6(sp) -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: sh a0, 6(sp) ; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 5 +; RV64I-NEXT: addi a4, zero, 5 ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 6(sp) -; RV64I-NEXT: bnez a0, .LBB99_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 6(sp) +; RV64I-NEXT: bnez a1, .LBB99_4 ; RV64I-NEXT: .LBB99_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: slli a0, a1, 48 -; RV64I-NEXT: srai a0, a0, 48 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bge s0, a0, .LBB99_1 +; RV64I-NEXT: slli a1, a0, 48 +; RV64I-NEXT: srai a1, a1, 48 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bge s0, a1, .LBB99_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB99_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB99_1 ; RV64I-NEXT: .LBB99_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -8686,33 +8686,33 @@ define i16 @atomicrmw_min_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_min_i16_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a7, a3, a0 +; RV64IA-NEXT: slli a2, a0, 3 +; RV64IA-NEXT: andi a2, a2, 24 +; RV64IA-NEXT: addi a3, zero, 48 +; RV64IA-NEXT: sub a6, a3, a2 +; RV64IA-NEXT: lui a4, 16 +; RV64IA-NEXT: addiw a4, a4, -1 +; RV64IA-NEXT: sllw a7, a4, a2 ; RV64IA-NEXT: slli a1, a1, 48 ; RV64IA-NEXT: srai a1, a1, 48 -; RV64IA-NEXT: sllw a1, a1, a0 -; RV64IA-NEXT: addi a4, zero, 48 -; RV64IA-NEXT: sub a4, a4, a0 +; RV64IA-NEXT: sllw a1, a1, a2 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB99_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a5, (a6) -; RV64IA-NEXT: and a3, a5, a7 -; RV64IA-NEXT: mv a2, a5 -; RV64IA-NEXT: sll a3, a3, a4 -; RV64IA-NEXT: sra a3, a3, a4 -; RV64IA-NEXT: bge a1, a3, .LBB99_3 +; RV64IA-NEXT: lr.w.aqrl a5, (a0) +; RV64IA-NEXT: and a4, a5, a7 +; RV64IA-NEXT: mv a3, a5 +; RV64IA-NEXT: sll a4, a4, a6 +; RV64IA-NEXT: sra a4, a4, a6 +; RV64IA-NEXT: bge a1, a4, .LBB99_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB99_1 Depth=1 -; RV64IA-NEXT: xor a2, a5, a1 -; RV64IA-NEXT: and a2, a2, a7 -; RV64IA-NEXT: xor a2, a5, a2 +; RV64IA-NEXT: xor a3, a5, a1 +; RV64IA-NEXT: and a3, a3, a7 +; RV64IA-NEXT: xor a3, a5, a3 ; RV64IA-NEXT: .LBB99_3: # in Loop: Header=BB99_1 Depth=1 -; RV64IA-NEXT: sc.w.aqrl a2, a2, (a6) -; RV64IA-NEXT: bnez a2, .LBB99_1 +; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0) +; RV64IA-NEXT: bnez a3, .LBB99_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a5, a0 +; RV64IA-NEXT: srlw a0, a5, a2 ; RV64IA-NEXT: ret %1 = atomicrmw min i16* %a, i16 %b seq_cst ret i16 %1 @@ -8730,33 +8730,33 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s4, 8(sp) ; RV32I-NEXT: mv s2, a1 ; 
RV32I-NEXT: mv s4, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s0, a0, -1 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: lui a1, 16 +; RV32I-NEXT: addi s0, a1, -1 ; RV32I-NEXT: and s1, s2, s0 ; RV32I-NEXT: addi s3, sp, 6 ; RV32I-NEXT: j .LBB100_2 ; RV32I-NEXT: .LBB100_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB100_2 Depth=1 -; RV32I-NEXT: sh a1, 6(sp) +; RV32I-NEXT: sh a0, 6(sp) ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: mv a3, zero ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 6(sp) -; RV32I-NEXT: bnez a0, .LBB100_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 6(sp) +; RV32I-NEXT: bnez a1, .LBB100_4 ; RV32I-NEXT: .LBB100_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: and a0, a1, s0 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bltu s1, a0, .LBB100_1 +; RV32I-NEXT: and a1, a0, s0 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bltu s1, a1, .LBB100_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB100_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB100_1 ; RV32I-NEXT: .LBB100_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s4, 8(sp) ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) @@ -8768,28 +8768,28 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umax_i16_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a6, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB100_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a3, (a6) -; RV32IA-NEXT: and a2, a3, a4 -; RV32IA-NEXT: mv a5, a3 +; RV32IA-NEXT: lr.w a4, (a0) +; RV32IA-NEXT: and a2, a4, a6 +; RV32IA-NEXT: mv a5, a4 ; RV32IA-NEXT: bgeu a2, a1, .LBB100_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB100_1 Depth=1 -; RV32IA-NEXT: xor a5, a3, a1 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 +; RV32IA-NEXT: xor a5, a4, a1 +; RV32IA-NEXT: and a5, a5, a6 +; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB100_3: # in Loop: Header=BB100_1 Depth=1 -; RV32IA-NEXT: sc.w a5, a5, (a6) +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB100_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umax_i16_monotonic: @@ -8803,33 +8803,33 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s4, 16(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s4, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s0, a0, -1 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: lui a1, 16 +; RV64I-NEXT: addiw s0, a1, -1 ; RV64I-NEXT: and s1, s2, s0 ; RV64I-NEXT: addi s3, sp, 14 ; RV64I-NEXT: j .LBB100_2 ; RV64I-NEXT: .LBB100_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB100_2 Depth=1 -; RV64I-NEXT: sh a1, 14(sp) +; RV64I-NEXT: sh a0, 14(sp) ; RV64I-NEXT: mv a0, s4 ; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: mv a3, zero ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 14(sp) -; RV64I-NEXT: bnez 
a0, .LBB100_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 14(sp) +; RV64I-NEXT: bnez a1, .LBB100_4 ; RV64I-NEXT: .LBB100_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: and a0, a1, s0 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bltu s1, a0, .LBB100_1 +; RV64I-NEXT: and a1, a0, s0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bltu s1, a1, .LBB100_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB100_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB100_1 ; RV64I-NEXT: .LBB100_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s4, 16(sp) ; RV64I-NEXT: ld s3, 24(sp) ; RV64I-NEXT: ld s2, 32(sp) @@ -8841,28 +8841,28 @@ define i16 @atomicrmw_umax_i16_monotonic(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umax_i16_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB100_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a3, (a6) -; RV64IA-NEXT: and a2, a3, a4 -; RV64IA-NEXT: mv a5, a3 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 ; RV64IA-NEXT: bgeu a2, a1, .LBB100_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB100_1 Depth=1 -; RV64IA-NEXT: xor a5, a3, a1 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB100_3: # in Loop: Header=BB100_1 Depth=1 -; RV64IA-NEXT: sc.w a5, a5, (a6) +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB100_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw umax i16* %a, i16 %b monotonic ret i16 %1 @@ -8880,33 +8880,33 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s4, 8(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s4, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s0, a0, -1 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: lui a1, 16 +; RV32I-NEXT: addi s0, a1, -1 ; RV32I-NEXT: and s1, s2, s0 ; RV32I-NEXT: addi s3, sp, 6 ; RV32I-NEXT: j .LBB101_2 ; RV32I-NEXT: .LBB101_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB101_2 Depth=1 -; RV32I-NEXT: sh a1, 6(sp) -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: sh a0, 6(sp) ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 2 +; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 6(sp) -; RV32I-NEXT: bnez a0, .LBB101_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 6(sp) +; RV32I-NEXT: bnez a1, .LBB101_4 ; RV32I-NEXT: .LBB101_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: and a0, a1, s0 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bltu s1, a0, .LBB101_1 +; RV32I-NEXT: and a1, a0, s0 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bltu s1, a1, .LBB101_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB101_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB101_1 ; RV32I-NEXT: 
.LBB101_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s4, 8(sp) ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) @@ -8918,28 +8918,28 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umax_i16_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a6, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB101_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a3, (a6) -; RV32IA-NEXT: and a2, a3, a4 -; RV32IA-NEXT: mv a5, a3 +; RV32IA-NEXT: lr.w.aq a4, (a0) +; RV32IA-NEXT: and a2, a4, a6 +; RV32IA-NEXT: mv a5, a4 ; RV32IA-NEXT: bgeu a2, a1, .LBB101_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB101_1 Depth=1 -; RV32IA-NEXT: xor a5, a3, a1 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 +; RV32IA-NEXT: xor a5, a4, a1 +; RV32IA-NEXT: and a5, a5, a6 +; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB101_3: # in Loop: Header=BB101_1 Depth=1 -; RV32IA-NEXT: sc.w a5, a5, (a6) +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB101_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umax_i16_acquire: @@ -8953,33 +8953,33 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s4, 16(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s4, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s0, a0, -1 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: lui a1, 16 +; RV64I-NEXT: addiw s0, a1, -1 ; RV64I-NEXT: and s1, s2, s0 ; RV64I-NEXT: addi s3, sp, 14 ; RV64I-NEXT: j .LBB101_2 ; RV64I-NEXT: .LBB101_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB101_2 Depth=1 -; RV64I-NEXT: sh a1, 14(sp) -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: sh a0, 14(sp) ; RV64I-NEXT: mv a0, s4 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 2 +; RV64I-NEXT: addi a4, zero, 2 ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 14(sp) -; RV64I-NEXT: bnez a0, .LBB101_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 14(sp) +; RV64I-NEXT: bnez a1, .LBB101_4 ; RV64I-NEXT: .LBB101_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: and a0, a1, s0 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bltu s1, a0, .LBB101_1 +; RV64I-NEXT: and a1, a0, s0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bltu s1, a1, .LBB101_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB101_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB101_1 ; RV64I-NEXT: .LBB101_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s4, 16(sp) ; RV64I-NEXT: ld s3, 24(sp) ; RV64I-NEXT: ld s2, 32(sp) @@ -8991,28 +8991,28 @@ define i16 @atomicrmw_umax_i16_acquire(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umax_i16_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; 
RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB101_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a3, (a6) -; RV64IA-NEXT: and a2, a3, a4 -; RV64IA-NEXT: mv a5, a3 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 ; RV64IA-NEXT: bgeu a2, a1, .LBB101_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB101_1 Depth=1 -; RV64IA-NEXT: xor a5, a3, a1 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB101_3: # in Loop: Header=BB101_1 Depth=1 -; RV64IA-NEXT: sc.w a5, a5, (a6) +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB101_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw umax i16* %a, i16 %b acquire ret i16 %1 @@ -9030,33 +9030,33 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s4, 8(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s4, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s0, a0, -1 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: lui a1, 16 +; RV32I-NEXT: addi s0, a1, -1 ; RV32I-NEXT: and s1, s2, s0 ; RV32I-NEXT: addi s3, sp, 6 ; RV32I-NEXT: j .LBB102_2 ; RV32I-NEXT: .LBB102_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB102_2 Depth=1 -; RV32I-NEXT: sh a1, 6(sp) -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: sh a0, 6(sp) ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 3 ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 6(sp) -; RV32I-NEXT: bnez a0, .LBB102_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 6(sp) +; RV32I-NEXT: bnez a1, .LBB102_4 ; RV32I-NEXT: .LBB102_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: and a0, a1, s0 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bltu s1, a0, .LBB102_1 +; RV32I-NEXT: and a1, a0, s0 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bltu s1, a1, .LBB102_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB102_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB102_1 ; RV32I-NEXT: .LBB102_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s4, 8(sp) ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) @@ -9068,28 +9068,28 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umax_i16_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a6, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB102_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a3, (a6) -; RV32IA-NEXT: and a2, a3, a4 -; RV32IA-NEXT: mv a5, a3 +; RV32IA-NEXT: lr.w a4, (a0) +; RV32IA-NEXT: and a2, a4, a6 +; RV32IA-NEXT: mv a5, a4 ; RV32IA-NEXT: bgeu a2, a1, .LBB102_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB102_1 Depth=1 -; 
RV32IA-NEXT: xor a5, a3, a1 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 +; RV32IA-NEXT: xor a5, a4, a1 +; RV32IA-NEXT: and a5, a5, a6 +; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB102_3: # in Loop: Header=BB102_1 Depth=1 -; RV32IA-NEXT: sc.w.rl a5, a5, (a6) +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB102_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umax_i16_release: @@ -9103,33 +9103,33 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s4, 16(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s4, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s0, a0, -1 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: lui a1, 16 +; RV64I-NEXT: addiw s0, a1, -1 ; RV64I-NEXT: and s1, s2, s0 ; RV64I-NEXT: addi s3, sp, 14 ; RV64I-NEXT: j .LBB102_2 ; RV64I-NEXT: .LBB102_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB102_2 Depth=1 -; RV64I-NEXT: sh a1, 14(sp) -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: sh a0, 14(sp) ; RV64I-NEXT: mv a0, s4 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 3 ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 14(sp) -; RV64I-NEXT: bnez a0, .LBB102_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 14(sp) +; RV64I-NEXT: bnez a1, .LBB102_4 ; RV64I-NEXT: .LBB102_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: and a0, a1, s0 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bltu s1, a0, .LBB102_1 +; RV64I-NEXT: and a1, a0, s0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bltu s1, a1, .LBB102_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB102_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB102_1 ; RV64I-NEXT: .LBB102_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s4, 16(sp) ; RV64I-NEXT: ld s3, 24(sp) ; RV64I-NEXT: ld s2, 32(sp) @@ -9141,28 +9141,28 @@ define i16 @atomicrmw_umax_i16_release(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umax_i16_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB102_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a3, (a6) -; RV64IA-NEXT: and a2, a3, a4 -; RV64IA-NEXT: mv a5, a3 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 ; RV64IA-NEXT: bgeu a2, a1, .LBB102_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB102_1 Depth=1 -; RV64IA-NEXT: xor a5, a3, a1 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB102_3: # in Loop: Header=BB102_1 Depth=1 -; RV64IA-NEXT: sc.w.rl a5, a5, (a6) +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB102_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw umax i16* %a, i16 %b release ret i16 %1 @@ -9180,33 +9180,33 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 
*%a, i16 %b) nounwind { ; RV32I-NEXT: sw s4, 8(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s4, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s0, a0, -1 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: lui a1, 16 +; RV32I-NEXT: addi s0, a1, -1 ; RV32I-NEXT: and s1, s2, s0 ; RV32I-NEXT: addi s3, sp, 6 ; RV32I-NEXT: j .LBB103_2 ; RV32I-NEXT: .LBB103_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB103_2 Depth=1 -; RV32I-NEXT: sh a1, 6(sp) -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: sh a0, 6(sp) ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 4 +; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 6(sp) -; RV32I-NEXT: bnez a0, .LBB103_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 6(sp) +; RV32I-NEXT: bnez a1, .LBB103_4 ; RV32I-NEXT: .LBB103_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: and a0, a1, s0 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bltu s1, a0, .LBB103_1 +; RV32I-NEXT: and a1, a0, s0 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bltu s1, a1, .LBB103_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB103_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB103_1 ; RV32I-NEXT: .LBB103_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s4, 8(sp) ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) @@ -9218,28 +9218,28 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umax_i16_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a6, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB103_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a3, (a6) -; RV32IA-NEXT: and a2, a3, a4 -; RV32IA-NEXT: mv a5, a3 +; RV32IA-NEXT: lr.w.aq a4, (a0) +; RV32IA-NEXT: and a2, a4, a6 +; RV32IA-NEXT: mv a5, a4 ; RV32IA-NEXT: bgeu a2, a1, .LBB103_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB103_1 Depth=1 -; RV32IA-NEXT: xor a5, a3, a1 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 +; RV32IA-NEXT: xor a5, a4, a1 +; RV32IA-NEXT: and a5, a5, a6 +; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB103_3: # in Loop: Header=BB103_1 Depth=1 -; RV32IA-NEXT: sc.w.rl a5, a5, (a6) +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB103_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umax_i16_acq_rel: @@ -9253,33 +9253,33 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s4, 16(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s4, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s0, a0, -1 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: lui a1, 16 +; RV64I-NEXT: addiw s0, a1, -1 ; RV64I-NEXT: and s1, s2, s0 ; RV64I-NEXT: addi s3, sp, 14 ; RV64I-NEXT: j .LBB103_2 ; RV64I-NEXT: .LBB103_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB103_2 Depth=1 -; RV64I-NEXT: sh a1, 14(sp) -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; 
RV64I-NEXT: sh a0, 14(sp) ; RV64I-NEXT: mv a0, s4 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 4 +; RV64I-NEXT: addi a4, zero, 2 ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 14(sp) -; RV64I-NEXT: bnez a0, .LBB103_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 14(sp) +; RV64I-NEXT: bnez a1, .LBB103_4 ; RV64I-NEXT: .LBB103_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: and a0, a1, s0 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bltu s1, a0, .LBB103_1 +; RV64I-NEXT: and a1, a0, s0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bltu s1, a1, .LBB103_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB103_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB103_1 ; RV64I-NEXT: .LBB103_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s4, 16(sp) ; RV64I-NEXT: ld s3, 24(sp) ; RV64I-NEXT: ld s2, 32(sp) @@ -9291,28 +9291,28 @@ define i16 @atomicrmw_umax_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umax_i16_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB103_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a3, (a6) -; RV64IA-NEXT: and a2, a3, a4 -; RV64IA-NEXT: mv a5, a3 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 ; RV64IA-NEXT: bgeu a2, a1, .LBB103_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB103_1 Depth=1 -; RV64IA-NEXT: xor a5, a3, a1 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB103_3: # in Loop: Header=BB103_1 Depth=1 -; RV64IA-NEXT: sc.w.rl a5, a5, (a6) +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB103_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw umax i16* %a, i16 %b acq_rel ret i16 %1 @@ -9330,33 +9330,33 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s4, 8(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s4, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s0, a0, -1 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: lui a1, 16 +; RV32I-NEXT: addi s0, a1, -1 ; RV32I-NEXT: and s1, s2, s0 ; RV32I-NEXT: addi s3, sp, 6 ; RV32I-NEXT: j .LBB104_2 ; RV32I-NEXT: .LBB104_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB104_2 Depth=1 -; RV32I-NEXT: sh a1, 6(sp) -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: sh a0, 6(sp) ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 5 +; RV32I-NEXT: addi a4, zero, 5 ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 6(sp) -; RV32I-NEXT: bnez a0, .LBB104_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 6(sp) +; RV32I-NEXT: bnez a1, .LBB104_4 ; RV32I-NEXT: .LBB104_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: and a0, a1, s0 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bltu s1, a0, .LBB104_1 +; 
RV32I-NEXT: and a1, a0, s0 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bltu s1, a1, .LBB104_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB104_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB104_1 ; RV32I-NEXT: .LBB104_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s4, 8(sp) ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) @@ -9368,28 +9368,28 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umax_i16_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a6, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB104_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a3, (a6) -; RV32IA-NEXT: and a2, a3, a4 -; RV32IA-NEXT: mv a5, a3 +; RV32IA-NEXT: lr.w.aqrl a4, (a0) +; RV32IA-NEXT: and a2, a4, a6 +; RV32IA-NEXT: mv a5, a4 ; RV32IA-NEXT: bgeu a2, a1, .LBB104_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB104_1 Depth=1 -; RV32IA-NEXT: xor a5, a3, a1 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 +; RV32IA-NEXT: xor a5, a4, a1 +; RV32IA-NEXT: and a5, a5, a6 +; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB104_3: # in Loop: Header=BB104_1 Depth=1 -; RV32IA-NEXT: sc.w.aqrl a5, a5, (a6) +; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB104_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umax_i16_seq_cst: @@ -9403,33 +9403,33 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s4, 16(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s4, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s0, a0, -1 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: lui a1, 16 +; RV64I-NEXT: addiw s0, a1, -1 ; RV64I-NEXT: and s1, s2, s0 ; RV64I-NEXT: addi s3, sp, 14 ; RV64I-NEXT: j .LBB104_2 ; RV64I-NEXT: .LBB104_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB104_2 Depth=1 -; RV64I-NEXT: sh a1, 14(sp) -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: sh a0, 14(sp) ; RV64I-NEXT: mv a0, s4 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 5 +; RV64I-NEXT: addi a4, zero, 5 ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 14(sp) -; RV64I-NEXT: bnez a0, .LBB104_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 14(sp) +; RV64I-NEXT: bnez a1, .LBB104_4 ; RV64I-NEXT: .LBB104_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: and a0, a1, s0 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bltu s1, a0, .LBB104_1 +; RV64I-NEXT: and a1, a0, s0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bltu s1, a1, .LBB104_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB104_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB104_1 ; RV64I-NEXT: .LBB104_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s4, 16(sp) ; RV64I-NEXT: ld s3, 24(sp) ; RV64I-NEXT: ld s2, 32(sp) @@ -9441,28 +9441,28 @@ define i16 @atomicrmw_umax_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umax_i16_seq_cst: ; RV64IA: # 
%bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB104_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a3, (a6) -; RV64IA-NEXT: and a2, a3, a4 -; RV64IA-NEXT: mv a5, a3 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 ; RV64IA-NEXT: bgeu a2, a1, .LBB104_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB104_1 Depth=1 -; RV64IA-NEXT: xor a5, a3, a1 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB104_3: # in Loop: Header=BB104_1 Depth=1 -; RV64IA-NEXT: sc.w.aqrl a5, a5, (a6) +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB104_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw umax i16* %a, i16 %b seq_cst ret i16 %1 @@ -9480,33 +9480,33 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s4, 8(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s4, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s0, a0, -1 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: lui a1, 16 +; RV32I-NEXT: addi s0, a1, -1 ; RV32I-NEXT: and s1, s2, s0 ; RV32I-NEXT: addi s3, sp, 6 ; RV32I-NEXT: j .LBB105_2 ; RV32I-NEXT: .LBB105_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB105_2 Depth=1 -; RV32I-NEXT: sh a1, 6(sp) +; RV32I-NEXT: sh a0, 6(sp) ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: mv a3, zero ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 6(sp) -; RV32I-NEXT: bnez a0, .LBB105_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 6(sp) +; RV32I-NEXT: bnez a1, .LBB105_4 ; RV32I-NEXT: .LBB105_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: and a0, a1, s0 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bgeu s1, a0, .LBB105_1 +; RV32I-NEXT: and a1, a0, s0 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bgeu s1, a1, .LBB105_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB105_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB105_1 ; RV32I-NEXT: .LBB105_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s4, 8(sp) ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) @@ -9518,28 +9518,28 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umin_i16_monotonic: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a6, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB105_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a3, (a6) 
-; RV32IA-NEXT: and a2, a3, a4 -; RV32IA-NEXT: mv a5, a3 +; RV32IA-NEXT: lr.w a4, (a0) +; RV32IA-NEXT: and a2, a4, a6 +; RV32IA-NEXT: mv a5, a4 ; RV32IA-NEXT: bgeu a1, a2, .LBB105_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB105_1 Depth=1 -; RV32IA-NEXT: xor a5, a3, a1 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 +; RV32IA-NEXT: xor a5, a4, a1 +; RV32IA-NEXT: and a5, a5, a6 +; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB105_3: # in Loop: Header=BB105_1 Depth=1 -; RV32IA-NEXT: sc.w a5, a5, (a6) +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB105_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umin_i16_monotonic: @@ -9553,33 +9553,33 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s4, 16(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s4, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s0, a0, -1 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: lui a1, 16 +; RV64I-NEXT: addiw s0, a1, -1 ; RV64I-NEXT: and s1, s2, s0 ; RV64I-NEXT: addi s3, sp, 14 ; RV64I-NEXT: j .LBB105_2 ; RV64I-NEXT: .LBB105_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB105_2 Depth=1 -; RV64I-NEXT: sh a1, 14(sp) +; RV64I-NEXT: sh a0, 14(sp) ; RV64I-NEXT: mv a0, s4 ; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: mv a3, zero ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 14(sp) -; RV64I-NEXT: bnez a0, .LBB105_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 14(sp) +; RV64I-NEXT: bnez a1, .LBB105_4 ; RV64I-NEXT: .LBB105_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: and a0, a1, s0 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bgeu s1, a0, .LBB105_1 +; RV64I-NEXT: and a1, a0, s0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bgeu s1, a1, .LBB105_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB105_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB105_1 ; RV64I-NEXT: .LBB105_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s4, 16(sp) ; RV64I-NEXT: ld s3, 24(sp) ; RV64I-NEXT: ld s2, 32(sp) @@ -9591,28 +9591,28 @@ define i16 @atomicrmw_umin_i16_monotonic(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umin_i16_monotonic: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB105_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a3, (a6) -; RV64IA-NEXT: and a2, a3, a4 -; RV64IA-NEXT: mv a5, a3 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 ; RV64IA-NEXT: bgeu a1, a2, .LBB105_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB105_1 Depth=1 -; RV64IA-NEXT: xor a5, a3, a1 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB105_3: # in Loop: Header=BB105_1 Depth=1 -; RV64IA-NEXT: sc.w a5, a5, (a6) +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB105_1 ; RV64IA-NEXT: # %bb.4: -; 
RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw umin i16* %a, i16 %b monotonic ret i16 %1 @@ -9630,33 +9630,33 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s4, 8(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s4, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s0, a0, -1 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: lui a1, 16 +; RV32I-NEXT: addi s0, a1, -1 ; RV32I-NEXT: and s1, s2, s0 ; RV32I-NEXT: addi s3, sp, 6 ; RV32I-NEXT: j .LBB106_2 ; RV32I-NEXT: .LBB106_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB106_2 Depth=1 -; RV32I-NEXT: sh a1, 6(sp) -; RV32I-NEXT: addi a3, zero, 2 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: sh a0, 6(sp) ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 2 +; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 6(sp) -; RV32I-NEXT: bnez a0, .LBB106_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 6(sp) +; RV32I-NEXT: bnez a1, .LBB106_4 ; RV32I-NEXT: .LBB106_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: and a0, a1, s0 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bgeu s1, a0, .LBB106_1 +; RV32I-NEXT: and a1, a0, s0 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bgeu s1, a1, .LBB106_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB106_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB106_1 ; RV32I-NEXT: .LBB106_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s4, 8(sp) ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) @@ -9668,28 +9668,28 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umin_i16_acquire: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a6, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB106_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a3, (a6) -; RV32IA-NEXT: and a2, a3, a4 -; RV32IA-NEXT: mv a5, a3 +; RV32IA-NEXT: lr.w.aq a4, (a0) +; RV32IA-NEXT: and a2, a4, a6 +; RV32IA-NEXT: mv a5, a4 ; RV32IA-NEXT: bgeu a1, a2, .LBB106_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB106_1 Depth=1 -; RV32IA-NEXT: xor a5, a3, a1 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 +; RV32IA-NEXT: xor a5, a4, a1 +; RV32IA-NEXT: and a5, a5, a6 +; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB106_3: # in Loop: Header=BB106_1 Depth=1 -; RV32IA-NEXT: sc.w a5, a5, (a6) +; RV32IA-NEXT: sc.w a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB106_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umin_i16_acquire: @@ -9703,33 +9703,33 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s4, 16(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s4, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s0, a0, -1 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: lui a1, 16 +; RV64I-NEXT: addiw s0, a1, -1 ; RV64I-NEXT: and s1, s2, s0 ; RV64I-NEXT: addi s3, sp, 14 ; RV64I-NEXT: j 
.LBB106_2 ; RV64I-NEXT: .LBB106_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB106_2 Depth=1 -; RV64I-NEXT: sh a1, 14(sp) -; RV64I-NEXT: addi a3, zero, 2 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: sh a0, 14(sp) ; RV64I-NEXT: mv a0, s4 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 2 +; RV64I-NEXT: addi a4, zero, 2 ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 14(sp) -; RV64I-NEXT: bnez a0, .LBB106_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 14(sp) +; RV64I-NEXT: bnez a1, .LBB106_4 ; RV64I-NEXT: .LBB106_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: and a0, a1, s0 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bgeu s1, a0, .LBB106_1 +; RV64I-NEXT: and a1, a0, s0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bgeu s1, a1, .LBB106_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB106_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB106_1 ; RV64I-NEXT: .LBB106_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s4, 16(sp) ; RV64I-NEXT: ld s3, 24(sp) ; RV64I-NEXT: ld s2, 32(sp) @@ -9741,28 +9741,28 @@ define i16 @atomicrmw_umin_i16_acquire(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umin_i16_acquire: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB106_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a3, (a6) -; RV64IA-NEXT: and a2, a3, a4 -; RV64IA-NEXT: mv a5, a3 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 ; RV64IA-NEXT: bgeu a1, a2, .LBB106_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB106_1 Depth=1 -; RV64IA-NEXT: xor a5, a3, a1 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB106_3: # in Loop: Header=BB106_1 Depth=1 -; RV64IA-NEXT: sc.w a5, a5, (a6) +; RV64IA-NEXT: sc.w a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB106_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw umin i16* %a, i16 %b acquire ret i16 %1 @@ -9780,33 +9780,33 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s4, 8(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s4, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s0, a0, -1 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: lui a1, 16 +; RV32I-NEXT: addi s0, a1, -1 ; RV32I-NEXT: and s1, s2, s0 ; RV32I-NEXT: addi s3, sp, 6 ; RV32I-NEXT: j .LBB107_2 ; RV32I-NEXT: .LBB107_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB107_2 Depth=1 -; RV32I-NEXT: sh a1, 6(sp) -; RV32I-NEXT: addi a3, zero, 3 +; RV32I-NEXT: sh a0, 6(sp) ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 3 ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 6(sp) -; RV32I-NEXT: bnez a0, .LBB107_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 6(sp) +; RV32I-NEXT: bnez a1, .LBB107_4 ; RV32I-NEXT: .LBB107_2: # 
%atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: and a0, a1, s0 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bgeu s1, a0, .LBB107_1 +; RV32I-NEXT: and a1, a0, s0 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bgeu s1, a1, .LBB107_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB107_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB107_1 ; RV32I-NEXT: .LBB107_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s4, 8(sp) ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) @@ -9818,28 +9818,28 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umin_i16_release: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a6, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB107_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w a3, (a6) -; RV32IA-NEXT: and a2, a3, a4 -; RV32IA-NEXT: mv a5, a3 +; RV32IA-NEXT: lr.w a4, (a0) +; RV32IA-NEXT: and a2, a4, a6 +; RV32IA-NEXT: mv a5, a4 ; RV32IA-NEXT: bgeu a1, a2, .LBB107_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB107_1 Depth=1 -; RV32IA-NEXT: xor a5, a3, a1 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 +; RV32IA-NEXT: xor a5, a4, a1 +; RV32IA-NEXT: and a5, a5, a6 +; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB107_3: # in Loop: Header=BB107_1 Depth=1 -; RV32IA-NEXT: sc.w.rl a5, a5, (a6) +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB107_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umin_i16_release: @@ -9853,33 +9853,33 @@ define i16 @atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s4, 16(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s4, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s0, a0, -1 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: lui a1, 16 +; RV64I-NEXT: addiw s0, a1, -1 ; RV64I-NEXT: and s1, s2, s0 ; RV64I-NEXT: addi s3, sp, 14 ; RV64I-NEXT: j .LBB107_2 ; RV64I-NEXT: .LBB107_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB107_2 Depth=1 -; RV64I-NEXT: sh a1, 14(sp) -; RV64I-NEXT: addi a3, zero, 3 +; RV64I-NEXT: sh a0, 14(sp) ; RV64I-NEXT: mv a0, s4 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 3 ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 14(sp) -; RV64I-NEXT: bnez a0, .LBB107_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 14(sp) +; RV64I-NEXT: bnez a1, .LBB107_4 ; RV64I-NEXT: .LBB107_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: and a0, a1, s0 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bgeu s1, a0, .LBB107_1 +; RV64I-NEXT: and a1, a0, s0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bgeu s1, a1, .LBB107_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB107_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB107_1 ; RV64I-NEXT: .LBB107_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s4, 16(sp) ; RV64I-NEXT: ld s3, 24(sp) ; RV64I-NEXT: ld s2, 32(sp) @@ -9891,28 +9891,28 @@ define i16 
@atomicrmw_umin_i16_release(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umin_i16_release: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB107_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w a3, (a6) -; RV64IA-NEXT: and a2, a3, a4 -; RV64IA-NEXT: mv a5, a3 +; RV64IA-NEXT: lr.w a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 ; RV64IA-NEXT: bgeu a1, a2, .LBB107_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB107_1 Depth=1 -; RV64IA-NEXT: xor a5, a3, a1 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB107_3: # in Loop: Header=BB107_1 Depth=1 -; RV64IA-NEXT: sc.w.rl a5, a5, (a6) +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB107_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw umin i16* %a, i16 %b release ret i16 %1 @@ -9930,33 +9930,33 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s4, 8(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s4, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s0, a0, -1 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: lui a1, 16 +; RV32I-NEXT: addi s0, a1, -1 ; RV32I-NEXT: and s1, s2, s0 ; RV32I-NEXT: addi s3, sp, 6 ; RV32I-NEXT: j .LBB108_2 ; RV32I-NEXT: .LBB108_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB108_2 Depth=1 -; RV32I-NEXT: sh a1, 6(sp) -; RV32I-NEXT: addi a3, zero, 4 -; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: sh a0, 6(sp) ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 4 +; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 6(sp) -; RV32I-NEXT: bnez a0, .LBB108_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 6(sp) +; RV32I-NEXT: bnez a1, .LBB108_4 ; RV32I-NEXT: .LBB108_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: and a0, a1, s0 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bgeu s1, a0, .LBB108_1 +; RV32I-NEXT: and a1, a0, s0 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bgeu s1, a1, .LBB108_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB108_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB108_1 ; RV32I-NEXT: .LBB108_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s4, 8(sp) ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) @@ -9968,28 +9968,28 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umin_i16_acq_rel: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: 
sll a6, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB108_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aq a3, (a6) -; RV32IA-NEXT: and a2, a3, a4 -; RV32IA-NEXT: mv a5, a3 +; RV32IA-NEXT: lr.w.aq a4, (a0) +; RV32IA-NEXT: and a2, a4, a6 +; RV32IA-NEXT: mv a5, a4 ; RV32IA-NEXT: bgeu a1, a2, .LBB108_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB108_1 Depth=1 -; RV32IA-NEXT: xor a5, a3, a1 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 +; RV32IA-NEXT: xor a5, a4, a1 +; RV32IA-NEXT: and a5, a5, a6 +; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB108_3: # in Loop: Header=BB108_1 Depth=1 -; RV32IA-NEXT: sc.w.rl a5, a5, (a6) +; RV32IA-NEXT: sc.w.rl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB108_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umin_i16_acq_rel: @@ -10003,33 +10003,33 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s4, 16(sp) ; RV64I-NEXT: mv s2, a1 ; RV64I-NEXT: mv s4, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s0, a0, -1 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: lui a1, 16 +; RV64I-NEXT: addiw s0, a1, -1 ; RV64I-NEXT: and s1, s2, s0 ; RV64I-NEXT: addi s3, sp, 14 ; RV64I-NEXT: j .LBB108_2 ; RV64I-NEXT: .LBB108_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB108_2 Depth=1 -; RV64I-NEXT: sh a1, 14(sp) -; RV64I-NEXT: addi a3, zero, 4 -; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: sh a0, 14(sp) ; RV64I-NEXT: mv a0, s4 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 4 +; RV64I-NEXT: addi a4, zero, 2 ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 14(sp) -; RV64I-NEXT: bnez a0, .LBB108_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 14(sp) +; RV64I-NEXT: bnez a1, .LBB108_4 ; RV64I-NEXT: .LBB108_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: and a0, a1, s0 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bgeu s1, a0, .LBB108_1 +; RV64I-NEXT: and a1, a0, s0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bgeu s1, a1, .LBB108_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB108_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB108_1 ; RV64I-NEXT: .LBB108_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s4, 16(sp) ; RV64I-NEXT: ld s3, 24(sp) ; RV64I-NEXT: ld s2, 32(sp) @@ -10041,28 +10041,28 @@ define i16 @atomicrmw_umin_i16_acq_rel(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umin_i16_acq_rel: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB108_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aq a3, (a6) -; RV64IA-NEXT: and a2, a3, a4 -; RV64IA-NEXT: mv a5, a3 +; RV64IA-NEXT: lr.w.aq a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 ; RV64IA-NEXT: bgeu a1, a2, .LBB108_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB108_1 Depth=1 -; RV64IA-NEXT: xor a5, a3, a1 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: xor 
a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB108_3: # in Loop: Header=BB108_1 Depth=1 -; RV64IA-NEXT: sc.w.rl a5, a5, (a6) +; RV64IA-NEXT: sc.w.rl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB108_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw umin i16* %a, i16 %b acq_rel ret i16 %1 @@ -10080,33 +10080,33 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV32I-NEXT: sw s4, 8(sp) ; RV32I-NEXT: mv s2, a1 ; RV32I-NEXT: mv s4, a0 -; RV32I-NEXT: lhu a1, 0(a0) -; RV32I-NEXT: lui a0, 16 -; RV32I-NEXT: addi s0, a0, -1 +; RV32I-NEXT: lhu a0, 0(a0) +; RV32I-NEXT: lui a1, 16 +; RV32I-NEXT: addi s0, a1, -1 ; RV32I-NEXT: and s1, s2, s0 ; RV32I-NEXT: addi s3, sp, 6 ; RV32I-NEXT: j .LBB109_2 ; RV32I-NEXT: .LBB109_1: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB109_2 Depth=1 -; RV32I-NEXT: sh a1, 6(sp) -; RV32I-NEXT: addi a3, zero, 5 -; RV32I-NEXT: addi a4, zero, 5 +; RV32I-NEXT: sh a0, 6(sp) ; RV32I-NEXT: mv a0, s4 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a3, zero, 5 +; RV32I-NEXT: addi a4, zero, 5 ; RV32I-NEXT: call __atomic_compare_exchange_2 -; RV32I-NEXT: lh a1, 6(sp) -; RV32I-NEXT: bnez a0, .LBB109_4 +; RV32I-NEXT: mv a1, a0 +; RV32I-NEXT: lh a0, 6(sp) +; RV32I-NEXT: bnez a1, .LBB109_4 ; RV32I-NEXT: .LBB109_2: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: and a0, a1, s0 -; RV32I-NEXT: mv a2, a1 -; RV32I-NEXT: bgeu s1, a0, .LBB109_1 +; RV32I-NEXT: and a1, a0, s0 +; RV32I-NEXT: mv a2, a0 +; RV32I-NEXT: bgeu s1, a1, .LBB109_1 ; RV32I-NEXT: # %bb.3: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB109_2 Depth=1 ; RV32I-NEXT: mv a2, s2 ; RV32I-NEXT: j .LBB109_1 ; RV32I-NEXT: .LBB109_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: lw s4, 8(sp) ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) @@ -10118,28 +10118,28 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; ; RV32IA-LABEL: atomicrmw_umin_i16_seq_cst: ; RV32IA: # %bb.0: -; RV32IA-NEXT: andi a6, a0, -4 -; RV32IA-NEXT: slli a0, a0, 3 -; RV32IA-NEXT: andi a0, a0, 24 -; RV32IA-NEXT: lui a3, 16 -; RV32IA-NEXT: addi a3, a3, -1 -; RV32IA-NEXT: sll a4, a3, a0 -; RV32IA-NEXT: and a1, a1, a3 -; RV32IA-NEXT: sll a1, a1, a0 +; RV32IA-NEXT: lui a2, 16 +; RV32IA-NEXT: addi a2, a2, -1 +; RV32IA-NEXT: and a1, a1, a2 +; RV32IA-NEXT: slli a3, a0, 3 +; RV32IA-NEXT: andi a3, a3, 24 +; RV32IA-NEXT: sll a6, a2, a3 +; RV32IA-NEXT: sll a1, a1, a3 +; RV32IA-NEXT: andi a0, a0, -4 ; RV32IA-NEXT: .LBB109_1: # =>This Inner Loop Header: Depth=1 -; RV32IA-NEXT: lr.w.aqrl a3, (a6) -; RV32IA-NEXT: and a2, a3, a4 -; RV32IA-NEXT: mv a5, a3 +; RV32IA-NEXT: lr.w.aqrl a4, (a0) +; RV32IA-NEXT: and a2, a4, a6 +; RV32IA-NEXT: mv a5, a4 ; RV32IA-NEXT: bgeu a1, a2, .LBB109_3 ; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB109_1 Depth=1 -; RV32IA-NEXT: xor a5, a3, a1 -; RV32IA-NEXT: and a5, a5, a4 -; RV32IA-NEXT: xor a5, a3, a5 +; RV32IA-NEXT: xor a5, a4, a1 +; RV32IA-NEXT: and a5, a5, a6 +; RV32IA-NEXT: xor a5, a4, a5 ; RV32IA-NEXT: .LBB109_3: # in Loop: Header=BB109_1 Depth=1 -; RV32IA-NEXT: sc.w.aqrl a5, a5, (a6) +; RV32IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV32IA-NEXT: bnez a5, .LBB109_1 ; RV32IA-NEXT: # %bb.4: -; RV32IA-NEXT: srl a0, a3, a0 +; RV32IA-NEXT: srl a0, a4, a3 ; RV32IA-NEXT: ret ; ; RV64I-LABEL: atomicrmw_umin_i16_seq_cst: @@ -10153,33 +10153,33 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; RV64I-NEXT: sd s4, 16(sp) ; RV64I-NEXT: 
mv s2, a1 ; RV64I-NEXT: mv s4, a0 -; RV64I-NEXT: lhu a1, 0(a0) -; RV64I-NEXT: lui a0, 16 -; RV64I-NEXT: addiw s0, a0, -1 +; RV64I-NEXT: lhu a0, 0(a0) +; RV64I-NEXT: lui a1, 16 +; RV64I-NEXT: addiw s0, a1, -1 ; RV64I-NEXT: and s1, s2, s0 ; RV64I-NEXT: addi s3, sp, 14 ; RV64I-NEXT: j .LBB109_2 ; RV64I-NEXT: .LBB109_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB109_2 Depth=1 -; RV64I-NEXT: sh a1, 14(sp) -; RV64I-NEXT: addi a3, zero, 5 -; RV64I-NEXT: addi a4, zero, 5 +; RV64I-NEXT: sh a0, 14(sp) ; RV64I-NEXT: mv a0, s4 ; RV64I-NEXT: mv a1, s3 +; RV64I-NEXT: addi a3, zero, 5 +; RV64I-NEXT: addi a4, zero, 5 ; RV64I-NEXT: call __atomic_compare_exchange_2 -; RV64I-NEXT: lh a1, 14(sp) -; RV64I-NEXT: bnez a0, .LBB109_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lh a0, 14(sp) +; RV64I-NEXT: bnez a1, .LBB109_4 ; RV64I-NEXT: .LBB109_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: and a0, a1, s0 -; RV64I-NEXT: mv a2, a1 -; RV64I-NEXT: bgeu s1, a0, .LBB109_1 +; RV64I-NEXT: and a1, a0, s0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bgeu s1, a1, .LBB109_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB109_2 Depth=1 ; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB109_1 ; RV64I-NEXT: .LBB109_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ld s4, 16(sp) ; RV64I-NEXT: ld s3, 24(sp) ; RV64I-NEXT: ld s2, 32(sp) @@ -10191,28 +10191,28 @@ define i16 @atomicrmw_umin_i16_seq_cst(i16 *%a, i16 %b) nounwind { ; ; RV64IA-LABEL: atomicrmw_umin_i16_seq_cst: ; RV64IA: # %bb.0: -; RV64IA-NEXT: andi a6, a0, -4 -; RV64IA-NEXT: slli a0, a0, 3 -; RV64IA-NEXT: andi a0, a0, 24 -; RV64IA-NEXT: lui a3, 16 -; RV64IA-NEXT: addiw a3, a3, -1 -; RV64IA-NEXT: sllw a4, a3, a0 -; RV64IA-NEXT: and a1, a1, a3 -; RV64IA-NEXT: sllw a1, a1, a0 +; RV64IA-NEXT: lui a2, 16 +; RV64IA-NEXT: addiw a2, a2, -1 +; RV64IA-NEXT: and a1, a1, a2 +; RV64IA-NEXT: slli a3, a0, 3 +; RV64IA-NEXT: andi a3, a3, 24 +; RV64IA-NEXT: sllw a6, a2, a3 +; RV64IA-NEXT: sllw a1, a1, a3 +; RV64IA-NEXT: andi a0, a0, -4 ; RV64IA-NEXT: .LBB109_1: # =>This Inner Loop Header: Depth=1 -; RV64IA-NEXT: lr.w.aqrl a3, (a6) -; RV64IA-NEXT: and a2, a3, a4 -; RV64IA-NEXT: mv a5, a3 +; RV64IA-NEXT: lr.w.aqrl a4, (a0) +; RV64IA-NEXT: and a2, a4, a6 +; RV64IA-NEXT: mv a5, a4 ; RV64IA-NEXT: bgeu a1, a2, .LBB109_3 ; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB109_1 Depth=1 -; RV64IA-NEXT: xor a5, a3, a1 -; RV64IA-NEXT: and a5, a5, a4 -; RV64IA-NEXT: xor a5, a3, a5 +; RV64IA-NEXT: xor a5, a4, a1 +; RV64IA-NEXT: and a5, a5, a6 +; RV64IA-NEXT: xor a5, a4, a5 ; RV64IA-NEXT: .LBB109_3: # in Loop: Header=BB109_1 Depth=1 -; RV64IA-NEXT: sc.w.aqrl a5, a5, (a6) +; RV64IA-NEXT: sc.w.aqrl a5, a5, (a0) ; RV64IA-NEXT: bnez a5, .LBB109_1 ; RV64IA-NEXT: # %bb.4: -; RV64IA-NEXT: srlw a0, a3, a0 +; RV64IA-NEXT: srlw a0, a4, a3 ; RV64IA-NEXT: ret %1 = atomicrmw umin i16* %a, i16 %b seq_cst ret i16 %1 @@ -11496,31 +11496,30 @@ define i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bge s1, a3, .LBB145_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bge s0, a2, .LBB145_3 ; RV32I-NEXT: .LBB145_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) -; RV32I-NEXT: mv a0, s0 +; 
RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: mv a3, zero ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB145_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB145_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: blt s1, a3, .LBB145_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: blt s0, a2, .LBB145_1 ; RV32I-NEXT: .LBB145_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB145_1 ; RV32I-NEXT: .LBB145_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -11541,33 +11540,33 @@ define i32 @atomicrmw_max_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB145_2 ; RV64I-NEXT: .LBB145_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB145_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: mv a3, zero ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB145_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB145_4 ; RV64I-NEXT: .LBB145_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: blt s1, a0, .LBB145_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: blt s0, a1, .LBB145_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB145_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB145_1 ; RV64I-NEXT: .LBB145_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -11592,31 +11591,30 @@ define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bge s1, a3, .LBB146_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bge s0, a2, .LBB146_3 ; RV32I-NEXT: .LBB146_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: addi a3, zero, 2 ; RV32I-NEXT: addi a4, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB146_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB146_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: blt s1, a3, .LBB146_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: blt s0, a2, .LBB146_1 ; RV32I-NEXT: .LBB146_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; 
RV32I-NEXT: j .LBB146_1 ; RV32I-NEXT: .LBB146_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -11637,33 +11635,33 @@ define i32 @atomicrmw_max_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB146_2 ; RV64I-NEXT: .LBB146_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB146_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 2 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB146_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB146_4 ; RV64I-NEXT: .LBB146_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: blt s1, a0, .LBB146_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: blt s0, a1, .LBB146_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB146_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB146_1 ; RV64I-NEXT: .LBB146_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -11688,31 +11686,30 @@ define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bge s1, a3, .LBB147_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bge s0, a2, .LBB147_3 ; RV32I-NEXT: .LBB147_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) -; RV32I-NEXT: addi a3, zero, 3 -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s2 +; RV32I-NEXT: addi a3, zero, 3 ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB147_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB147_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: blt s1, a3, .LBB147_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: blt s0, a2, .LBB147_1 ; RV32I-NEXT: .LBB147_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB147_1 ; RV32I-NEXT: .LBB147_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -11733,33 +11730,33 @@ define i32 @atomicrmw_max_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; 
RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB147_2 ; RV64I-NEXT: .LBB147_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB147_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 3 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB147_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB147_4 ; RV64I-NEXT: .LBB147_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: blt s1, a0, .LBB147_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: blt s0, a1, .LBB147_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB147_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB147_1 ; RV64I-NEXT: .LBB147_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -11784,31 +11781,30 @@ define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bge s1, a3, .LBB148_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bge s0, a2, .LBB148_3 ; RV32I-NEXT: .LBB148_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: addi a3, zero, 4 ; RV32I-NEXT: addi a4, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB148_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB148_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: blt s1, a3, .LBB148_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: blt s0, a2, .LBB148_1 ; RV32I-NEXT: .LBB148_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB148_1 ; RV32I-NEXT: .LBB148_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -11829,33 +11825,33 @@ define i32 @atomicrmw_max_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB148_2 ; RV64I-NEXT: .LBB148_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB148_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 4 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB148_4 +; 
RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB148_4 ; RV64I-NEXT: .LBB148_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: blt s1, a0, .LBB148_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: blt s0, a1, .LBB148_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB148_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB148_1 ; RV64I-NEXT: .LBB148_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -11880,31 +11876,30 @@ define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bge s1, a3, .LBB149_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bge s0, a2, .LBB149_3 ; RV32I-NEXT: .LBB149_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: addi a3, zero, 5 ; RV32I-NEXT: addi a4, zero, 5 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB149_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB149_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: blt s1, a3, .LBB149_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: blt s0, a2, .LBB149_1 ; RV32I-NEXT: .LBB149_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB149_1 ; RV32I-NEXT: .LBB149_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -11925,33 +11920,33 @@ define i32 @atomicrmw_max_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB149_2 ; RV64I-NEXT: .LBB149_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB149_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 5 ; RV64I-NEXT: addi a4, zero, 5 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB149_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB149_4 ; RV64I-NEXT: .LBB149_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: blt s1, a0, .LBB149_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: blt s0, a1, .LBB149_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB149_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB149_1 ; RV64I-NEXT: .LBB149_4: # 
%atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -11976,31 +11971,30 @@ define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: blt s1, a3, .LBB150_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: blt s0, a2, .LBB150_3 ; RV32I-NEXT: .LBB150_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: mv a3, zero ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB150_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB150_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bge s1, a3, .LBB150_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bge s0, a2, .LBB150_1 ; RV32I-NEXT: .LBB150_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB150_1 ; RV32I-NEXT: .LBB150_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -12021,33 +12015,33 @@ define i32 @atomicrmw_min_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB150_2 ; RV64I-NEXT: .LBB150_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB150_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: mv a3, zero ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB150_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB150_4 ; RV64I-NEXT: .LBB150_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bge s1, a0, .LBB150_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bge s0, a1, .LBB150_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB150_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB150_1 ; RV64I-NEXT: .LBB150_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -12072,31 +12066,30 @@ define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: blt s1, a3, .LBB151_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: blt 
s0, a2, .LBB151_3 ; RV32I-NEXT: .LBB151_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: addi a3, zero, 2 ; RV32I-NEXT: addi a4, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB151_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB151_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bge s1, a3, .LBB151_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bge s0, a2, .LBB151_1 ; RV32I-NEXT: .LBB151_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB151_1 ; RV32I-NEXT: .LBB151_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -12117,33 +12110,33 @@ define i32 @atomicrmw_min_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB151_2 ; RV64I-NEXT: .LBB151_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB151_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 2 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB151_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB151_4 ; RV64I-NEXT: .LBB151_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bge s1, a0, .LBB151_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bge s0, a1, .LBB151_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB151_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB151_1 ; RV64I-NEXT: .LBB151_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -12168,31 +12161,30 @@ define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: blt s1, a3, .LBB152_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: blt s0, a2, .LBB152_3 ; RV32I-NEXT: .LBB152_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) -; RV32I-NEXT: addi a3, zero, 3 -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s2 +; RV32I-NEXT: addi a3, zero, 3 ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB152_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB152_1 Depth=1 -; RV32I-NEXT: mv 
a2, a3 -; RV32I-NEXT: bge s1, a3, .LBB152_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bge s0, a2, .LBB152_1 ; RV32I-NEXT: .LBB152_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB152_1 ; RV32I-NEXT: .LBB152_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -12213,33 +12205,33 @@ define i32 @atomicrmw_min_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB152_2 ; RV64I-NEXT: .LBB152_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB152_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 3 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB152_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB152_4 ; RV64I-NEXT: .LBB152_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bge s1, a0, .LBB152_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bge s0, a1, .LBB152_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB152_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB152_1 ; RV64I-NEXT: .LBB152_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -12264,31 +12256,30 @@ define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: blt s1, a3, .LBB153_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: blt s0, a2, .LBB153_3 ; RV32I-NEXT: .LBB153_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: addi a3, zero, 4 ; RV32I-NEXT: addi a4, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB153_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB153_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bge s1, a3, .LBB153_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bge s0, a2, .LBB153_1 ; RV32I-NEXT: .LBB153_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB153_1 ; RV32I-NEXT: .LBB153_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -12309,33 +12300,33 @@ define i32 @atomicrmw_min_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; 
RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB153_2 ; RV64I-NEXT: .LBB153_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB153_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 4 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB153_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB153_4 ; RV64I-NEXT: .LBB153_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bge s1, a0, .LBB153_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bge s0, a1, .LBB153_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB153_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB153_1 ; RV64I-NEXT: .LBB153_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -12360,31 +12351,30 @@ define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: blt s1, a3, .LBB154_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: blt s0, a2, .LBB154_3 ; RV32I-NEXT: .LBB154_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: addi a3, zero, 5 ; RV32I-NEXT: addi a4, zero, 5 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB154_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB154_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bge s1, a3, .LBB154_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bge s0, a2, .LBB154_1 ; RV32I-NEXT: .LBB154_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB154_1 ; RV32I-NEXT: .LBB154_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -12405,33 +12395,33 @@ define i32 @atomicrmw_min_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB154_2 ; RV64I-NEXT: .LBB154_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB154_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, 
zero, 5 ; RV64I-NEXT: addi a4, zero, 5 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB154_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB154_4 ; RV64I-NEXT: .LBB154_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bge s1, a0, .LBB154_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bge s0, a1, .LBB154_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB154_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB154_1 ; RV64I-NEXT: .LBB154_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -12456,31 +12446,30 @@ define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bgeu s1, a3, .LBB155_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bgeu s0, a2, .LBB155_3 ; RV32I-NEXT: .LBB155_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: mv a3, zero ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB155_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB155_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bltu s1, a3, .LBB155_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bltu s0, a2, .LBB155_1 ; RV32I-NEXT: .LBB155_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB155_1 ; RV32I-NEXT: .LBB155_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -12501,33 +12490,33 @@ define i32 @atomicrmw_umax_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB155_2 ; RV64I-NEXT: .LBB155_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB155_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: mv a3, zero ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB155_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB155_4 ; RV64I-NEXT: .LBB155_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a0, .LBB155_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bltu s0, a1, .LBB155_1 ; RV64I-NEXT: # %bb.3: # 
%atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB155_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB155_1 ; RV64I-NEXT: .LBB155_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -12552,31 +12541,30 @@ define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bgeu s1, a3, .LBB156_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bgeu s0, a2, .LBB156_3 ; RV32I-NEXT: .LBB156_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: addi a3, zero, 2 ; RV32I-NEXT: addi a4, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB156_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB156_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bltu s1, a3, .LBB156_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bltu s0, a2, .LBB156_1 ; RV32I-NEXT: .LBB156_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB156_1 ; RV32I-NEXT: .LBB156_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -12597,33 +12585,33 @@ define i32 @atomicrmw_umax_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB156_2 ; RV64I-NEXT: .LBB156_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB156_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 2 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB156_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB156_4 ; RV64I-NEXT: .LBB156_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a0, .LBB156_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bltu s0, a1, .LBB156_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB156_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB156_1 ; RV64I-NEXT: .LBB156_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -12648,31 +12636,30 @@ define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, 
a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bgeu s1, a3, .LBB157_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bgeu s0, a2, .LBB157_3 ; RV32I-NEXT: .LBB157_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) -; RV32I-NEXT: addi a3, zero, 3 -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s2 +; RV32I-NEXT: addi a3, zero, 3 ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB157_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB157_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bltu s1, a3, .LBB157_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bltu s0, a2, .LBB157_1 ; RV32I-NEXT: .LBB157_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB157_1 ; RV32I-NEXT: .LBB157_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -12693,33 +12680,33 @@ define i32 @atomicrmw_umax_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB157_2 ; RV64I-NEXT: .LBB157_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB157_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 3 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB157_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB157_4 ; RV64I-NEXT: .LBB157_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a0, .LBB157_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bltu s0, a1, .LBB157_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB157_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB157_1 ; RV64I-NEXT: .LBB157_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -12744,31 +12731,30 @@ define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bgeu s1, a3, .LBB158_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bgeu s0, a2, .LBB158_3 ; RV32I-NEXT: .LBB158_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: addi a3, zero, 4 ; RV32I-NEXT: addi a4, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call 
__atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB158_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB158_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bltu s1, a3, .LBB158_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bltu s0, a2, .LBB158_1 ; RV32I-NEXT: .LBB158_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB158_1 ; RV32I-NEXT: .LBB158_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -12789,33 +12775,33 @@ define i32 @atomicrmw_umax_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB158_2 ; RV64I-NEXT: .LBB158_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB158_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 4 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB158_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB158_4 ; RV64I-NEXT: .LBB158_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a0, .LBB158_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bltu s0, a1, .LBB158_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB158_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB158_1 ; RV64I-NEXT: .LBB158_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -12840,31 +12826,30 @@ define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bgeu s1, a3, .LBB159_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bgeu s0, a2, .LBB159_3 ; RV32I-NEXT: .LBB159_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: addi a3, zero, 5 ; RV32I-NEXT: addi a4, zero, 5 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB159_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB159_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bltu s1, a3, .LBB159_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bltu s0, a2, .LBB159_1 ; RV32I-NEXT: .LBB159_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB159_1 ; RV32I-NEXT: .LBB159_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv 
a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -12885,33 +12870,33 @@ define i32 @atomicrmw_umax_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB159_2 ; RV64I-NEXT: .LBB159_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB159_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 5 ; RV64I-NEXT: addi a4, zero, 5 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB159_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB159_4 ; RV64I-NEXT: .LBB159_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a0, .LBB159_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bltu s0, a1, .LBB159_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB159_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB159_1 ; RV64I-NEXT: .LBB159_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -12936,31 +12921,30 @@ define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bltu s1, a3, .LBB160_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bltu s0, a2, .LBB160_3 ; RV32I-NEXT: .LBB160_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: mv a3, zero ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB160_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB160_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bgeu s1, a3, .LBB160_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bgeu s0, a2, .LBB160_1 ; RV32I-NEXT: .LBB160_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB160_1 ; RV32I-NEXT: .LBB160_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -12981,33 +12965,33 @@ define i32 @atomicrmw_umin_i32_monotonic(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB160_2 ; 
RV64I-NEXT: .LBB160_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB160_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: mv a3, zero ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB160_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB160_4 ; RV64I-NEXT: .LBB160_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a0, .LBB160_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bgeu s0, a1, .LBB160_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB160_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB160_1 ; RV64I-NEXT: .LBB160_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -13032,31 +13016,30 @@ define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bltu s1, a3, .LBB161_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bltu s0, a2, .LBB161_3 ; RV32I-NEXT: .LBB161_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: addi a3, zero, 2 ; RV32I-NEXT: addi a4, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB161_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB161_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bgeu s1, a3, .LBB161_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bgeu s0, a2, .LBB161_1 ; RV32I-NEXT: .LBB161_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB161_1 ; RV32I-NEXT: .LBB161_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -13077,33 +13060,33 @@ define i32 @atomicrmw_umin_i32_acquire(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB161_2 ; RV64I-NEXT: .LBB161_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB161_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 2 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB161_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB161_4 ; RV64I-NEXT: .LBB161_2: # %atomicrmw.start ; 
RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a0, .LBB161_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bgeu s0, a1, .LBB161_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB161_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB161_1 ; RV64I-NEXT: .LBB161_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -13128,31 +13111,30 @@ define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bltu s1, a3, .LBB162_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bltu s0, a2, .LBB162_3 ; RV32I-NEXT: .LBB162_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) -; RV32I-NEXT: addi a3, zero, 3 -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s2 +; RV32I-NEXT: addi a3, zero, 3 ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB162_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB162_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bgeu s1, a3, .LBB162_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bgeu s0, a2, .LBB162_1 ; RV32I-NEXT: .LBB162_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB162_1 ; RV32I-NEXT: .LBB162_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -13173,33 +13155,33 @@ define i32 @atomicrmw_umin_i32_release(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB162_2 ; RV64I-NEXT: .LBB162_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB162_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 3 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB162_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB162_4 ; RV64I-NEXT: .LBB162_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a0, .LBB162_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bgeu s0, a1, .LBB162_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB162_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB162_1 ; RV64I-NEXT: .LBB162_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) 
@@ -13224,31 +13206,30 @@ define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bltu s1, a3, .LBB163_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bltu s0, a2, .LBB163_3 ; RV32I-NEXT: .LBB163_1: # %atomicrmw.start ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: addi a3, zero, 4 ; RV32I-NEXT: addi a4, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB163_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB163_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bgeu s1, a3, .LBB163_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bgeu s0, a2, .LBB163_1 ; RV32I-NEXT: .LBB163_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB163_1 ; RV32I-NEXT: .LBB163_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -13269,33 +13250,33 @@ define i32 @atomicrmw_umin_i32_acq_rel(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB163_2 ; RV64I-NEXT: .LBB163_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB163_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 4 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB163_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB163_4 ; RV64I-NEXT: .LBB163_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a0, .LBB163_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bgeu s0, a1, .LBB163_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB163_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB163_1 ; RV64I-NEXT: .LBB163_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -13320,31 +13301,30 @@ define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV32I-NEXT: sw s0, 24(sp) ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a3, 0(a0) -; RV32I-NEXT: mv s1, a1 +; RV32I-NEXT: mv s0, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: addi s2, sp, 12 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bltu s1, a3, .LBB164_3 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bltu s0, a2, .LBB164_3 ; RV32I-NEXT: .LBB164_1: # %atomicrmw.start ; 
RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: sw a3, 12(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: addi a3, zero, 5 ; RV32I-NEXT: addi a4, zero, 5 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s2 ; RV32I-NEXT: call __atomic_compare_exchange_4 -; RV32I-NEXT: lw a3, 12(sp) +; RV32I-NEXT: lw a2, 12(sp) ; RV32I-NEXT: bnez a0, .LBB164_4 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start ; RV32I-NEXT: # in Loop: Header=BB164_1 Depth=1 -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bgeu s1, a3, .LBB164_1 +; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: bgeu s0, a2, .LBB164_1 ; RV32I-NEXT: .LBB164_3: # %atomicrmw.start -; RV32I-NEXT: mv a2, s1 +; RV32I-NEXT: mv a2, s0 ; RV32I-NEXT: j .LBB164_1 ; RV32I-NEXT: .LBB164_4: # %atomicrmw.end -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) ; RV32I-NEXT: lw s0, 24(sp) @@ -13365,33 +13345,33 @@ define i32 @atomicrmw_umin_i32_seq_cst(i32 *%a, i32 %b) nounwind { ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) ; RV64I-NEXT: sd s3, 8(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: lwu a3, 0(a0) -; RV64I-NEXT: mv s3, a1 -; RV64I-NEXT: sext.w s1, a1 -; RV64I-NEXT: addi s2, sp, 4 +; RV64I-NEXT: mv s2, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: lwu a0, 0(a0) +; RV64I-NEXT: sext.w s0, a1 +; RV64I-NEXT: addi s3, sp, 4 ; RV64I-NEXT: j .LBB164_2 ; RV64I-NEXT: .LBB164_1: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB164_2 Depth=1 -; RV64I-NEXT: sw a3, 4(sp) +; RV64I-NEXT: sw a0, 4(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s3 ; RV64I-NEXT: addi a3, zero, 5 ; RV64I-NEXT: addi a4, zero, 5 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_4 -; RV64I-NEXT: lw a3, 4(sp) -; RV64I-NEXT: bnez a0, .LBB164_4 +; RV64I-NEXT: mv a1, a0 +; RV64I-NEXT: lw a0, 4(sp) +; RV64I-NEXT: bnez a1, .LBB164_4 ; RV64I-NEXT: .LBB164_2: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sext.w a0, a3 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a0, .LBB164_1 +; RV64I-NEXT: sext.w a1, a0 +; RV64I-NEXT: mv a2, a0 +; RV64I-NEXT: bgeu s0, a1, .LBB164_1 ; RV64I-NEXT: # %bb.3: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB164_2 Depth=1 -; RV64I-NEXT: mv a2, s3 +; RV64I-NEXT: mv a2, s2 ; RV64I-NEXT: j .LBB164_1 ; RV64I-NEXT: .LBB164_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 ; RV64I-NEXT: ld s3, 8(sp) ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) @@ -14822,43 +14802,41 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB200_3 +; RV32I-NEXT: bne a1, s0, .LBB200_3 ; RV32I-NEXT: j .LBB200_4 ; RV32I-NEXT: .LBB200_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: mv a5, zero ; RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB200_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB200_4 +; RV32I-NEXT: beq 
a1, s0, .LBB200_4 ; RV32I-NEXT: .LBB200_3: # %atomicrmw.start -; RV32I-NEXT: slt a0, s1, a5 +; RV32I-NEXT: slt a0, s0, a1 ; RV32I-NEXT: j .LBB200_5 ; RV32I-NEXT: .LBB200_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB200_5: # %atomicrmw.start -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB200_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB200_1 ; RV32I-NEXT: .LBB200_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -14875,43 +14853,41 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB200_3 +; RV32IA-NEXT: bne a1, s0, .LBB200_3 ; RV32IA-NEXT: j .LBB200_4 ; RV32IA-NEXT: .LBB200_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) -; RV32IA-NEXT: mv a0, s0 +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 ; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: mv a4, zero ; RV32IA-NEXT: mv a5, zero ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB200_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB200_4 +; RV32IA-NEXT: beq a1, s0, .LBB200_4 ; RV32IA-NEXT: .LBB200_3: # %atomicrmw.start -; RV32IA-NEXT: slt a0, s1, a5 +; RV32IA-NEXT: slt a0, s0, a1 ; RV32IA-NEXT: j .LBB200_5 ; RV32IA-NEXT: .LBB200_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB200_5: # %atomicrmw.start -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB200_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB200_1 ; RV32IA-NEXT: .LBB200_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -14927,31 +14903,30 @@ define i64 @atomicrmw_max_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bge s1, a3, .LBB200_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bge s0, a2, .LBB200_3 ; RV64I-NEXT: .LBB200_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: mv a3, zero ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB200_4 ; RV64I-NEXT: # %bb.2: 
# %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB200_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: blt s1, a3, .LBB200_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: blt s0, a2, .LBB200_1 ; RV64I-NEXT: .LBB200_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB200_1 ; RV64I-NEXT: .LBB200_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -14976,43 +14951,41 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB201_3 +; RV32I-NEXT: bne a1, s0, .LBB201_3 ; RV32I-NEXT: j .LBB201_4 ; RV32I-NEXT: .LBB201_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: addi a5, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB201_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB201_4 +; RV32I-NEXT: beq a1, s0, .LBB201_4 ; RV32I-NEXT: .LBB201_3: # %atomicrmw.start -; RV32I-NEXT: slt a0, s1, a5 +; RV32I-NEXT: slt a0, s0, a1 ; RV32I-NEXT: j .LBB201_5 ; RV32I-NEXT: .LBB201_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB201_5: # %atomicrmw.start -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB201_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB201_1 ; RV32I-NEXT: .LBB201_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -15029,43 +15002,41 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB201_3 +; RV32IA-NEXT: bne a1, s0, .LBB201_3 ; RV32IA-NEXT: j .LBB201_4 ; RV32IA-NEXT: .LBB201_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 +; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: addi a4, zero, 2 ; RV32IA-NEXT: addi a5, zero, 2 -; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB201_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB201_4 +; RV32IA-NEXT: beq a1, s0, .LBB201_4 ; RV32IA-NEXT: 
.LBB201_3: # %atomicrmw.start -; RV32IA-NEXT: slt a0, s1, a5 +; RV32IA-NEXT: slt a0, s0, a1 ; RV32IA-NEXT: j .LBB201_5 ; RV32IA-NEXT: .LBB201_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB201_5: # %atomicrmw.start -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB201_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB201_1 ; RV32IA-NEXT: .LBB201_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -15081,31 +15052,30 @@ define i64 @atomicrmw_max_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bge s1, a3, .LBB201_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bge s0, a2, .LBB201_3 ; RV64I-NEXT: .LBB201_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: addi a3, zero, 2 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB201_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB201_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: blt s1, a3, .LBB201_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: blt s0, a2, .LBB201_1 ; RV64I-NEXT: .LBB201_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB201_1 ; RV64I-NEXT: .LBB201_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -15130,43 +15100,41 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB202_3 +; RV32I-NEXT: bne a1, s0, .LBB202_3 ; RV32I-NEXT: j .LBB202_4 ; RV32I-NEXT: .LBB202_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) -; RV32I-NEXT: addi a4, zero, 3 -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a4, zero, 3 ; RV32I-NEXT: mv a5, zero ; RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB202_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB202_4 +; RV32I-NEXT: beq a1, s0, .LBB202_4 ; RV32I-NEXT: .LBB202_3: # %atomicrmw.start -; RV32I-NEXT: slt a0, s1, a5 +; RV32I-NEXT: slt a0, s0, a1 ; RV32I-NEXT: j .LBB202_5 ; RV32I-NEXT: .LBB202_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB202_5: # 
%atomicrmw.start -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB202_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB202_1 ; RV32I-NEXT: .LBB202_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -15183,43 +15151,41 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB202_3 +; RV32IA-NEXT: bne a1, s0, .LBB202_3 ; RV32IA-NEXT: j .LBB202_4 ; RV32IA-NEXT: .LBB202_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) -; RV32IA-NEXT: addi a4, zero, 3 -; RV32IA-NEXT: mv a0, s0 +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 ; RV32IA-NEXT: mv a1, s3 +; RV32IA-NEXT: addi a4, zero, 3 ; RV32IA-NEXT: mv a5, zero ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB202_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB202_4 +; RV32IA-NEXT: beq a1, s0, .LBB202_4 ; RV32IA-NEXT: .LBB202_3: # %atomicrmw.start -; RV32IA-NEXT: slt a0, s1, a5 +; RV32IA-NEXT: slt a0, s0, a1 ; RV32IA-NEXT: j .LBB202_5 ; RV32IA-NEXT: .LBB202_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB202_5: # %atomicrmw.start -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB202_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB202_1 ; RV32IA-NEXT: .LBB202_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -15235,31 +15201,30 @@ define i64 @atomicrmw_max_i64_release(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bge s1, a3, .LBB202_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bge s0, a2, .LBB202_3 ; RV64I-NEXT: .LBB202_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) -; RV64I-NEXT: addi a3, zero, 3 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s2 +; RV64I-NEXT: addi a3, zero, 3 ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB202_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB202_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: blt s1, a3, .LBB202_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: blt s0, a2, 
.LBB202_1 ; RV64I-NEXT: .LBB202_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB202_1 ; RV64I-NEXT: .LBB202_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -15284,43 +15249,41 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB203_3 +; RV32I-NEXT: bne a1, s0, .LBB203_3 ; RV32I-NEXT: j .LBB203_4 ; RV32I-NEXT: .LBB203_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a4, zero, 4 ; RV32I-NEXT: addi a5, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB203_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB203_4 +; RV32I-NEXT: beq a1, s0, .LBB203_4 ; RV32I-NEXT: .LBB203_3: # %atomicrmw.start -; RV32I-NEXT: slt a0, s1, a5 +; RV32I-NEXT: slt a0, s0, a1 ; RV32I-NEXT: j .LBB203_5 ; RV32I-NEXT: .LBB203_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB203_5: # %atomicrmw.start -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB203_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB203_1 ; RV32I-NEXT: .LBB203_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -15337,43 +15300,41 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB203_3 +; RV32IA-NEXT: bne a1, s0, .LBB203_3 ; RV32IA-NEXT: j .LBB203_4 ; RV32IA-NEXT: .LBB203_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 +; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: addi a4, zero, 4 ; RV32IA-NEXT: addi a5, zero, 2 -; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB203_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB203_4 +; RV32IA-NEXT: beq a1, s0, .LBB203_4 ; RV32IA-NEXT: .LBB203_3: # %atomicrmw.start -; RV32IA-NEXT: slt a0, s1, a5 +; RV32IA-NEXT: slt a0, s0, a1 ; RV32IA-NEXT: j .LBB203_5 ; RV32IA-NEXT: .LBB203_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; 
RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB203_5: # %atomicrmw.start -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB203_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB203_1 ; RV32IA-NEXT: .LBB203_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -15389,31 +15350,30 @@ define i64 @atomicrmw_max_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bge s1, a3, .LBB203_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bge s0, a2, .LBB203_3 ; RV64I-NEXT: .LBB203_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: addi a3, zero, 4 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB203_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB203_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: blt s1, a3, .LBB203_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: blt s0, a2, .LBB203_1 ; RV64I-NEXT: .LBB203_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB203_1 ; RV64I-NEXT: .LBB203_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -15438,43 +15398,41 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB204_3 +; RV32I-NEXT: bne a1, s0, .LBB204_3 ; RV32I-NEXT: j .LBB204_4 ; RV32I-NEXT: .LBB204_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a4, zero, 5 ; RV32I-NEXT: addi a5, zero, 5 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB204_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB204_4 +; RV32I-NEXT: beq a1, s0, .LBB204_4 ; RV32I-NEXT: .LBB204_3: # %atomicrmw.start -; RV32I-NEXT: slt a0, s1, a5 +; RV32I-NEXT: slt a0, s0, a1 ; RV32I-NEXT: j .LBB204_5 ; RV32I-NEXT: .LBB204_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB204_5: # %atomicrmw.start -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB204_1 ; RV32I-NEXT: # %bb.6: # 
%atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB204_1 ; RV32I-NEXT: .LBB204_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -15491,43 +15449,41 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB204_3 +; RV32IA-NEXT: bne a1, s0, .LBB204_3 ; RV32IA-NEXT: j .LBB204_4 ; RV32IA-NEXT: .LBB204_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 +; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: addi a4, zero, 5 ; RV32IA-NEXT: addi a5, zero, 5 -; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB204_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB204_4 +; RV32IA-NEXT: beq a1, s0, .LBB204_4 ; RV32IA-NEXT: .LBB204_3: # %atomicrmw.start -; RV32IA-NEXT: slt a0, s1, a5 +; RV32IA-NEXT: slt a0, s0, a1 ; RV32IA-NEXT: j .LBB204_5 ; RV32IA-NEXT: .LBB204_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB204_5: # %atomicrmw.start -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB204_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB204_1 ; RV32IA-NEXT: .LBB204_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -15543,31 +15499,30 @@ define i64 @atomicrmw_max_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bge s1, a3, .LBB204_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bge s0, a2, .LBB204_3 ; RV64I-NEXT: .LBB204_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: addi a3, zero, 5 ; RV64I-NEXT: addi a4, zero, 5 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB204_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB204_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: blt s1, a3, .LBB204_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: blt s0, a2, .LBB204_1 ; RV64I-NEXT: .LBB204_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB204_1 ; RV64I-NEXT: .LBB204_4: # %atomicrmw.end -; RV64I-NEXT: 
mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -15592,44 +15547,42 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB205_3 +; RV32I-NEXT: bne a1, s0, .LBB205_3 ; RV32I-NEXT: j .LBB205_4 ; RV32I-NEXT: .LBB205_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: mv a5, zero ; RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB205_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB205_4 +; RV32I-NEXT: beq a1, s0, .LBB205_4 ; RV32I-NEXT: .LBB205_3: # %atomicrmw.start -; RV32I-NEXT: slt a0, s1, a5 +; RV32I-NEXT: slt a0, s0, a1 ; RV32I-NEXT: j .LBB205_5 ; RV32I-NEXT: .LBB205_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB205_5: # %atomicrmw.start ; RV32I-NEXT: xori a0, a0, 1 -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB205_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB205_1 ; RV32I-NEXT: .LBB205_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -15646,44 +15599,42 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB205_3 +; RV32IA-NEXT: bne a1, s0, .LBB205_3 ; RV32IA-NEXT: j .LBB205_4 ; RV32IA-NEXT: .LBB205_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) -; RV32IA-NEXT: mv a0, s0 +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 ; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: mv a4, zero ; RV32IA-NEXT: mv a5, zero ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB205_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB205_4 +; RV32IA-NEXT: beq a1, s0, .LBB205_4 ; RV32IA-NEXT: .LBB205_3: # %atomicrmw.start -; RV32IA-NEXT: slt a0, s1, a5 +; RV32IA-NEXT: slt a0, s0, a1 ; RV32IA-NEXT: j .LBB205_5 ; RV32IA-NEXT: .LBB205_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB205_5: # %atomicrmw.start ; RV32IA-NEXT: xori a0, a0, 1 -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, 
.LBB205_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB205_1 ; RV32IA-NEXT: .LBB205_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -15699,31 +15650,30 @@ define i64 @atomicrmw_min_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: blt s1, a3, .LBB205_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: blt s0, a2, .LBB205_3 ; RV64I-NEXT: .LBB205_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: mv a3, zero ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB205_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB205_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bge s1, a3, .LBB205_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bge s0, a2, .LBB205_1 ; RV64I-NEXT: .LBB205_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB205_1 ; RV64I-NEXT: .LBB205_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -15748,44 +15698,42 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB206_3 +; RV32I-NEXT: bne a1, s0, .LBB206_3 ; RV32I-NEXT: j .LBB206_4 ; RV32I-NEXT: .LBB206_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: addi a5, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB206_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB206_4 +; RV32I-NEXT: beq a1, s0, .LBB206_4 ; RV32I-NEXT: .LBB206_3: # %atomicrmw.start -; RV32I-NEXT: slt a0, s1, a5 +; RV32I-NEXT: slt a0, s0, a1 ; RV32I-NEXT: j .LBB206_5 ; RV32I-NEXT: .LBB206_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB206_5: # %atomicrmw.start ; RV32I-NEXT: xori a0, a0, 1 -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB206_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB206_1 ; RV32I-NEXT: .LBB206_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; 
RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -15802,44 +15750,42 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB206_3 +; RV32IA-NEXT: bne a1, s0, .LBB206_3 ; RV32IA-NEXT: j .LBB206_4 ; RV32IA-NEXT: .LBB206_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 +; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: addi a4, zero, 2 ; RV32IA-NEXT: addi a5, zero, 2 -; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB206_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB206_4 +; RV32IA-NEXT: beq a1, s0, .LBB206_4 ; RV32IA-NEXT: .LBB206_3: # %atomicrmw.start -; RV32IA-NEXT: slt a0, s1, a5 +; RV32IA-NEXT: slt a0, s0, a1 ; RV32IA-NEXT: j .LBB206_5 ; RV32IA-NEXT: .LBB206_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB206_5: # %atomicrmw.start ; RV32IA-NEXT: xori a0, a0, 1 -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB206_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB206_1 ; RV32IA-NEXT: .LBB206_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -15855,31 +15801,30 @@ define i64 @atomicrmw_min_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: blt s1, a3, .LBB206_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: blt s0, a2, .LBB206_3 ; RV64I-NEXT: .LBB206_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: addi a3, zero, 2 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB206_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB206_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bge s1, a3, .LBB206_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bge s0, a2, .LBB206_1 ; RV64I-NEXT: .LBB206_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB206_1 ; RV64I-NEXT: .LBB206_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -15904,44 +15849,42 @@ define i64 
@atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB207_3 +; RV32I-NEXT: bne a1, s0, .LBB207_3 ; RV32I-NEXT: j .LBB207_4 ; RV32I-NEXT: .LBB207_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) -; RV32I-NEXT: addi a4, zero, 3 -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a4, zero, 3 ; RV32I-NEXT: mv a5, zero ; RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB207_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB207_4 +; RV32I-NEXT: beq a1, s0, .LBB207_4 ; RV32I-NEXT: .LBB207_3: # %atomicrmw.start -; RV32I-NEXT: slt a0, s1, a5 +; RV32I-NEXT: slt a0, s0, a1 ; RV32I-NEXT: j .LBB207_5 ; RV32I-NEXT: .LBB207_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB207_5: # %atomicrmw.start ; RV32I-NEXT: xori a0, a0, 1 -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB207_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB207_1 ; RV32I-NEXT: .LBB207_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -15958,44 +15901,42 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB207_3 +; RV32IA-NEXT: bne a1, s0, .LBB207_3 ; RV32IA-NEXT: j .LBB207_4 ; RV32IA-NEXT: .LBB207_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) -; RV32IA-NEXT: addi a4, zero, 3 -; RV32IA-NEXT: mv a0, s0 +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 ; RV32IA-NEXT: mv a1, s3 +; RV32IA-NEXT: addi a4, zero, 3 ; RV32IA-NEXT: mv a5, zero ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB207_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB207_4 +; RV32IA-NEXT: beq a1, s0, .LBB207_4 ; RV32IA-NEXT: .LBB207_3: # %atomicrmw.start -; RV32IA-NEXT: slt a0, s1, a5 +; RV32IA-NEXT: slt a0, s0, a1 ; RV32IA-NEXT: j .LBB207_5 ; RV32IA-NEXT: .LBB207_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB207_5: # %atomicrmw.start ; RV32IA-NEXT: xori a0, a0, 1 -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB207_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; 
RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB207_1 ; RV32IA-NEXT: .LBB207_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -16011,31 +15952,30 @@ define i64 @atomicrmw_min_i64_release(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: blt s1, a3, .LBB207_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: blt s0, a2, .LBB207_3 ; RV64I-NEXT: .LBB207_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) -; RV64I-NEXT: addi a3, zero, 3 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s2 +; RV64I-NEXT: addi a3, zero, 3 ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB207_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB207_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bge s1, a3, .LBB207_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bge s0, a2, .LBB207_1 ; RV64I-NEXT: .LBB207_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB207_1 ; RV64I-NEXT: .LBB207_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -16060,44 +16000,42 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB208_3 +; RV32I-NEXT: bne a1, s0, .LBB208_3 ; RV32I-NEXT: j .LBB208_4 ; RV32I-NEXT: .LBB208_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a4, zero, 4 ; RV32I-NEXT: addi a5, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB208_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB208_4 +; RV32I-NEXT: beq a1, s0, .LBB208_4 ; RV32I-NEXT: .LBB208_3: # %atomicrmw.start -; RV32I-NEXT: slt a0, s1, a5 +; RV32I-NEXT: slt a0, s0, a1 ; RV32I-NEXT: j .LBB208_5 ; RV32I-NEXT: .LBB208_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB208_5: # %atomicrmw.start ; RV32I-NEXT: xori a0, a0, 1 -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB208_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB208_1 ; RV32I-NEXT: .LBB208_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) 
; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -16114,44 +16052,42 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB208_3 +; RV32IA-NEXT: bne a1, s0, .LBB208_3 ; RV32IA-NEXT: j .LBB208_4 ; RV32IA-NEXT: .LBB208_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 +; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: addi a4, zero, 4 ; RV32IA-NEXT: addi a5, zero, 2 -; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB208_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB208_4 +; RV32IA-NEXT: beq a1, s0, .LBB208_4 ; RV32IA-NEXT: .LBB208_3: # %atomicrmw.start -; RV32IA-NEXT: slt a0, s1, a5 +; RV32IA-NEXT: slt a0, s0, a1 ; RV32IA-NEXT: j .LBB208_5 ; RV32IA-NEXT: .LBB208_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB208_5: # %atomicrmw.start ; RV32IA-NEXT: xori a0, a0, 1 -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB208_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB208_1 ; RV32IA-NEXT: .LBB208_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -16167,31 +16103,30 @@ define i64 @atomicrmw_min_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: blt s1, a3, .LBB208_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: blt s0, a2, .LBB208_3 ; RV64I-NEXT: .LBB208_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: addi a3, zero, 4 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB208_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB208_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bge s1, a3, .LBB208_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bge s0, a2, .LBB208_1 ; RV64I-NEXT: .LBB208_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB208_1 ; RV64I-NEXT: .LBB208_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -16216,44 +16151,42 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 
20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB209_3 +; RV32I-NEXT: bne a1, s0, .LBB209_3 ; RV32I-NEXT: j .LBB209_4 ; RV32I-NEXT: .LBB209_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a4, zero, 5 ; RV32I-NEXT: addi a5, zero, 5 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB209_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB209_4 +; RV32I-NEXT: beq a1, s0, .LBB209_4 ; RV32I-NEXT: .LBB209_3: # %atomicrmw.start -; RV32I-NEXT: slt a0, s1, a5 +; RV32I-NEXT: slt a0, s0, a1 ; RV32I-NEXT: j .LBB209_5 ; RV32I-NEXT: .LBB209_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB209_5: # %atomicrmw.start ; RV32I-NEXT: xori a0, a0, 1 -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB209_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB209_1 ; RV32I-NEXT: .LBB209_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -16270,44 +16203,42 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB209_3 +; RV32IA-NEXT: bne a1, s0, .LBB209_3 ; RV32IA-NEXT: j .LBB209_4 ; RV32IA-NEXT: .LBB209_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 +; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: addi a4, zero, 5 ; RV32IA-NEXT: addi a5, zero, 5 -; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB209_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB209_4 +; RV32IA-NEXT: beq a1, s0, .LBB209_4 ; RV32IA-NEXT: .LBB209_3: # %atomicrmw.start -; RV32IA-NEXT: slt a0, s1, a5 +; RV32IA-NEXT: slt a0, s0, a1 ; RV32IA-NEXT: j .LBB209_5 ; RV32IA-NEXT: .LBB209_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB209_5: # %atomicrmw.start ; RV32IA-NEXT: xori a0, a0, 1 -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB209_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB209_1 ; 
RV32IA-NEXT: .LBB209_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -16323,31 +16254,30 @@ define i64 @atomicrmw_min_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: blt s1, a3, .LBB209_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: blt s0, a2, .LBB209_3 ; RV64I-NEXT: .LBB209_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: addi a3, zero, 5 ; RV64I-NEXT: addi a4, zero, 5 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB209_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB209_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bge s1, a3, .LBB209_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bge s0, a2, .LBB209_1 ; RV64I-NEXT: .LBB209_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB209_1 ; RV64I-NEXT: .LBB209_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -16372,43 +16302,41 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB210_3 +; RV32I-NEXT: bne a1, s0, .LBB210_3 ; RV32I-NEXT: j .LBB210_4 ; RV32I-NEXT: .LBB210_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: mv a5, zero ; RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB210_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB210_4 +; RV32I-NEXT: beq a1, s0, .LBB210_4 ; RV32I-NEXT: .LBB210_3: # %atomicrmw.start -; RV32I-NEXT: sltu a0, s1, a5 +; RV32I-NEXT: sltu a0, s0, a1 ; RV32I-NEXT: j .LBB210_5 ; RV32I-NEXT: .LBB210_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB210_5: # %atomicrmw.start -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB210_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB210_1 ; RV32I-NEXT: .LBB210_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -16425,43 +16353,41 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) 
nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB210_3 +; RV32IA-NEXT: bne a1, s0, .LBB210_3 ; RV32IA-NEXT: j .LBB210_4 ; RV32IA-NEXT: .LBB210_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) -; RV32IA-NEXT: mv a0, s0 +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 ; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: mv a4, zero ; RV32IA-NEXT: mv a5, zero ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB210_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB210_4 +; RV32IA-NEXT: beq a1, s0, .LBB210_4 ; RV32IA-NEXT: .LBB210_3: # %atomicrmw.start -; RV32IA-NEXT: sltu a0, s1, a5 +; RV32IA-NEXT: sltu a0, s0, a1 ; RV32IA-NEXT: j .LBB210_5 ; RV32IA-NEXT: .LBB210_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB210_5: # %atomicrmw.start -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB210_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB210_1 ; RV32IA-NEXT: .LBB210_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -16477,31 +16403,30 @@ define i64 @atomicrmw_umax_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a3, .LBB210_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bgeu s0, a2, .LBB210_3 ; RV64I-NEXT: .LBB210_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: mv a3, zero ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB210_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB210_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a3, .LBB210_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bltu s0, a2, .LBB210_1 ; RV64I-NEXT: .LBB210_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB210_1 ; RV64I-NEXT: .LBB210_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -16526,43 +16451,41 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; 
RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB211_3 +; RV32I-NEXT: bne a1, s0, .LBB211_3 ; RV32I-NEXT: j .LBB211_4 ; RV32I-NEXT: .LBB211_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: addi a5, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB211_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB211_4 +; RV32I-NEXT: beq a1, s0, .LBB211_4 ; RV32I-NEXT: .LBB211_3: # %atomicrmw.start -; RV32I-NEXT: sltu a0, s1, a5 +; RV32I-NEXT: sltu a0, s0, a1 ; RV32I-NEXT: j .LBB211_5 ; RV32I-NEXT: .LBB211_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB211_5: # %atomicrmw.start -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB211_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB211_1 ; RV32I-NEXT: .LBB211_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -16579,43 +16502,41 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB211_3 +; RV32IA-NEXT: bne a1, s0, .LBB211_3 ; RV32IA-NEXT: j .LBB211_4 ; RV32IA-NEXT: .LBB211_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 +; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: addi a4, zero, 2 ; RV32IA-NEXT: addi a5, zero, 2 -; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB211_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB211_4 +; RV32IA-NEXT: beq a1, s0, .LBB211_4 ; RV32IA-NEXT: .LBB211_3: # %atomicrmw.start -; RV32IA-NEXT: sltu a0, s1, a5 +; RV32IA-NEXT: sltu a0, s0, a1 ; RV32IA-NEXT: j .LBB211_5 ; RV32IA-NEXT: .LBB211_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB211_5: # %atomicrmw.start -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB211_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB211_1 ; RV32IA-NEXT: .LBB211_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -16631,31 +16552,30 @@ define i64 @atomicrmw_umax_i64_acquire(i64 *%a, 
i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a3, .LBB211_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bgeu s0, a2, .LBB211_3 ; RV64I-NEXT: .LBB211_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: addi a3, zero, 2 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB211_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB211_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a3, .LBB211_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bltu s0, a2, .LBB211_1 ; RV64I-NEXT: .LBB211_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB211_1 ; RV64I-NEXT: .LBB211_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -16680,43 +16600,41 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB212_3 +; RV32I-NEXT: bne a1, s0, .LBB212_3 ; RV32I-NEXT: j .LBB212_4 ; RV32I-NEXT: .LBB212_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) -; RV32I-NEXT: addi a4, zero, 3 -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a4, zero, 3 ; RV32I-NEXT: mv a5, zero ; RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB212_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB212_4 +; RV32I-NEXT: beq a1, s0, .LBB212_4 ; RV32I-NEXT: .LBB212_3: # %atomicrmw.start -; RV32I-NEXT: sltu a0, s1, a5 +; RV32I-NEXT: sltu a0, s0, a1 ; RV32I-NEXT: j .LBB212_5 ; RV32I-NEXT: .LBB212_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB212_5: # %atomicrmw.start -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB212_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB212_1 ; RV32I-NEXT: .LBB212_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -16733,43 +16651,41 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; 
RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB212_3 +; RV32IA-NEXT: bne a1, s0, .LBB212_3 ; RV32IA-NEXT: j .LBB212_4 ; RV32IA-NEXT: .LBB212_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) -; RV32IA-NEXT: addi a4, zero, 3 -; RV32IA-NEXT: mv a0, s0 +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 ; RV32IA-NEXT: mv a1, s3 +; RV32IA-NEXT: addi a4, zero, 3 ; RV32IA-NEXT: mv a5, zero ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB212_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB212_4 +; RV32IA-NEXT: beq a1, s0, .LBB212_4 ; RV32IA-NEXT: .LBB212_3: # %atomicrmw.start -; RV32IA-NEXT: sltu a0, s1, a5 +; RV32IA-NEXT: sltu a0, s0, a1 ; RV32IA-NEXT: j .LBB212_5 ; RV32IA-NEXT: .LBB212_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB212_5: # %atomicrmw.start -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB212_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB212_1 ; RV32IA-NEXT: .LBB212_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -16785,31 +16701,30 @@ define i64 @atomicrmw_umax_i64_release(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a3, .LBB212_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bgeu s0, a2, .LBB212_3 ; RV64I-NEXT: .LBB212_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) -; RV64I-NEXT: addi a3, zero, 3 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s2 +; RV64I-NEXT: addi a3, zero, 3 ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB212_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB212_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a3, .LBB212_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bltu s0, a2, .LBB212_1 ; RV64I-NEXT: .LBB212_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB212_1 ; RV64I-NEXT: .LBB212_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -16834,43 +16749,41 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB213_3 +; RV32I-NEXT: bne a1, s0, 
.LBB213_3 ; RV32I-NEXT: j .LBB213_4 ; RV32I-NEXT: .LBB213_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a4, zero, 4 ; RV32I-NEXT: addi a5, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB213_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB213_4 +; RV32I-NEXT: beq a1, s0, .LBB213_4 ; RV32I-NEXT: .LBB213_3: # %atomicrmw.start -; RV32I-NEXT: sltu a0, s1, a5 +; RV32I-NEXT: sltu a0, s0, a1 ; RV32I-NEXT: j .LBB213_5 ; RV32I-NEXT: .LBB213_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB213_5: # %atomicrmw.start -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB213_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB213_1 ; RV32I-NEXT: .LBB213_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -16887,43 +16800,41 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB213_3 +; RV32IA-NEXT: bne a1, s0, .LBB213_3 ; RV32IA-NEXT: j .LBB213_4 ; RV32IA-NEXT: .LBB213_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 +; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: addi a4, zero, 4 ; RV32IA-NEXT: addi a5, zero, 2 -; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB213_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB213_4 +; RV32IA-NEXT: beq a1, s0, .LBB213_4 ; RV32IA-NEXT: .LBB213_3: # %atomicrmw.start -; RV32IA-NEXT: sltu a0, s1, a5 +; RV32IA-NEXT: sltu a0, s0, a1 ; RV32IA-NEXT: j .LBB213_5 ; RV32IA-NEXT: .LBB213_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB213_5: # %atomicrmw.start -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB213_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB213_1 ; RV32IA-NEXT: .LBB213_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -16939,31 +16850,30 @@ define i64 @atomicrmw_umax_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; 
RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a3, .LBB213_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bgeu s0, a2, .LBB213_3 ; RV64I-NEXT: .LBB213_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: addi a3, zero, 4 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB213_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB213_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a3, .LBB213_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bltu s0, a2, .LBB213_1 ; RV64I-NEXT: .LBB213_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB213_1 ; RV64I-NEXT: .LBB213_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -16988,43 +16898,41 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB214_3 +; RV32I-NEXT: bne a1, s0, .LBB214_3 ; RV32I-NEXT: j .LBB214_4 ; RV32I-NEXT: .LBB214_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a4, zero, 5 ; RV32I-NEXT: addi a5, zero, 5 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB214_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB214_4 +; RV32I-NEXT: beq a1, s0, .LBB214_4 ; RV32I-NEXT: .LBB214_3: # %atomicrmw.start -; RV32I-NEXT: sltu a0, s1, a5 +; RV32I-NEXT: sltu a0, s0, a1 ; RV32I-NEXT: j .LBB214_5 ; RV32I-NEXT: .LBB214_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB214_5: # %atomicrmw.start -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB214_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB214_1 ; RV32I-NEXT: .LBB214_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -17041,43 +16949,41 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, 
.LBB214_3 +; RV32IA-NEXT: bne a1, s0, .LBB214_3 ; RV32IA-NEXT: j .LBB214_4 ; RV32IA-NEXT: .LBB214_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 +; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: addi a4, zero, 5 ; RV32IA-NEXT: addi a5, zero, 5 -; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB214_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB214_4 +; RV32IA-NEXT: beq a1, s0, .LBB214_4 ; RV32IA-NEXT: .LBB214_3: # %atomicrmw.start -; RV32IA-NEXT: sltu a0, s1, a5 +; RV32IA-NEXT: sltu a0, s0, a1 ; RV32IA-NEXT: j .LBB214_5 ; RV32IA-NEXT: .LBB214_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB214_5: # %atomicrmw.start -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB214_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB214_1 ; RV32IA-NEXT: .LBB214_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -17093,31 +16999,30 @@ define i64 @atomicrmw_umax_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a3, .LBB214_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bgeu s0, a2, .LBB214_3 ; RV64I-NEXT: .LBB214_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: addi a3, zero, 5 ; RV64I-NEXT: addi a4, zero, 5 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB214_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB214_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a3, .LBB214_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bltu s0, a2, .LBB214_1 ; RV64I-NEXT: .LBB214_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB214_1 ; RV64I-NEXT: .LBB214_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -17142,44 +17047,42 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB215_3 +; RV32I-NEXT: bne a1, s0, .LBB215_3 ; RV32I-NEXT: j .LBB215_4 ; RV32I-NEXT: .LBB215_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) -; RV32I-NEXT: mv a0, s0 +; 
RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: mv a4, zero ; RV32I-NEXT: mv a5, zero ; RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB215_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB215_4 +; RV32I-NEXT: beq a1, s0, .LBB215_4 ; RV32I-NEXT: .LBB215_3: # %atomicrmw.start -; RV32I-NEXT: sltu a0, s1, a5 +; RV32I-NEXT: sltu a0, s0, a1 ; RV32I-NEXT: j .LBB215_5 ; RV32I-NEXT: .LBB215_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB215_5: # %atomicrmw.start ; RV32I-NEXT: xori a0, a0, 1 -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB215_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB215_1 ; RV32I-NEXT: .LBB215_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -17196,44 +17099,42 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB215_3 +; RV32IA-NEXT: bne a1, s0, .LBB215_3 ; RV32IA-NEXT: j .LBB215_4 ; RV32IA-NEXT: .LBB215_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) -; RV32IA-NEXT: mv a0, s0 +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 ; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: mv a4, zero ; RV32IA-NEXT: mv a5, zero ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB215_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB215_4 +; RV32IA-NEXT: beq a1, s0, .LBB215_4 ; RV32IA-NEXT: .LBB215_3: # %atomicrmw.start -; RV32IA-NEXT: sltu a0, s1, a5 +; RV32IA-NEXT: sltu a0, s0, a1 ; RV32IA-NEXT: j .LBB215_5 ; RV32IA-NEXT: .LBB215_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB215_5: # %atomicrmw.start ; RV32IA-NEXT: xori a0, a0, 1 -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB215_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB215_1 ; RV32IA-NEXT: .LBB215_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -17249,31 +17150,30 @@ define i64 @atomicrmw_umin_i64_monotonic(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a3, 
.LBB215_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bltu s0, a2, .LBB215_3 ; RV64I-NEXT: .LBB215_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: mv a3, zero ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB215_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB215_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a3, .LBB215_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bgeu s0, a2, .LBB215_1 ; RV64I-NEXT: .LBB215_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB215_1 ; RV64I-NEXT: .LBB215_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -17298,44 +17198,42 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB216_3 +; RV32I-NEXT: bne a1, s0, .LBB216_3 ; RV32I-NEXT: j .LBB216_4 ; RV32I-NEXT: .LBB216_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a4, zero, 2 ; RV32I-NEXT: addi a5, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB216_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB216_4 +; RV32I-NEXT: beq a1, s0, .LBB216_4 ; RV32I-NEXT: .LBB216_3: # %atomicrmw.start -; RV32I-NEXT: sltu a0, s1, a5 +; RV32I-NEXT: sltu a0, s0, a1 ; RV32I-NEXT: j .LBB216_5 ; RV32I-NEXT: .LBB216_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB216_5: # %atomicrmw.start ; RV32I-NEXT: xori a0, a0, 1 -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB216_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB216_1 ; RV32I-NEXT: .LBB216_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -17352,44 +17250,42 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB216_3 +; RV32IA-NEXT: bne a1, s0, .LBB216_3 ; RV32IA-NEXT: j .LBB216_4 ; RV32IA-NEXT: .LBB216_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) +; RV32IA-NEXT: 
sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 +; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: addi a4, zero, 2 ; RV32IA-NEXT: addi a5, zero, 2 -; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB216_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB216_4 +; RV32IA-NEXT: beq a1, s0, .LBB216_4 ; RV32IA-NEXT: .LBB216_3: # %atomicrmw.start -; RV32IA-NEXT: sltu a0, s1, a5 +; RV32IA-NEXT: sltu a0, s0, a1 ; RV32IA-NEXT: j .LBB216_5 ; RV32IA-NEXT: .LBB216_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB216_5: # %atomicrmw.start ; RV32IA-NEXT: xori a0, a0, 1 -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB216_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB216_1 ; RV32IA-NEXT: .LBB216_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -17405,31 +17301,30 @@ define i64 @atomicrmw_umin_i64_acquire(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a3, .LBB216_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bltu s0, a2, .LBB216_3 ; RV64I-NEXT: .LBB216_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: addi a3, zero, 2 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB216_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB216_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a3, .LBB216_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bgeu s0, a2, .LBB216_1 ; RV64I-NEXT: .LBB216_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB216_1 ; RV64I-NEXT: .LBB216_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -17454,44 +17349,42 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB217_3 +; RV32I-NEXT: bne a1, s0, .LBB217_3 ; RV32I-NEXT: j .LBB217_4 ; RV32I-NEXT: .LBB217_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) -; RV32I-NEXT: addi a4, zero, 3 -; RV32I-NEXT: mv a0, s0 +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 ; RV32I-NEXT: mv a1, s3 +; RV32I-NEXT: addi a4, zero, 3 ; RV32I-NEXT: mv a5, zero ; 
RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB217_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB217_4 +; RV32I-NEXT: beq a1, s0, .LBB217_4 ; RV32I-NEXT: .LBB217_3: # %atomicrmw.start -; RV32I-NEXT: sltu a0, s1, a5 +; RV32I-NEXT: sltu a0, s0, a1 ; RV32I-NEXT: j .LBB217_5 ; RV32I-NEXT: .LBB217_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB217_5: # %atomicrmw.start ; RV32I-NEXT: xori a0, a0, 1 -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB217_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB217_1 ; RV32I-NEXT: .LBB217_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -17508,44 +17401,42 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB217_3 +; RV32IA-NEXT: bne a1, s0, .LBB217_3 ; RV32IA-NEXT: j .LBB217_4 ; RV32IA-NEXT: .LBB217_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) -; RV32IA-NEXT: addi a4, zero, 3 -; RV32IA-NEXT: mv a0, s0 +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 ; RV32IA-NEXT: mv a1, s3 +; RV32IA-NEXT: addi a4, zero, 3 ; RV32IA-NEXT: mv a5, zero ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB217_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB217_4 +; RV32IA-NEXT: beq a1, s0, .LBB217_4 ; RV32IA-NEXT: .LBB217_3: # %atomicrmw.start -; RV32IA-NEXT: sltu a0, s1, a5 +; RV32IA-NEXT: sltu a0, s0, a1 ; RV32IA-NEXT: j .LBB217_5 ; RV32IA-NEXT: .LBB217_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB217_5: # %atomicrmw.start ; RV32IA-NEXT: xori a0, a0, 1 -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB217_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB217_1 ; RV32IA-NEXT: .LBB217_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -17561,31 +17452,30 @@ define i64 @atomicrmw_umin_i64_release(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a3, .LBB217_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bltu s0, a2, .LBB217_3 ; RV64I-NEXT: 
.LBB217_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) -; RV64I-NEXT: addi a3, zero, 3 -; RV64I-NEXT: mv a0, s0 +; RV64I-NEXT: mv a0, s1 ; RV64I-NEXT: mv a1, s2 +; RV64I-NEXT: addi a3, zero, 3 ; RV64I-NEXT: mv a4, zero ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB217_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB217_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a3, .LBB217_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bgeu s0, a2, .LBB217_1 ; RV64I-NEXT: .LBB217_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB217_1 ; RV64I-NEXT: .LBB217_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -17610,44 +17500,42 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB218_3 +; RV32I-NEXT: bne a1, s0, .LBB218_3 ; RV32I-NEXT: j .LBB218_4 ; RV32I-NEXT: .LBB218_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a4, zero, 4 ; RV32I-NEXT: addi a5, zero, 2 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: call __atomic_compare_exchange_8 -; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB218_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB218_4 +; RV32I-NEXT: beq a1, s0, .LBB218_4 ; RV32I-NEXT: .LBB218_3: # %atomicrmw.start -; RV32I-NEXT: sltu a0, s1, a5 +; RV32I-NEXT: sltu a0, s0, a1 ; RV32I-NEXT: j .LBB218_5 ; RV32I-NEXT: .LBB218_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB218_5: # %atomicrmw.start ; RV32I-NEXT: xori a0, a0, 1 -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB218_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB218_1 ; RV32I-NEXT: .LBB218_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -17664,44 +17552,42 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB218_3 +; RV32IA-NEXT: bne a1, s0, .LBB218_3 ; RV32IA-NEXT: j .LBB218_4 ; RV32IA-NEXT: .LBB218_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 +; 
RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: addi a4, zero, 4 ; RV32IA-NEXT: addi a5, zero, 2 -; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB218_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB218_4 +; RV32IA-NEXT: beq a1, s0, .LBB218_4 ; RV32IA-NEXT: .LBB218_3: # %atomicrmw.start -; RV32IA-NEXT: sltu a0, s1, a5 +; RV32IA-NEXT: sltu a0, s0, a1 ; RV32IA-NEXT: j .LBB218_5 ; RV32IA-NEXT: .LBB218_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB218_5: # %atomicrmw.start ; RV32IA-NEXT: xori a0, a0, 1 -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB218_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB218_1 ; RV32IA-NEXT: .LBB218_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -17717,31 +17603,30 @@ define i64 @atomicrmw_umin_i64_acq_rel(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a3, .LBB218_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bltu s0, a2, .LBB218_3 ; RV64I-NEXT: .LBB218_1: # %atomicrmw.start ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: addi a3, zero, 4 ; RV64I-NEXT: addi a4, zero, 2 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB218_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB218_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a3, .LBB218_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bgeu s0, a2, .LBB218_1 ; RV64I-NEXT: .LBB218_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB218_1 ; RV64I-NEXT: .LBB218_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) @@ -17766,44 +17651,42 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV32I-NEXT: sw s1, 20(sp) ; RV32I-NEXT: sw s2, 16(sp) ; RV32I-NEXT: sw s3, 12(sp) -; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: lw a5, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: mv s1, a2 +; RV32I-NEXT: mv s0, a2 ; RV32I-NEXT: mv s2, a1 +; RV32I-NEXT: mv s1, a0 +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: lw a2, 0(a0) ; RV32I-NEXT: mv s3, sp -; RV32I-NEXT: bne a5, s1, .LBB219_3 +; RV32I-NEXT: bne a1, s0, .LBB219_3 ; RV32I-NEXT: j .LBB219_4 ; RV32I-NEXT: .LBB219_1: # %atomicrmw.start -; RV32I-NEXT: sw a4, 0(sp) -; RV32I-NEXT: sw a5, 4(sp) +; RV32I-NEXT: sw a1, 4(sp) +; RV32I-NEXT: mv a0, s1 +; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: addi a4, zero, 5 ; RV32I-NEXT: addi a5, zero, 5 -; RV32I-NEXT: mv a0, s0 -; RV32I-NEXT: mv a1, s3 ; RV32I-NEXT: call __atomic_compare_exchange_8 
-; RV32I-NEXT: lw a5, 4(sp) -; RV32I-NEXT: lw a4, 0(sp) +; RV32I-NEXT: lw a1, 4(sp) +; RV32I-NEXT: lw a2, 0(sp) ; RV32I-NEXT: bnez a0, .LBB219_7 ; RV32I-NEXT: # %bb.2: # %atomicrmw.start -; RV32I-NEXT: beq a5, s1, .LBB219_4 +; RV32I-NEXT: beq a1, s0, .LBB219_4 ; RV32I-NEXT: .LBB219_3: # %atomicrmw.start -; RV32I-NEXT: sltu a0, s1, a5 +; RV32I-NEXT: sltu a0, s0, a1 ; RV32I-NEXT: j .LBB219_5 ; RV32I-NEXT: .LBB219_4: -; RV32I-NEXT: sltu a0, s2, a4 +; RV32I-NEXT: sltu a0, s2, a2 ; RV32I-NEXT: .LBB219_5: # %atomicrmw.start ; RV32I-NEXT: xori a0, a0, 1 -; RV32I-NEXT: mv a2, a4 -; RV32I-NEXT: mv a3, a5 +; RV32I-NEXT: sw a2, 0(sp) +; RV32I-NEXT: mv a3, a1 ; RV32I-NEXT: bnez a0, .LBB219_1 ; RV32I-NEXT: # %bb.6: # %atomicrmw.start ; RV32I-NEXT: mv a2, s2 -; RV32I-NEXT: mv a3, s1 +; RV32I-NEXT: mv a3, s0 ; RV32I-NEXT: j .LBB219_1 ; RV32I-NEXT: .LBB219_7: # %atomicrmw.end -; RV32I-NEXT: mv a0, a4 -; RV32I-NEXT: mv a1, a5 +; RV32I-NEXT: mv a0, a2 ; RV32I-NEXT: lw s3, 12(sp) ; RV32I-NEXT: lw s2, 16(sp) ; RV32I-NEXT: lw s1, 20(sp) @@ -17820,44 +17703,42 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV32IA-NEXT: sw s1, 20(sp) ; RV32IA-NEXT: sw s2, 16(sp) ; RV32IA-NEXT: sw s3, 12(sp) -; RV32IA-NEXT: mv s0, a0 -; RV32IA-NEXT: lw a5, 4(a0) -; RV32IA-NEXT: lw a4, 0(a0) -; RV32IA-NEXT: mv s1, a2 +; RV32IA-NEXT: mv s0, a2 ; RV32IA-NEXT: mv s2, a1 +; RV32IA-NEXT: mv s1, a0 +; RV32IA-NEXT: lw a1, 4(a0) +; RV32IA-NEXT: lw a2, 0(a0) ; RV32IA-NEXT: mv s3, sp -; RV32IA-NEXT: bne a5, s1, .LBB219_3 +; RV32IA-NEXT: bne a1, s0, .LBB219_3 ; RV32IA-NEXT: j .LBB219_4 ; RV32IA-NEXT: .LBB219_1: # %atomicrmw.start -; RV32IA-NEXT: sw a4, 0(sp) -; RV32IA-NEXT: sw a5, 4(sp) +; RV32IA-NEXT: sw a1, 4(sp) +; RV32IA-NEXT: mv a0, s1 +; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: addi a4, zero, 5 ; RV32IA-NEXT: addi a5, zero, 5 -; RV32IA-NEXT: mv a0, s0 -; RV32IA-NEXT: mv a1, s3 ; RV32IA-NEXT: call __atomic_compare_exchange_8 -; RV32IA-NEXT: lw a5, 4(sp) -; RV32IA-NEXT: lw a4, 0(sp) +; RV32IA-NEXT: lw a1, 4(sp) +; RV32IA-NEXT: lw a2, 0(sp) ; RV32IA-NEXT: bnez a0, .LBB219_7 ; RV32IA-NEXT: # %bb.2: # %atomicrmw.start -; RV32IA-NEXT: beq a5, s1, .LBB219_4 +; RV32IA-NEXT: beq a1, s0, .LBB219_4 ; RV32IA-NEXT: .LBB219_3: # %atomicrmw.start -; RV32IA-NEXT: sltu a0, s1, a5 +; RV32IA-NEXT: sltu a0, s0, a1 ; RV32IA-NEXT: j .LBB219_5 ; RV32IA-NEXT: .LBB219_4: -; RV32IA-NEXT: sltu a0, s2, a4 +; RV32IA-NEXT: sltu a0, s2, a2 ; RV32IA-NEXT: .LBB219_5: # %atomicrmw.start ; RV32IA-NEXT: xori a0, a0, 1 -; RV32IA-NEXT: mv a2, a4 -; RV32IA-NEXT: mv a3, a5 +; RV32IA-NEXT: sw a2, 0(sp) +; RV32IA-NEXT: mv a3, a1 ; RV32IA-NEXT: bnez a0, .LBB219_1 ; RV32IA-NEXT: # %bb.6: # %atomicrmw.start ; RV32IA-NEXT: mv a2, s2 -; RV32IA-NEXT: mv a3, s1 +; RV32IA-NEXT: mv a3, s0 ; RV32IA-NEXT: j .LBB219_1 ; RV32IA-NEXT: .LBB219_7: # %atomicrmw.end -; RV32IA-NEXT: mv a0, a4 -; RV32IA-NEXT: mv a1, a5 +; RV32IA-NEXT: mv a0, a2 ; RV32IA-NEXT: lw s3, 12(sp) ; RV32IA-NEXT: lw s2, 16(sp) ; RV32IA-NEXT: lw s1, 20(sp) @@ -17873,31 +17754,30 @@ define i64 @atomicrmw_umin_i64_seq_cst(i64 *%a, i64 %b) nounwind { ; RV64I-NEXT: sd s0, 32(sp) ; RV64I-NEXT: sd s1, 24(sp) ; RV64I-NEXT: sd s2, 16(sp) -; RV64I-NEXT: mv s0, a0 -; RV64I-NEXT: ld a3, 0(a0) -; RV64I-NEXT: mv s1, a1 +; RV64I-NEXT: mv s0, a1 +; RV64I-NEXT: mv s1, a0 +; RV64I-NEXT: ld a2, 0(a0) ; RV64I-NEXT: addi s2, sp, 8 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bltu s1, a3, .LBB219_3 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bltu s0, a2, .LBB219_3 ; RV64I-NEXT: .LBB219_1: # %atomicrmw.start ; RV64I-NEXT: # 
=>This Inner Loop Header: Depth=1 -; RV64I-NEXT: sd a3, 8(sp) +; RV64I-NEXT: mv a0, s1 +; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: addi a3, zero, 5 ; RV64I-NEXT: addi a4, zero, 5 -; RV64I-NEXT: mv a0, s0 -; RV64I-NEXT: mv a1, s2 ; RV64I-NEXT: call __atomic_compare_exchange_8 -; RV64I-NEXT: ld a3, 8(sp) +; RV64I-NEXT: ld a2, 8(sp) ; RV64I-NEXT: bnez a0, .LBB219_4 ; RV64I-NEXT: # %bb.2: # %atomicrmw.start ; RV64I-NEXT: # in Loop: Header=BB219_1 Depth=1 -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bgeu s1, a3, .LBB219_1 +; RV64I-NEXT: sd a2, 8(sp) +; RV64I-NEXT: bgeu s0, a2, .LBB219_1 ; RV64I-NEXT: .LBB219_3: # %atomicrmw.start -; RV64I-NEXT: mv a2, s1 +; RV64I-NEXT: mv a2, s0 ; RV64I-NEXT: j .LBB219_1 ; RV64I-NEXT: .LBB219_4: # %atomicrmw.end -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a0, a2 ; RV64I-NEXT: ld s2, 16(sp) ; RV64I-NEXT: ld s1, 24(sp) ; RV64I-NEXT: ld s0, 32(sp) diff --git a/llvm/test/CodeGen/RISCV/bare-select.ll b/llvm/test/CodeGen/RISCV/bare-select.ll index cf8fe96..1b8f2f6 100644 --- a/llvm/test/CodeGen/RISCV/bare-select.ll +++ b/llvm/test/CodeGen/RISCV/bare-select.ll @@ -5,12 +5,12 @@ define i32 @bare_select(i1 %a, i32 %b, i32 %c) nounwind { ; RV32I-LABEL: bare_select: ; RV32I: # %bb.0: -; RV32I-NEXT: andi a3, a0, 1 -; RV32I-NEXT: mv a0, a1 -; RV32I-NEXT: bnez a3, .LBB0_2 +; RV32I-NEXT: andi a0, a0, 1 +; RV32I-NEXT: bnez a0, .LBB0_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: mv a1, a2 ; RV32I-NEXT: .LBB0_2: +; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: ret %1 = select i1 %a, i32 %b, i32 %c ret i32 %1 @@ -19,12 +19,12 @@ define i32 @bare_select(i1 %a, i32 %b, i32 %c) nounwind { define float @bare_select_float(i1 %a, float %b, float %c) nounwind { ; RV32I-LABEL: bare_select_float: ; RV32I: # %bb.0: -; RV32I-NEXT: andi a3, a0, 1 -; RV32I-NEXT: mv a0, a1 -; RV32I-NEXT: bnez a3, .LBB1_2 +; RV32I-NEXT: andi a0, a0, 1 +; RV32I-NEXT: bnez a0, .LBB1_2 ; RV32I-NEXT: # %bb.1: -; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: mv a1, a2 ; RV32I-NEXT: .LBB1_2: +; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: ret %1 = select i1 %a, float %b, float %c ret float %1 diff --git a/llvm/test/CodeGen/RISCV/blockaddress.ll b/llvm/test/CodeGen/RISCV/blockaddress.ll index d70f314..5b7f818 100644 --- a/llvm/test/CodeGen/RISCV/blockaddress.ll +++ b/llvm/test/CodeGen/RISCV/blockaddress.ll @@ -9,11 +9,11 @@ define void @test_blockaddress() nounwind { ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) -; RV32I-NEXT: lui a0, %hi(addr) -; RV32I-NEXT: lui a1, %hi(.Ltmp0) -; RV32I-NEXT: addi a1, a1, %lo(.Ltmp0) -; RV32I-NEXT: sw a1, %lo(addr)(a0) -; RV32I-NEXT: lw a0, %lo(addr)(a0) +; RV32I-NEXT: lui a0, %hi(.Ltmp0) +; RV32I-NEXT: addi a0, a0, %lo(.Ltmp0) +; RV32I-NEXT: lui a1, %hi(addr) +; RV32I-NEXT: sw a0, %lo(addr)(a1) +; RV32I-NEXT: lw a0, %lo(addr)(a1) ; RV32I-NEXT: jr a0 ; RV32I-NEXT: .Ltmp0: # Block address taken ; RV32I-NEXT: .LBB0_1: # %block diff --git a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll index d896d9f..0a29f24 100644 --- a/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll +++ b/llvm/test/CodeGen/RISCV/bswap-ctlz-cttz-ctpop.ll @@ -29,10 +29,10 @@ define i16 @test_bswap_i16(i16 %a) nounwind { define i32 @test_bswap_i32(i32 %a) nounwind { ; RV32I-LABEL: test_bswap_i32: ; RV32I: # %bb.0: -; RV32I-NEXT: srli a1, a0, 8 -; RV32I-NEXT: lui a2, 16 -; RV32I-NEXT: addi a2, a2, -256 -; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: lui a1, 16 +; RV32I-NEXT: addi a1, a1, -256 +; RV32I-NEXT: srli a2, a0, 8 +; RV32I-NEXT: and a1, a2, a1 ; 
RV32I-NEXT: srli a2, a0, 24 ; RV32I-NEXT: or a1, a1, a2 ; RV32I-NEXT: slli a2, a0, 8 @@ -49,9 +49,9 @@ define i32 @test_bswap_i32(i32 %a) nounwind { define i64 @test_bswap_i64(i64 %a) nounwind { ; RV32I-LABEL: test_bswap_i64: ; RV32I: # %bb.0: +; RV32I-NEXT: lui a2, 16 +; RV32I-NEXT: addi a3, a2, -256 ; RV32I-NEXT: srli a2, a1, 8 -; RV32I-NEXT: lui a3, 16 -; RV32I-NEXT: addi a3, a3, -256 ; RV32I-NEXT: and a2, a2, a3 ; RV32I-NEXT: srli a4, a1, 24 ; RV32I-NEXT: or a2, a2, a4 @@ -87,10 +87,10 @@ define i8 @test_cttz_i8(i8 %a) nounwind { ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: not a0, a0 ; RV32I-NEXT: and a0, a0, a1 -; RV32I-NEXT: srli a1, a0, 1 -; RV32I-NEXT: lui a2, 349525 -; RV32I-NEXT: addi a2, a2, 1365 -; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: lui a1, 349525 +; RV32I-NEXT: addi a1, a1, 1365 +; RV32I-NEXT: srli a2, a0, 1 +; RV32I-NEXT: and a1, a2, a1 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: lui a1, 209715 ; RV32I-NEXT: addi a1, a1, 819 @@ -131,10 +131,10 @@ define i16 @test_cttz_i16(i16 %a) nounwind { ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: not a0, a0 ; RV32I-NEXT: and a0, a0, a1 -; RV32I-NEXT: srli a1, a0, 1 -; RV32I-NEXT: lui a2, 349525 -; RV32I-NEXT: addi a2, a2, 1365 -; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: lui a1, 349525 +; RV32I-NEXT: addi a1, a1, 1365 +; RV32I-NEXT: srli a2, a0, 1 +; RV32I-NEXT: and a1, a2, a1 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: lui a1, 209715 ; RV32I-NEXT: addi a1, a1, 819 @@ -172,10 +172,10 @@ define i32 @test_cttz_i32(i32 %a) nounwind { ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: not a0, a0 ; RV32I-NEXT: and a0, a0, a1 -; RV32I-NEXT: srli a1, a0, 1 -; RV32I-NEXT: lui a2, 349525 -; RV32I-NEXT: addi a2, a2, 1365 -; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: lui a1, 349525 +; RV32I-NEXT: addi a1, a1, 1365 +; RV32I-NEXT: srli a2, a0, 1 +; RV32I-NEXT: and a1, a2, a1 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: lui a1, 209715 ; RV32I-NEXT: addi a1, a1, 819 @@ -220,11 +220,11 @@ define i32 @test_ctlz_i32(i32 %a) nounwind { ; RV32I-NEXT: or a0, a0, a1 ; RV32I-NEXT: srli a1, a0, 16 ; RV32I-NEXT: or a0, a0, a1 +; RV32I-NEXT: lui a1, 349525 +; RV32I-NEXT: addi a1, a1, 1365 ; RV32I-NEXT: not a0, a0 -; RV32I-NEXT: srli a1, a0, 1 -; RV32I-NEXT: lui a2, 349525 -; RV32I-NEXT: addi a2, a2, 1365 -; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: srli a2, a0, 1 +; RV32I-NEXT: and a1, a2, a1 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: lui a1, 209715 ; RV32I-NEXT: addi a1, a1, 819 @@ -269,9 +269,9 @@ define i64 @test_cttz_i64(i64 %a) nounwind { ; RV32I-NEXT: addi a0, a0, -1 ; RV32I-NEXT: not a1, s4 ; RV32I-NEXT: and a0, a1, a0 +; RV32I-NEXT: lui a1, 349525 +; RV32I-NEXT: addi s5, a1, 1365 ; RV32I-NEXT: srli a1, a0, 1 -; RV32I-NEXT: lui a2, 349525 -; RV32I-NEXT: addi s5, a2, 1365 ; RV32I-NEXT: and a1, a1, s5 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: lui a1, 209715 @@ -282,12 +282,12 @@ define i64 @test_cttz_i64(i64 %a) nounwind { ; RV32I-NEXT: add a0, a1, a0 ; RV32I-NEXT: srli a1, a0, 4 ; RV32I-NEXT: add a0, a0, a1 -; RV32I-NEXT: lui a1, 61681 -; RV32I-NEXT: addi s6, a1, -241 -; RV32I-NEXT: and a0, a0, s6 ; RV32I-NEXT: lui a1, 4112 -; RV32I-NEXT: addi s1, a1, 257 -; RV32I-NEXT: mv a1, s1 +; RV32I-NEXT: addi s6, a1, 257 +; RV32I-NEXT: lui a1, 61681 +; RV32I-NEXT: addi s1, a1, -241 +; RV32I-NEXT: and a0, a0, s1 +; RV32I-NEXT: mv a1, s6 ; RV32I-NEXT: call __mulsi3 ; RV32I-NEXT: mv s2, a0 ; RV32I-NEXT: addi a0, s3, -1 @@ -302,8 +302,8 @@ define i64 @test_cttz_i64(i64 %a) nounwind { ; RV32I-NEXT: add a0, a1, a0 ; RV32I-NEXT: srli a1, a0, 4 ; RV32I-NEXT: add a0, a0, a1 -; 
RV32I-NEXT: and a0, a0, s6 -; RV32I-NEXT: mv a1, s1 +; RV32I-NEXT: and a0, a0, s1 +; RV32I-NEXT: mv a1, s6 ; RV32I-NEXT: call __mulsi3 ; RV32I-NEXT: bnez s4, .LBB7_2 ; RV32I-NEXT: # %bb.1: @@ -336,10 +336,10 @@ define i8 @test_cttz_i8_zero_undef(i8 %a) nounwind { ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: not a0, a0 ; RV32I-NEXT: and a0, a0, a1 -; RV32I-NEXT: srli a1, a0, 1 -; RV32I-NEXT: lui a2, 349525 -; RV32I-NEXT: addi a2, a2, 1365 -; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: lui a1, 349525 +; RV32I-NEXT: addi a1, a1, 1365 +; RV32I-NEXT: srli a2, a0, 1 +; RV32I-NEXT: and a1, a2, a1 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: lui a1, 209715 ; RV32I-NEXT: addi a1, a1, 819 @@ -371,10 +371,10 @@ define i16 @test_cttz_i16_zero_undef(i16 %a) nounwind { ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: not a0, a0 ; RV32I-NEXT: and a0, a0, a1 -; RV32I-NEXT: srli a1, a0, 1 -; RV32I-NEXT: lui a2, 349525 -; RV32I-NEXT: addi a2, a2, 1365 -; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: lui a1, 349525 +; RV32I-NEXT: addi a1, a1, 1365 +; RV32I-NEXT: srli a2, a0, 1 +; RV32I-NEXT: and a1, a2, a1 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: lui a1, 209715 ; RV32I-NEXT: addi a1, a1, 819 @@ -406,10 +406,10 @@ define i32 @test_cttz_i32_zero_undef(i32 %a) nounwind { ; RV32I-NEXT: addi a1, a0, -1 ; RV32I-NEXT: not a0, a0 ; RV32I-NEXT: and a0, a0, a1 -; RV32I-NEXT: srli a1, a0, 1 -; RV32I-NEXT: lui a2, 349525 -; RV32I-NEXT: addi a2, a2, 1365 -; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: lui a1, 349525 +; RV32I-NEXT: addi a1, a1, 1365 +; RV32I-NEXT: srli a2, a0, 1 +; RV32I-NEXT: and a1, a2, a1 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: lui a1, 209715 ; RV32I-NEXT: addi a1, a1, 819 @@ -450,9 +450,9 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind { ; RV32I-NEXT: addi a0, a0, -1 ; RV32I-NEXT: not a1, s4 ; RV32I-NEXT: and a0, a1, a0 +; RV32I-NEXT: lui a1, 349525 +; RV32I-NEXT: addi s5, a1, 1365 ; RV32I-NEXT: srli a1, a0, 1 -; RV32I-NEXT: lui a2, 349525 -; RV32I-NEXT: addi s5, a2, 1365 ; RV32I-NEXT: and a1, a1, s5 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: lui a1, 209715 @@ -463,12 +463,12 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind { ; RV32I-NEXT: add a0, a1, a0 ; RV32I-NEXT: srli a1, a0, 4 ; RV32I-NEXT: add a0, a0, a1 -; RV32I-NEXT: lui a1, 61681 -; RV32I-NEXT: addi s6, a1, -241 -; RV32I-NEXT: and a0, a0, s6 ; RV32I-NEXT: lui a1, 4112 -; RV32I-NEXT: addi s1, a1, 257 -; RV32I-NEXT: mv a1, s1 +; RV32I-NEXT: addi s6, a1, 257 +; RV32I-NEXT: lui a1, 61681 +; RV32I-NEXT: addi s1, a1, -241 +; RV32I-NEXT: and a0, a0, s1 +; RV32I-NEXT: mv a1, s6 ; RV32I-NEXT: call __mulsi3 ; RV32I-NEXT: mv s2, a0 ; RV32I-NEXT: addi a0, s3, -1 @@ -483,8 +483,8 @@ define i64 @test_cttz_i64_zero_undef(i64 %a) nounwind { ; RV32I-NEXT: add a0, a1, a0 ; RV32I-NEXT: srli a1, a0, 4 ; RV32I-NEXT: add a0, a0, a1 -; RV32I-NEXT: and a0, a0, s6 -; RV32I-NEXT: mv a1, s1 +; RV32I-NEXT: and a0, a0, s1 +; RV32I-NEXT: mv a1, s6 ; RV32I-NEXT: call __mulsi3 ; RV32I-NEXT: bnez s4, .LBB11_2 ; RV32I-NEXT: # %bb.1: @@ -514,10 +514,10 @@ define i32 @test_ctpop_i32(i32 %a) nounwind { ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) -; RV32I-NEXT: srli a1, a0, 1 -; RV32I-NEXT: lui a2, 349525 -; RV32I-NEXT: addi a2, a2, 1365 -; RV32I-NEXT: and a1, a1, a2 +; RV32I-NEXT: lui a1, 349525 +; RV32I-NEXT: addi a1, a1, 1365 +; RV32I-NEXT: srli a2, a0, 1 +; RV32I-NEXT: and a1, a2, a1 ; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: lui a1, 209715 ; RV32I-NEXT: addi a1, a1, 819 diff --git a/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll 
b/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll index 56d3ff0..ac1f4da 100644 --- a/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll +++ b/llvm/test/CodeGen/RISCV/callee-saved-fpr32s.ll @@ -23,8 +23,8 @@ define void @callee() nounwind { ; ILP32-LP64-LABEL: callee: ; ILP32-LP64: # %bb.0: ; ILP32-LP64-NEXT: lui a0, %hi(var) -; ILP32-LP64-NEXT: flw ft0, %lo(var)(a0) ; ILP32-LP64-NEXT: addi a1, a0, %lo(var) +; ILP32-LP64-NEXT: flw ft0, %lo(var)(a0) ; ILP32-LP64-NEXT: flw ft1, 4(a1) ; ILP32-LP64-NEXT: flw ft2, 8(a1) ; ILP32-LP64-NEXT: flw ft3, 12(a1) @@ -52,14 +52,14 @@ define void @callee() nounwind { ; ILP32-LP64-NEXT: flw fs5, 100(a1) ; ILP32-LP64-NEXT: flw fs6, 104(a1) ; ILP32-LP64-NEXT: flw fs7, 108(a1) -; ILP32-LP64-NEXT: flw fs8, 124(a1) -; ILP32-LP64-NEXT: flw fs9, 120(a1) -; ILP32-LP64-NEXT: flw fs10, 116(a1) -; ILP32-LP64-NEXT: flw fs11, 112(a1) -; ILP32-LP64-NEXT: fsw fs8, 124(a1) -; ILP32-LP64-NEXT: fsw fs9, 120(a1) -; ILP32-LP64-NEXT: fsw fs10, 116(a1) -; ILP32-LP64-NEXT: fsw fs11, 112(a1) +; ILP32-LP64-NEXT: flw fs8, 112(a1) +; ILP32-LP64-NEXT: flw fs9, 116(a1) +; ILP32-LP64-NEXT: flw fs10, 120(a1) +; ILP32-LP64-NEXT: flw fs11, 124(a1) +; ILP32-LP64-NEXT: fsw fs11, 124(a1) +; ILP32-LP64-NEXT: fsw fs10, 120(a1) +; ILP32-LP64-NEXT: fsw fs9, 116(a1) +; ILP32-LP64-NEXT: fsw fs8, 112(a1) ; ILP32-LP64-NEXT: fsw fs7, 108(a1) ; ILP32-LP64-NEXT: fsw fs6, 104(a1) ; ILP32-LP64-NEXT: fsw fs5, 100(a1) @@ -106,7 +106,6 @@ define void @callee() nounwind { ; ILP32F-LP64F-NEXT: fsw fs10, 4(sp) ; ILP32F-LP64F-NEXT: fsw fs11, 0(sp) ; ILP32F-LP64F-NEXT: lui a0, %hi(var) -; ILP32F-LP64F-NEXT: flw ft0, %lo(var)(a0) ; ILP32F-LP64F-NEXT: addi a1, a0, %lo(var) ; ; ILP32D-LP64D-LABEL: callee: @@ -125,7 +124,6 @@ define void @callee() nounwind { ; ILP32D-LP64D-NEXT: fsd fs10, 8(sp) ; ILP32D-LP64D-NEXT: fsd fs11, 0(sp) ; ILP32D-LP64D-NEXT: lui a0, %hi(var) -; ILP32D-LP64D-NEXT: flw ft0, %lo(var)(a0) ; ILP32D-LP64D-NEXT: addi a1, a0, %lo(var) %val = load [32 x float], [32 x float]* @var store volatile [32 x float] %val, [32 x float]* @var diff --git a/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll b/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll index f95bc45..cc51b10 100644 --- a/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll +++ b/llvm/test/CodeGen/RISCV/callee-saved-fpr64s.ll @@ -19,8 +19,8 @@ define void @callee() nounwind { ; ILP32-LP64-LABEL: callee: ; ILP32-LP64: # %bb.0: ; ILP32-LP64-NEXT: lui a0, %hi(var) -; ILP32-LP64-NEXT: fld ft0, %lo(var)(a0) ; ILP32-LP64-NEXT: addi a1, a0, %lo(var) +; ILP32-LP64-NEXT: fld ft0, %lo(var)(a0) ; ILP32-LP64-NEXT: fld ft1, 8(a1) ; ILP32-LP64-NEXT: fld ft2, 16(a1) ; ILP32-LP64-NEXT: fld ft3, 24(a1) @@ -48,14 +48,14 @@ define void @callee() nounwind { ; ILP32-LP64-NEXT: fld fs5, 200(a1) ; ILP32-LP64-NEXT: fld fs6, 208(a1) ; ILP32-LP64-NEXT: fld fs7, 216(a1) -; ILP32-LP64-NEXT: fld fs8, 248(a1) -; ILP32-LP64-NEXT: fld fs9, 240(a1) -; ILP32-LP64-NEXT: fld fs10, 232(a1) -; ILP32-LP64-NEXT: fld fs11, 224(a1) -; ILP32-LP64-NEXT: fsd fs8, 248(a1) -; ILP32-LP64-NEXT: fsd fs9, 240(a1) -; ILP32-LP64-NEXT: fsd fs10, 232(a1) -; ILP32-LP64-NEXT: fsd fs11, 224(a1) +; ILP32-LP64-NEXT: fld fs8, 224(a1) +; ILP32-LP64-NEXT: fld fs9, 232(a1) +; ILP32-LP64-NEXT: fld fs10, 240(a1) +; ILP32-LP64-NEXT: fld fs11, 248(a1) +; ILP32-LP64-NEXT: fsd fs11, 248(a1) +; ILP32-LP64-NEXT: fsd fs10, 240(a1) +; ILP32-LP64-NEXT: fsd fs9, 232(a1) +; ILP32-LP64-NEXT: fsd fs8, 224(a1) ; ILP32-LP64-NEXT: fsd fs7, 216(a1) ; ILP32-LP64-NEXT: fsd fs6, 208(a1) ; ILP32-LP64-NEXT: fsd fs5, 200(a1) @@ -102,7 +102,6 @@ 
define void @callee() nounwind { ; ILP32D-LP64D-NEXT: fsd fs10, 8(sp) ; ILP32D-LP64D-NEXT: fsd fs11, 0(sp) ; ILP32D-LP64D-NEXT: lui a0, %hi(var) -; ILP32D-LP64D-NEXT: fld ft0, %lo(var)(a0) ; ILP32D-LP64D-NEXT: addi a1, a0, %lo(var) %val = load [32 x double], [32 x double]* @var store volatile [32 x double] %val, [32 x double]* @var diff --git a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll index 3cfb58d..ad8f5a3 100644 --- a/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll +++ b/llvm/test/CodeGen/RISCV/callee-saved-gprs.ll @@ -41,9 +41,7 @@ define void @callee() nounwind { ; RV32I-NEXT: sw s10, 36(sp) ; RV32I-NEXT: sw s11, 32(sp) ; RV32I-NEXT: lui a0, %hi(var) -; RV32I-NEXT: lw a1, %lo(var)(a0) -; RV32I-NEXT: sw a1, 28(sp) -; RV32I-NEXT: addi a2, a0, %lo(var) +; RV32I-NEXT: addi a1, a0, %lo(var) ; ; RV32I-WITH-FP-LABEL: callee: ; RV32I-WITH-FP: # %bb.0: @@ -63,9 +61,7 @@ define void @callee() nounwind { ; RV32I-WITH-FP-NEXT: sw s11, 28(sp) ; RV32I-WITH-FP-NEXT: addi s0, sp, 80 ; RV32I-WITH-FP-NEXT: lui a0, %hi(var) -; RV32I-WITH-FP-NEXT: lw a1, %lo(var)(a0) -; RV32I-WITH-FP-NEXT: sw a1, -56(s0) -; RV32I-WITH-FP-NEXT: addi a2, a0, %lo(var) +; RV32I-WITH-FP-NEXT: addi a1, a0, %lo(var) ; ; RV64I-LABEL: callee: ; RV64I: # %bb.0: @@ -83,9 +79,7 @@ define void @callee() nounwind { ; RV64I-NEXT: sd s10, 56(sp) ; RV64I-NEXT: sd s11, 48(sp) ; RV64I-NEXT: lui a0, %hi(var) -; RV64I-NEXT: lw a1, %lo(var)(a0) -; RV64I-NEXT: sd a1, 40(sp) -; RV64I-NEXT: addi a2, a0, %lo(var) +; RV64I-NEXT: addi a1, a0, %lo(var) ; ; RV64I-WITH-FP-LABEL: callee: ; RV64I-WITH-FP: # %bb.0: @@ -105,9 +99,7 @@ define void @callee() nounwind { ; RV64I-WITH-FP-NEXT: sd s11, 56(sp) ; RV64I-WITH-FP-NEXT: addi s0, sp, 160 ; RV64I-WITH-FP-NEXT: lui a0, %hi(var) -; RV64I-WITH-FP-NEXT: lw a1, %lo(var)(a0) -; RV64I-WITH-FP-NEXT: sd a1, -112(s0) -; RV64I-WITH-FP-NEXT: addi a2, a0, %lo(var) +; RV64I-WITH-FP-NEXT: addi a1, a0, %lo(var) %val = load [32 x i32], [32 x i32]* @var store volatile [32 x i32] %val, [32 x i32]* @var ret void @@ -119,41 +111,36 @@ define void @callee() nounwind { define void @caller() nounwind { ; RV32I-LABEL: caller: ; RV32I: lui a0, %hi(var) -; RV32I-NEXT: lw a1, %lo(var)(a0) -; RV32I-NEXT: sw a1, 88(sp) -; RV32I-NEXT: addi s0, a0, %lo(var) - +; RV32I-NEXT: addi s1, a0, %lo(var) ; RV32I: sw a0, 8(sp) -; RV32I-NEXT: lw s2, 84(s0) -; RV32I-NEXT: lw s3, 88(s0) -; RV32I-NEXT: lw s4, 92(s0) -; RV32I-NEXT: lw s5, 96(s0) -; RV32I-NEXT: lw s6, 100(s0) -; RV32I-NEXT: lw s7, 104(s0) -; RV32I-NEXT: lw s8, 108(s0) -; RV32I-NEXT: lw s9, 112(s0) -; RV32I-NEXT: lw s10, 116(s0) -; RV32I-NEXT: lw s11, 120(s0) -; RV32I-NEXT: lw s1, 124(s0) +; RV32I-NEXT: lw s2, 84(s1) +; RV32I-NEXT: lw s3, 88(s1) +; RV32I-NEXT: lw s4, 92(s1) +; RV32I-NEXT: lw s5, 96(s1) +; RV32I-NEXT: lw s6, 100(s1) +; RV32I-NEXT: lw s7, 104(s1) +; RV32I-NEXT: lw s8, 108(s1) +; RV32I-NEXT: lw s9, 112(s1) +; RV32I-NEXT: lw s10, 116(s1) +; RV32I-NEXT: lw s11, 120(s1) +; RV32I-NEXT: lw s0, 124(s1) ; RV32I-NEXT: call callee -; RV32I-NEXT: sw s1, 124(s0) -; RV32I-NEXT: sw s11, 120(s0) -; RV32I-NEXT: sw s10, 116(s0) -; RV32I-NEXT: sw s9, 112(s0) -; RV32I-NEXT: sw s8, 108(s0) -; RV32I-NEXT: sw s7, 104(s0) -; RV32I-NEXT: sw s6, 100(s0) -; RV32I-NEXT: sw s5, 96(s0) -; RV32I-NEXT: sw s4, 92(s0) -; RV32I-NEXT: sw s3, 88(s0) -; RV32I-NEXT: sw s2, 84(s0) +; RV32I-NEXT: sw s0, 124(s1) +; RV32I-NEXT: sw s11, 120(s1) +; RV32I-NEXT: sw s10, 116(s1) +; RV32I-NEXT: sw s9, 112(s1) +; RV32I-NEXT: sw s8, 108(s1) +; RV32I-NEXT: sw s7, 104(s1) 
+; RV32I-NEXT: sw s6, 100(s1) +; RV32I-NEXT: sw s5, 96(s1) +; RV32I-NEXT: sw s4, 92(s1) +; RV32I-NEXT: sw s3, 88(s1) +; RV32I-NEXT: sw s2, 84(s1) ; RV32I-NEXT: lw a0, 8(sp) ; ; RV32I-WITH-FP-LABEL: caller: ; RV32I-WITH-FP: addi s0, sp, 144 ; RV32I-WITH-FP-NEXT: lui a0, %hi(var) -; RV32I-WITH-FP-NEXT: lw a1, %lo(var)(a0) -; RV32I-WITH-FP-NEXT: sw a1, -56(s0) ; RV32I-WITH-FP-NEXT: addi s1, a0, %lo(var) ; RV32I-WITH-FP: sw a0, -140(s0) ; RV32I-WITH-FP-NEXT: lw s5, 88(s1) @@ -181,40 +168,36 @@ define void @caller() nounwind { ; ; RV64I-LABEL: caller: ; RV64I: lui a0, %hi(var) -; RV64I-NEXT: lw a1, %lo(var)(a0) -; RV64I-NEXT: sd a1, 160(sp) -; RV64I-NEXT: addi s0, a0, %lo(var) +; RV64I-NEXT: addi s1, a0, %lo(var) ; RV64I: sd a0, 0(sp) -; RV64I-NEXT: lw s2, 84(s0) -; RV64I-NEXT: lw s3, 88(s0) -; RV64I-NEXT: lw s4, 92(s0) -; RV64I-NEXT: lw s5, 96(s0) -; RV64I-NEXT: lw s6, 100(s0) -; RV64I-NEXT: lw s7, 104(s0) -; RV64I-NEXT: lw s8, 108(s0) -; RV64I-NEXT: lw s9, 112(s0) -; RV64I-NEXT: lw s10, 116(s0) -; RV64I-NEXT: lw s11, 120(s0) -; RV64I-NEXT: lw s1, 124(s0) +; RV64I-NEXT: lw s2, 84(s1) +; RV64I-NEXT: lw s3, 88(s1) +; RV64I-NEXT: lw s4, 92(s1) +; RV64I-NEXT: lw s5, 96(s1) +; RV64I-NEXT: lw s6, 100(s1) +; RV64I-NEXT: lw s7, 104(s1) +; RV64I-NEXT: lw s8, 108(s1) +; RV64I-NEXT: lw s9, 112(s1) +; RV64I-NEXT: lw s10, 116(s1) +; RV64I-NEXT: lw s11, 120(s1) +; RV64I-NEXT: lw s0, 124(s1) ; RV64I-NEXT: call callee -; RV64I-NEXT: sw s1, 124(s0) -; RV64I-NEXT: sw s11, 120(s0) -; RV64I-NEXT: sw s10, 116(s0) -; RV64I-NEXT: sw s9, 112(s0) -; RV64I-NEXT: sw s8, 108(s0) -; RV64I-NEXT: sw s7, 104(s0) -; RV64I-NEXT: sw s6, 100(s0) -; RV64I-NEXT: sw s5, 96(s0) -; RV64I-NEXT: sw s4, 92(s0) -; RV64I-NEXT: sw s3, 88(s0) -; RV64I-NEXT: sw s2, 84(s0) +; RV64I-NEXT: sw s0, 124(s1) +; RV64I-NEXT: sw s11, 120(s1) +; RV64I-NEXT: sw s10, 116(s1) +; RV64I-NEXT: sw s9, 112(s1) +; RV64I-NEXT: sw s8, 108(s1) +; RV64I-NEXT: sw s7, 104(s1) +; RV64I-NEXT: sw s6, 100(s1) +; RV64I-NEXT: sw s5, 96(s1) +; RV64I-NEXT: sw s4, 92(s1) +; RV64I-NEXT: sw s3, 88(s1) +; RV64I-NEXT: sw s2, 84(s1) ; RV64I-NEXT: ld a0, 0(sp) ; ; RV64I-WITH-FP-LABEL: caller: ; RV64I-WITH-FP: addi s0, sp, 288 ; RV64I-WITH-FP-NEXT: lui a0, %hi(var) -; RV64I-WITH-FP-NEXT: lw a1, %lo(var)(a0) -; RV64I-WITH-FP-NEXT: sd a1, -112(s0) ; RV64I-WITH-FP-NEXT: addi s1, a0, %lo(var) ; RV64I-WITH-FP: sd a0, -280(s0) ; RV64I-WITH-FP-NEXT: lw s5, 88(s1) diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll index 24788e1..c93edca 100644 --- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-common.ll @@ -61,8 +61,8 @@ define i32 @caller_double_in_regs() nounwind { ; RV32I-FPELIM-NEXT: addi sp, sp, -16 ; RV32I-FPELIM-NEXT: sw ra, 12(sp) ; RV32I-FPELIM-NEXT: addi a0, zero, 1 -; RV32I-FPELIM-NEXT: lui a2, 262144 ; RV32I-FPELIM-NEXT: mv a1, zero +; RV32I-FPELIM-NEXT: lui a2, 262144 ; RV32I-FPELIM-NEXT: call callee_double_in_regs ; RV32I-FPELIM-NEXT: lw ra, 12(sp) ; RV32I-FPELIM-NEXT: addi sp, sp, 16 @@ -75,8 +75,8 @@ define i32 @caller_double_in_regs() nounwind { ; RV32I-WITHFP-NEXT: sw s0, 8(sp) ; RV32I-WITHFP-NEXT: addi s0, sp, 16 ; RV32I-WITHFP-NEXT: addi a0, zero, 1 -; RV32I-WITHFP-NEXT: lui a2, 262144 ; RV32I-WITHFP-NEXT: mv a1, zero +; RV32I-WITHFP-NEXT: lui a2, 262144 ; RV32I-WITHFP-NEXT: call callee_double_in_regs ; RV32I-WITHFP-NEXT: lw s0, 8(sp) ; RV32I-WITHFP-NEXT: lw ra, 12(sp) @@ -94,14 +94,14 @@ define i32 
@callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 % ; RV32I-FPELIM-LABEL: callee_aligned_stack: ; RV32I-FPELIM: # %bb.0: ; RV32I-FPELIM-NEXT: lw a0, 0(a2) -; RV32I-FPELIM-NEXT: lw a1, 20(sp) -; RV32I-FPELIM-NEXT: lw a2, 0(sp) -; RV32I-FPELIM-NEXT: lw a3, 8(sp) -; RV32I-FPELIM-NEXT: lw a4, 16(sp) ; RV32I-FPELIM-NEXT: add a0, a0, a7 -; RV32I-FPELIM-NEXT: add a0, a0, a2 -; RV32I-FPELIM-NEXT: add a0, a0, a3 -; RV32I-FPELIM-NEXT: add a0, a0, a4 +; RV32I-FPELIM-NEXT: lw a1, 0(sp) +; RV32I-FPELIM-NEXT: add a0, a0, a1 +; RV32I-FPELIM-NEXT: lw a1, 8(sp) +; RV32I-FPELIM-NEXT: add a0, a0, a1 +; RV32I-FPELIM-NEXT: lw a1, 16(sp) +; RV32I-FPELIM-NEXT: add a0, a0, a1 +; RV32I-FPELIM-NEXT: lw a1, 20(sp) ; RV32I-FPELIM-NEXT: add a0, a0, a1 ; RV32I-FPELIM-NEXT: ret ; @@ -112,14 +112,14 @@ define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 % ; RV32I-WITHFP-NEXT: sw s0, 8(sp) ; RV32I-WITHFP-NEXT: addi s0, sp, 16 ; RV32I-WITHFP-NEXT: lw a0, 0(a2) -; RV32I-WITHFP-NEXT: lw a1, 20(s0) -; RV32I-WITHFP-NEXT: lw a2, 0(s0) -; RV32I-WITHFP-NEXT: lw a3, 8(s0) -; RV32I-WITHFP-NEXT: lw a4, 16(s0) ; RV32I-WITHFP-NEXT: add a0, a0, a7 -; RV32I-WITHFP-NEXT: add a0, a0, a2 -; RV32I-WITHFP-NEXT: add a0, a0, a3 -; RV32I-WITHFP-NEXT: add a0, a0, a4 +; RV32I-WITHFP-NEXT: lw a1, 0(s0) +; RV32I-WITHFP-NEXT: add a0, a0, a1 +; RV32I-WITHFP-NEXT: lw a1, 8(s0) +; RV32I-WITHFP-NEXT: add a0, a0, a1 +; RV32I-WITHFP-NEXT: lw a1, 16(s0) +; RV32I-WITHFP-NEXT: add a0, a0, a1 +; RV32I-WITHFP-NEXT: lw a1, 20(s0) ; RV32I-WITHFP-NEXT: add a0, a0, a1 ; RV32I-WITHFP-NEXT: lw s0, 8(sp) ; RV32I-WITHFP-NEXT: lw ra, 12(sp) @@ -169,7 +169,8 @@ define void @caller_aligned_stack() nounwind { ; RV32I-FPELIM-NEXT: addi a0, a0, -328 ; RV32I-FPELIM-NEXT: sw a0, 36(sp) ; RV32I-FPELIM-NEXT: lui a0, 335544 -; RV32I-FPELIM-NEXT: addi t0, a0, 1311 +; RV32I-FPELIM-NEXT: addi a0, a0, 1311 +; RV32I-FPELIM-NEXT: sw a0, 32(sp) ; RV32I-FPELIM-NEXT: lui a0, 688509 ; RV32I-FPELIM-NEXT: addi a5, a0, -2048 ; RV32I-FPELIM-NEXT: addi a2, sp, 32 @@ -179,7 +180,6 @@ define void @caller_aligned_stack() nounwind { ; RV32I-FPELIM-NEXT: addi a4, zero, 13 ; RV32I-FPELIM-NEXT: addi a6, zero, 4 ; RV32I-FPELIM-NEXT: addi a7, zero, 14 -; RV32I-FPELIM-NEXT: sw t0, 32(sp) ; RV32I-FPELIM-NEXT: call callee_aligned_stack ; RV32I-FPELIM-NEXT: lw ra, 60(sp) ; RV32I-FPELIM-NEXT: addi sp, sp, 64 @@ -215,7 +215,8 @@ define void @caller_aligned_stack() nounwind { ; RV32I-WITHFP-NEXT: addi a0, a0, -328 ; RV32I-WITHFP-NEXT: sw a0, -28(s0) ; RV32I-WITHFP-NEXT: lui a0, 335544 -; RV32I-WITHFP-NEXT: addi t0, a0, 1311 +; RV32I-WITHFP-NEXT: addi a0, a0, 1311 +; RV32I-WITHFP-NEXT: sw a0, -32(s0) ; RV32I-WITHFP-NEXT: lui a0, 688509 ; RV32I-WITHFP-NEXT: addi a5, a0, -2048 ; RV32I-WITHFP-NEXT: addi a2, s0, -32 @@ -225,7 +226,6 @@ define void @caller_aligned_stack() nounwind { ; RV32I-WITHFP-NEXT: addi a4, zero, 13 ; RV32I-WITHFP-NEXT: addi a6, zero, 4 ; RV32I-WITHFP-NEXT: addi a7, zero, 14 -; RV32I-WITHFP-NEXT: sw t0, -32(s0) ; RV32I-WITHFP-NEXT: call callee_aligned_stack ; RV32I-WITHFP-NEXT: lw s0, 56(sp) ; RV32I-WITHFP-NEXT: lw ra, 60(sp) @@ -241,8 +241,8 @@ define void @caller_aligned_stack() nounwind { define double @callee_small_scalar_ret() nounwind { ; RV32I-FPELIM-LABEL: callee_small_scalar_ret: ; RV32I-FPELIM: # %bb.0: -; RV32I-FPELIM-NEXT: lui a1, 261888 ; RV32I-FPELIM-NEXT: mv a0, zero +; RV32I-FPELIM-NEXT: lui a1, 261888 ; RV32I-FPELIM-NEXT: ret ; ; RV32I-WITHFP-LABEL: callee_small_scalar_ret: @@ -251,8 +251,8 @@ define double 
@callee_small_scalar_ret() nounwind { ; RV32I-WITHFP-NEXT: sw ra, 12(sp) ; RV32I-WITHFP-NEXT: sw s0, 8(sp) ; RV32I-WITHFP-NEXT: addi s0, sp, 16 -; RV32I-WITHFP-NEXT: lui a1, 261888 ; RV32I-WITHFP-NEXT: mv a0, zero +; RV32I-WITHFP-NEXT: lui a1, 261888 ; RV32I-WITHFP-NEXT: lw s0, 8(sp) ; RV32I-WITHFP-NEXT: lw ra, 12(sp) ; RV32I-WITHFP-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll index e86a8c7..10b0d54 100644 --- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll +++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32-ilp32f-ilp32d-common.ll @@ -82,22 +82,22 @@ define i32 @caller_i64_in_regs() nounwind { define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i64 %g, i32 %h) nounwind { ; RV32I-FPELIM-LABEL: callee_many_scalars: ; RV32I-FPELIM: # %bb.0: -; RV32I-FPELIM-NEXT: lw t0, 4(sp) -; RV32I-FPELIM-NEXT: lw t1, 0(sp) -; RV32I-FPELIM-NEXT: andi t2, a0, 255 -; RV32I-FPELIM-NEXT: lui a0, 16 -; RV32I-FPELIM-NEXT: addi a0, a0, -1 -; RV32I-FPELIM-NEXT: and a0, a1, a0 -; RV32I-FPELIM-NEXT: add a0, t2, a0 +; RV32I-FPELIM-NEXT: lw t0, 0(sp) +; RV32I-FPELIM-NEXT: xor a4, a4, t0 +; RV32I-FPELIM-NEXT: xor a3, a3, a7 +; RV32I-FPELIM-NEXT: or a3, a3, a4 +; RV32I-FPELIM-NEXT: lui a4, 16 +; RV32I-FPELIM-NEXT: addi a4, a4, -1 +; RV32I-FPELIM-NEXT: and a1, a1, a4 +; RV32I-FPELIM-NEXT: andi a0, a0, 255 +; RV32I-FPELIM-NEXT: add a0, a0, a1 ; RV32I-FPELIM-NEXT: add a0, a0, a2 -; RV32I-FPELIM-NEXT: xor a1, a4, t1 -; RV32I-FPELIM-NEXT: xor a2, a3, a7 -; RV32I-FPELIM-NEXT: or a1, a2, a1 -; RV32I-FPELIM-NEXT: seqz a1, a1 +; RV32I-FPELIM-NEXT: seqz a1, a3 ; RV32I-FPELIM-NEXT: add a0, a1, a0 ; RV32I-FPELIM-NEXT: add a0, a0, a5 ; RV32I-FPELIM-NEXT: add a0, a0, a6 -; RV32I-FPELIM-NEXT: add a0, a0, t0 +; RV32I-FPELIM-NEXT: lw a1, 4(sp) +; RV32I-FPELIM-NEXT: add a0, a0, a1 ; RV32I-FPELIM-NEXT: ret ; ; RV32I-WITHFP-LABEL: callee_many_scalars: @@ -106,22 +106,22 @@ define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i64 %d, i32 %e, i32 %f, i ; RV32I-WITHFP-NEXT: sw ra, 12(sp) ; RV32I-WITHFP-NEXT: sw s0, 8(sp) ; RV32I-WITHFP-NEXT: addi s0, sp, 16 -; RV32I-WITHFP-NEXT: lw t0, 4(s0) -; RV32I-WITHFP-NEXT: lw t1, 0(s0) -; RV32I-WITHFP-NEXT: andi t2, a0, 255 -; RV32I-WITHFP-NEXT: lui a0, 16 -; RV32I-WITHFP-NEXT: addi a0, a0, -1 -; RV32I-WITHFP-NEXT: and a0, a1, a0 -; RV32I-WITHFP-NEXT: add a0, t2, a0 +; RV32I-WITHFP-NEXT: lw t0, 0(s0) +; RV32I-WITHFP-NEXT: xor a4, a4, t0 +; RV32I-WITHFP-NEXT: xor a3, a3, a7 +; RV32I-WITHFP-NEXT: or a3, a3, a4 +; RV32I-WITHFP-NEXT: lui a4, 16 +; RV32I-WITHFP-NEXT: addi a4, a4, -1 +; RV32I-WITHFP-NEXT: and a1, a1, a4 +; RV32I-WITHFP-NEXT: andi a0, a0, 255 +; RV32I-WITHFP-NEXT: add a0, a0, a1 ; RV32I-WITHFP-NEXT: add a0, a0, a2 -; RV32I-WITHFP-NEXT: xor a1, a4, t1 -; RV32I-WITHFP-NEXT: xor a2, a3, a7 -; RV32I-WITHFP-NEXT: or a1, a2, a1 -; RV32I-WITHFP-NEXT: seqz a1, a1 +; RV32I-WITHFP-NEXT: seqz a1, a3 ; RV32I-WITHFP-NEXT: add a0, a1, a0 ; RV32I-WITHFP-NEXT: add a0, a0, a5 ; RV32I-WITHFP-NEXT: add a0, a0, a6 -; RV32I-WITHFP-NEXT: add a0, a0, t0 +; RV32I-WITHFP-NEXT: lw a1, 4(s0) +; RV32I-WITHFP-NEXT: add a0, a0, a1 ; RV32I-WITHFP-NEXT: lw s0, 8(sp) ; RV32I-WITHFP-NEXT: lw ra, 12(sp) ; RV32I-WITHFP-NEXT: addi sp, sp, 16 @@ -146,15 +146,15 @@ define i32 @caller_many_scalars() nounwind { ; RV32I-FPELIM-NEXT: sw ra, 12(sp) ; RV32I-FPELIM-NEXT: addi a0, zero, 8 ; RV32I-FPELIM-NEXT: sw a0, 4(sp) +; RV32I-FPELIM-NEXT: sw zero, 0(sp) ; 
RV32I-FPELIM-NEXT: addi a0, zero, 1
 ; RV32I-FPELIM-NEXT: addi a1, zero, 2
 ; RV32I-FPELIM-NEXT: addi a2, zero, 3
 ; RV32I-FPELIM-NEXT: addi a3, zero, 4
+; RV32I-FPELIM-NEXT: mv a4, zero
 ; RV32I-FPELIM-NEXT: addi a5, zero, 5
 ; RV32I-FPELIM-NEXT: addi a6, zero, 6
 ; RV32I-FPELIM-NEXT: addi a7, zero, 7
-; RV32I-FPELIM-NEXT: sw zero, 0(sp)
-; RV32I-FPELIM-NEXT: mv a4, zero
 ; RV32I-FPELIM-NEXT: call callee_many_scalars
 ; RV32I-FPELIM-NEXT: lw ra, 12(sp)
 ; RV32I-FPELIM-NEXT: addi sp, sp, 16
@@ -168,15 +168,15 @@ define i32 @caller_many_scalars() nounwind {
 ; RV32I-WITHFP-NEXT: addi s0, sp, 16
 ; RV32I-WITHFP-NEXT: addi a0, zero, 8
 ; RV32I-WITHFP-NEXT: sw a0, 4(sp)
+; RV32I-WITHFP-NEXT: sw zero, 0(sp)
 ; RV32I-WITHFP-NEXT: addi a0, zero, 1
 ; RV32I-WITHFP-NEXT: addi a1, zero, 2
 ; RV32I-WITHFP-NEXT: addi a2, zero, 3
 ; RV32I-WITHFP-NEXT: addi a3, zero, 4
+; RV32I-WITHFP-NEXT: mv a4, zero
 ; RV32I-WITHFP-NEXT: addi a5, zero, 5
 ; RV32I-WITHFP-NEXT: addi a6, zero, 6
 ; RV32I-WITHFP-NEXT: addi a7, zero, 7
-; RV32I-WITHFP-NEXT: sw zero, 0(sp)
-; RV32I-WITHFP-NEXT: mv a4, zero
 ; RV32I-WITHFP-NEXT: call callee_many_scalars
 ; RV32I-WITHFP-NEXT: lw s0, 8(sp)
 ; RV32I-WITHFP-NEXT: lw ra, 12(sp)
@@ -192,20 +192,20 @@ define i32 @caller_many_scalars() nounwind {
 define i32 @callee_large_scalars(i128 %a, fp128 %b) nounwind {
 ; RV32I-FPELIM-LABEL: callee_large_scalars:
 ; RV32I-FPELIM: # %bb.0:
-; RV32I-FPELIM-NEXT: lw a6, 0(a1)
-; RV32I-FPELIM-NEXT: lw a7, 0(a0)
-; RV32I-FPELIM-NEXT: lw a4, 4(a1)
-; RV32I-FPELIM-NEXT: lw a5, 12(a1)
-; RV32I-FPELIM-NEXT: lw a2, 12(a0)
-; RV32I-FPELIM-NEXT: lw a3, 4(a0)
-; RV32I-FPELIM-NEXT: lw a1, 8(a1)
-; RV32I-FPELIM-NEXT: lw a0, 8(a0)
-; RV32I-FPELIM-NEXT: xor a2, a2, a5
-; RV32I-FPELIM-NEXT: xor a3, a3, a4
+; RV32I-FPELIM-NEXT: lw a2, 12(a1)
+; RV32I-FPELIM-NEXT: lw a3, 12(a0)
+; RV32I-FPELIM-NEXT: xor a2, a3, a2
+; RV32I-FPELIM-NEXT: lw a3, 4(a1)
+; RV32I-FPELIM-NEXT: lw a4, 4(a0)
+; RV32I-FPELIM-NEXT: xor a3, a4, a3
 ; RV32I-FPELIM-NEXT: or a2, a3, a2
+; RV32I-FPELIM-NEXT: lw a3, 8(a1)
+; RV32I-FPELIM-NEXT: lw a4, 8(a0)
+; RV32I-FPELIM-NEXT: xor a3, a4, a3
+; RV32I-FPELIM-NEXT: lw a1, 0(a1)
+; RV32I-FPELIM-NEXT: lw a0, 0(a0)
 ; RV32I-FPELIM-NEXT: xor a0, a0, a1
-; RV32I-FPELIM-NEXT: xor a1, a7, a6
-; RV32I-FPELIM-NEXT: or a0, a1, a0
+; RV32I-FPELIM-NEXT: or a0, a0, a3
 ; RV32I-FPELIM-NEXT: or a0, a0, a2
 ; RV32I-FPELIM-NEXT: seqz a0, a0
 ; RV32I-FPELIM-NEXT: ret
@@ -216,20 +216,20 @@ define i32 @callee_large_scalars(i128 %a, fp128 %b) nounwind {
 ; RV32I-WITHFP-NEXT: sw ra, 12(sp)
 ; RV32I-WITHFP-NEXT: sw s0, 8(sp)
 ; RV32I-WITHFP-NEXT: addi s0, sp, 16
-; RV32I-WITHFP-NEXT: lw a6, 0(a1)
-; RV32I-WITHFP-NEXT: lw a7, 0(a0)
-; RV32I-WITHFP-NEXT: lw a4, 4(a1)
-; RV32I-WITHFP-NEXT: lw a5, 12(a1)
-; RV32I-WITHFP-NEXT: lw a2, 12(a0)
-; RV32I-WITHFP-NEXT: lw a3, 4(a0)
-; RV32I-WITHFP-NEXT: lw a1, 8(a1)
-; RV32I-WITHFP-NEXT: lw a0, 8(a0)
-; RV32I-WITHFP-NEXT: xor a2, a2, a5
-; RV32I-WITHFP-NEXT: xor a3, a3, a4
+; RV32I-WITHFP-NEXT: lw a2, 12(a1)
+; RV32I-WITHFP-NEXT: lw a3, 12(a0)
+; RV32I-WITHFP-NEXT: xor a2, a3, a2
+; RV32I-WITHFP-NEXT: lw a3, 4(a1)
+; RV32I-WITHFP-NEXT: lw a4, 4(a0)
+; RV32I-WITHFP-NEXT: xor a3, a4, a3
 ; RV32I-WITHFP-NEXT: or a2, a3, a2
+; RV32I-WITHFP-NEXT: lw a3, 8(a1)
+; RV32I-WITHFP-NEXT: lw a4, 8(a0)
+; RV32I-WITHFP-NEXT: xor a3, a4, a3
+; RV32I-WITHFP-NEXT: lw a1, 0(a1)
+; RV32I-WITHFP-NEXT: lw a0, 0(a0)
 ; RV32I-WITHFP-NEXT: xor a0, a0, a1
-; RV32I-WITHFP-NEXT: xor a1, a7, a6
-; RV32I-WITHFP-NEXT: or a0, a1, a0
+; RV32I-WITHFP-NEXT: or a0, a0, a3
 ; RV32I-WITHFP-NEXT: or a0, a0, a2
 ; RV32I-WITHFP-NEXT: seqz a0, a0
@@ -255,10 +255,10 @@ define i32 @caller_large_scalars() nounwind {
 ; RV32I-FPELIM-NEXT: sw zero, 36(sp)
 ; RV32I-FPELIM-NEXT: sw zero, 32(sp)
 ; RV32I-FPELIM-NEXT: sw zero, 28(sp)
-; RV32I-FPELIM-NEXT: addi a2, zero, 1
+; RV32I-FPELIM-NEXT: addi a0, zero, 1
+; RV32I-FPELIM-NEXT: sw a0, 24(sp)
 ; RV32I-FPELIM-NEXT: addi a0, sp, 24
 ; RV32I-FPELIM-NEXT: mv a1, sp
-; RV32I-FPELIM-NEXT: sw a2, 24(sp)
 ; RV32I-FPELIM-NEXT: call callee_large_scalars
 ; RV32I-FPELIM-NEXT: lw ra, 44(sp)
 ; RV32I-FPELIM-NEXT: addi sp, sp, 48
@@ -278,10 +278,10 @@ define i32 @caller_large_scalars() nounwind {
 ; RV32I-WITHFP-NEXT: sw zero, -12(s0)
 ; RV32I-WITHFP-NEXT: sw zero, -16(s0)
 ; RV32I-WITHFP-NEXT: sw zero, -20(s0)
-; RV32I-WITHFP-NEXT: addi a2, zero, 1
+; RV32I-WITHFP-NEXT: addi a0, zero, 1
+; RV32I-WITHFP-NEXT: sw a0, -24(s0)
 ; RV32I-WITHFP-NEXT: addi a0, s0, -24
 ; RV32I-WITHFP-NEXT: addi a1, s0, -48
-; RV32I-WITHFP-NEXT: sw a2, -24(s0)
 ; RV32I-WITHFP-NEXT: call callee_large_scalars
 ; RV32I-WITHFP-NEXT: lw s0, 40(sp)
 ; RV32I-WITHFP-NEXT: lw ra, 44(sp)
@@ -299,20 +299,20 @@ define i32 @callee_large_scalars_exhausted_regs(i32 %a, i32 %b, i32 %c, i32 %d,
 ; RV32I-FPELIM-LABEL: callee_large_scalars_exhausted_regs:
 ; RV32I-FPELIM: # %bb.0:
 ; RV32I-FPELIM-NEXT: lw a0, 4(sp)
-; RV32I-FPELIM-NEXT: lw a6, 0(a0)
-; RV32I-FPELIM-NEXT: lw t0, 0(a7)
-; RV32I-FPELIM-NEXT: lw a3, 4(a0)
-; RV32I-FPELIM-NEXT: lw a4, 12(a0)
-; RV32I-FPELIM-NEXT: lw a5, 12(a7)
-; RV32I-FPELIM-NEXT: lw a1, 4(a7)
-; RV32I-FPELIM-NEXT: lw a0, 8(a0)
-; RV32I-FPELIM-NEXT: lw a2, 8(a7)
-; RV32I-FPELIM-NEXT: xor a4, a5, a4
-; RV32I-FPELIM-NEXT: xor a1, a1, a3
-; RV32I-FPELIM-NEXT: or a1, a1, a4
-; RV32I-FPELIM-NEXT: xor a0, a2, a0
-; RV32I-FPELIM-NEXT: xor a2, t0, a6
-; RV32I-FPELIM-NEXT: or a0, a2, a0
+; RV32I-FPELIM-NEXT: lw a1, 12(a0)
+; RV32I-FPELIM-NEXT: lw a2, 12(a7)
+; RV32I-FPELIM-NEXT: xor a1, a2, a1
+; RV32I-FPELIM-NEXT: lw a2, 4(a0)
+; RV32I-FPELIM-NEXT: lw a3, 4(a7)
+; RV32I-FPELIM-NEXT: xor a2, a3, a2
+; RV32I-FPELIM-NEXT: or a1, a2, a1
+; RV32I-FPELIM-NEXT: lw a2, 8(a0)
+; RV32I-FPELIM-NEXT: lw a3, 8(a7)
+; RV32I-FPELIM-NEXT: xor a2, a3, a2
+; RV32I-FPELIM-NEXT: lw a0, 0(a0)
+; RV32I-FPELIM-NEXT: lw a3, 0(a7)
+; RV32I-FPELIM-NEXT: xor a0, a3, a0
+; RV32I-FPELIM-NEXT: or a0, a0, a2
 ; RV32I-FPELIM-NEXT: or a0, a0, a1
 ; RV32I-FPELIM-NEXT: seqz a0, a0
 ; RV32I-FPELIM-NEXT: ret
@@ -324,20 +324,20 @@ define i32 @callee_large_scalars_exhausted_regs(i32 %a, i32 %b, i32 %c, i32 %d,
 ; RV32I-WITHFP-NEXT: sw s0, 8(sp)
 ; RV32I-WITHFP-NEXT: addi s0, sp, 16
 ; RV32I-WITHFP-NEXT: lw a0, 4(s0)
-; RV32I-WITHFP-NEXT: lw a6, 0(a0)
-; RV32I-WITHFP-NEXT: lw t0, 0(a7)
-; RV32I-WITHFP-NEXT: lw a3, 4(a0)
-; RV32I-WITHFP-NEXT: lw a4, 12(a0)
-; RV32I-WITHFP-NEXT: lw a5, 12(a7)
-; RV32I-WITHFP-NEXT: lw a1, 4(a7)
-; RV32I-WITHFP-NEXT: lw a0, 8(a0)
-; RV32I-WITHFP-NEXT: lw a2, 8(a7)
-; RV32I-WITHFP-NEXT: xor a4, a5, a4
-; RV32I-WITHFP-NEXT: xor a1, a1, a3
-; RV32I-WITHFP-NEXT: or a1, a1, a4
-; RV32I-WITHFP-NEXT: xor a0, a2, a0
-; RV32I-WITHFP-NEXT: xor a2, t0, a6
-; RV32I-WITHFP-NEXT: or a0, a2, a0
+; RV32I-WITHFP-NEXT: lw a1, 12(a0)
+; RV32I-WITHFP-NEXT: lw a2, 12(a7)
+; RV32I-WITHFP-NEXT: xor a1, a2, a1
+; RV32I-WITHFP-NEXT: lw a2, 4(a0)
+; RV32I-WITHFP-NEXT: lw a3, 4(a7)
+; RV32I-WITHFP-NEXT: xor a2, a3, a2
+; RV32I-WITHFP-NEXT: or a1, a2, a1
+; RV32I-WITHFP-NEXT: lw a2, 8(a0)
+; RV32I-WITHFP-NEXT: lw a3, 8(a7)
+; RV32I-WITHFP-NEXT: xor a2, a3, a2
+; RV32I-WITHFP-NEXT: lw a0, 0(a0)
+; RV32I-WITHFP-NEXT: lw a3, 0(a7)
+; RV32I-WITHFP-NEXT: xor a0, a3, a0
+; RV32I-WITHFP-NEXT: or a0, a0, a2
 ; RV32I-WITHFP-NEXT: or a0, a0, a1
 ; RV32I-WITHFP-NEXT: seqz a0, a0
 ; RV32I-WITHFP-NEXT: lw s0, 8(sp)
@@ -367,7 +367,8 @@ define i32 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV32I-FPELIM-NEXT: sw zero, 52(sp)
 ; RV32I-FPELIM-NEXT: sw zero, 48(sp)
 ; RV32I-FPELIM-NEXT: sw zero, 44(sp)
-; RV32I-FPELIM-NEXT: addi t0, zero, 8
+; RV32I-FPELIM-NEXT: addi a0, zero, 8
+; RV32I-FPELIM-NEXT: sw a0, 40(sp)
 ; RV32I-FPELIM-NEXT: addi a7, sp, 40
 ; RV32I-FPELIM-NEXT: addi a0, zero, 1
 ; RV32I-FPELIM-NEXT: addi a1, zero, 2
@@ -376,7 +377,6 @@ define i32 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV32I-FPELIM-NEXT: addi a4, zero, 5
 ; RV32I-FPELIM-NEXT: addi a5, zero, 6
 ; RV32I-FPELIM-NEXT: addi a6, zero, 7
-; RV32I-FPELIM-NEXT: sw t0, 40(sp)
 ; RV32I-FPELIM-NEXT: call callee_large_scalars_exhausted_regs
 ; RV32I-FPELIM-NEXT: lw ra, 60(sp)
 ; RV32I-FPELIM-NEXT: addi sp, sp, 64
@@ -400,7 +400,8 @@ define i32 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV32I-WITHFP-NEXT: sw zero, -12(s0)
 ; RV32I-WITHFP-NEXT: sw zero, -16(s0)
 ; RV32I-WITHFP-NEXT: sw zero, -20(s0)
-; RV32I-WITHFP-NEXT: addi t0, zero, 8
+; RV32I-WITHFP-NEXT: addi a0, zero, 8
+; RV32I-WITHFP-NEXT: sw a0, -24(s0)
 ; RV32I-WITHFP-NEXT: addi a7, s0, -24
 ; RV32I-WITHFP-NEXT: addi a0, zero, 1
 ; RV32I-WITHFP-NEXT: addi a1, zero, 2
@@ -409,7 +410,6 @@ define i32 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV32I-WITHFP-NEXT: addi a4, zero, 5
 ; RV32I-WITHFP-NEXT: addi a5, zero, 6
 ; RV32I-WITHFP-NEXT: addi a6, zero, 7
-; RV32I-WITHFP-NEXT: sw t0, -24(s0)
 ; RV32I-WITHFP-NEXT: call callee_large_scalars_exhausted_regs
 ; RV32I-WITHFP-NEXT: lw s0, 56(sp)
 ; RV32I-WITHFP-NEXT: lw ra, 60(sp)
@@ -524,9 +524,9 @@ define i32 @caller_small_coerced_struct() nounwind {
 define i32 @callee_large_struct(%struct.large* byval align 4 %a) nounwind {
 ; RV32I-FPELIM-LABEL: callee_large_struct:
 ; RV32I-FPELIM: # %bb.0:
-; RV32I-FPELIM-NEXT: lw a1, 0(a0)
-; RV32I-FPELIM-NEXT: lw a0, 12(a0)
-; RV32I-FPELIM-NEXT: add a0, a1, a0
+; RV32I-FPELIM-NEXT: lw a1, 12(a0)
+; RV32I-FPELIM-NEXT: lw a0, 0(a0)
+; RV32I-FPELIM-NEXT: add a0, a0, a1
 ; RV32I-FPELIM-NEXT: ret
 ;
 ; RV32I-WITHFP-LABEL: callee_large_struct:
@@ -535,9 +535,9 @@ define i32 @callee_large_struct(%struct.large* byval align 4 %a) nounwind {
 ; RV32I-WITHFP-NEXT: sw ra, 12(sp)
 ; RV32I-WITHFP-NEXT: sw s0, 8(sp)
 ; RV32I-WITHFP-NEXT: addi s0, sp, 16
-; RV32I-WITHFP-NEXT: lw a1, 0(a0)
-; RV32I-WITHFP-NEXT: lw a0, 12(a0)
-; RV32I-WITHFP-NEXT: add a0, a1, a0
+; RV32I-WITHFP-NEXT: lw a1, 12(a0)
+; RV32I-WITHFP-NEXT: lw a0, 0(a0)
+; RV32I-WITHFP-NEXT: add a0, a0, a1
 ; RV32I-WITHFP-NEXT: lw s0, 8(sp)
 ; RV32I-WITHFP-NEXT: lw ra, 12(sp)
 ; RV32I-WITHFP-NEXT: addi sp, sp, 16
@@ -557,16 +557,16 @@ define i32 @caller_large_struct() nounwind {
 ; RV32I-FPELIM-NEXT: sw ra, 44(sp)
 ; RV32I-FPELIM-NEXT: addi a0, zero, 1
 ; RV32I-FPELIM-NEXT: sw a0, 24(sp)
-; RV32I-FPELIM-NEXT: addi a1, zero, 2
-; RV32I-FPELIM-NEXT: sw a1, 28(sp)
-; RV32I-FPELIM-NEXT: addi a2, zero, 3
-; RV32I-FPELIM-NEXT: sw a2, 32(sp)
-; RV32I-FPELIM-NEXT: addi a3, zero, 4
-; RV32I-FPELIM-NEXT: sw a3, 36(sp)
 ; RV32I-FPELIM-NEXT: sw a0, 8(sp)
-; RV32I-FPELIM-NEXT: sw a1, 12(sp)
-; RV32I-FPELIM-NEXT: sw a2, 16(sp)
-; RV32I-FPELIM-NEXT: sw a3, 20(sp)
+; RV32I-FPELIM-NEXT: addi a0, zero, 2
+; RV32I-FPELIM-NEXT: sw a0, 28(sp)
+; RV32I-FPELIM-NEXT: sw a0, 12(sp)
+; RV32I-FPELIM-NEXT: addi a0, zero, 3
+; RV32I-FPELIM-NEXT: sw a0, 32(sp)
+; RV32I-FPELIM-NEXT: sw a0, 16(sp)
+; RV32I-FPELIM-NEXT: addi a0, zero, 4
+; RV32I-FPELIM-NEXT: sw a0, 36(sp)
+; RV32I-FPELIM-NEXT: sw a0, 20(sp)
 ; RV32I-FPELIM-NEXT: addi a0, sp, 8
 ; RV32I-FPELIM-NEXT: call callee_large_struct
 ; RV32I-FPELIM-NEXT: lw ra, 44(sp)
@@ -581,16 +581,16 @@ define i32 @caller_large_struct() nounwind {
 ; RV32I-WITHFP-NEXT: addi s0, sp, 48
 ; RV32I-WITHFP-NEXT: addi a0, zero, 1
 ; RV32I-WITHFP-NEXT: sw a0, -24(s0)
-; RV32I-WITHFP-NEXT: addi a1, zero, 2
-; RV32I-WITHFP-NEXT: sw a1, -20(s0)
-; RV32I-WITHFP-NEXT: addi a2, zero, 3
-; RV32I-WITHFP-NEXT: sw a2, -16(s0)
-; RV32I-WITHFP-NEXT: addi a3, zero, 4
-; RV32I-WITHFP-NEXT: sw a3, -12(s0)
 ; RV32I-WITHFP-NEXT: sw a0, -40(s0)
-; RV32I-WITHFP-NEXT: sw a1, -36(s0)
-; RV32I-WITHFP-NEXT: sw a2, -32(s0)
-; RV32I-WITHFP-NEXT: sw a3, -28(s0)
+; RV32I-WITHFP-NEXT: addi a0, zero, 2
+; RV32I-WITHFP-NEXT: sw a0, -20(s0)
+; RV32I-WITHFP-NEXT: sw a0, -36(s0)
+; RV32I-WITHFP-NEXT: addi a0, zero, 3
+; RV32I-WITHFP-NEXT: sw a0, -16(s0)
+; RV32I-WITHFP-NEXT: sw a0, -32(s0)
+; RV32I-WITHFP-NEXT: addi a0, zero, 4
+; RV32I-WITHFP-NEXT: sw a0, -12(s0)
+; RV32I-WITHFP-NEXT: sw a0, -28(s0)
 ; RV32I-WITHFP-NEXT: addi a0, s0, -40
 ; RV32I-WITHFP-NEXT: call callee_large_struct
 ; RV32I-WITHFP-NEXT: lw s0, 40(sp)
@@ -619,14 +619,14 @@ define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 %
 ; RV32I-FPELIM-LABEL: callee_aligned_stack:
 ; RV32I-FPELIM: # %bb.0:
 ; RV32I-FPELIM-NEXT: lw a0, 0(a2)
-; RV32I-FPELIM-NEXT: lw a1, 20(sp)
-; RV32I-FPELIM-NEXT: lw a2, 0(sp)
-; RV32I-FPELIM-NEXT: lw a3, 8(sp)
-; RV32I-FPELIM-NEXT: lw a4, 16(sp)
 ; RV32I-FPELIM-NEXT: add a0, a0, a7
-; RV32I-FPELIM-NEXT: add a0, a0, a2
-; RV32I-FPELIM-NEXT: add a0, a0, a3
-; RV32I-FPELIM-NEXT: add a0, a0, a4
+; RV32I-FPELIM-NEXT: lw a1, 0(sp)
+; RV32I-FPELIM-NEXT: add a0, a0, a1
+; RV32I-FPELIM-NEXT: lw a1, 8(sp)
+; RV32I-FPELIM-NEXT: add a0, a0, a1
+; RV32I-FPELIM-NEXT: lw a1, 16(sp)
+; RV32I-FPELIM-NEXT: add a0, a0, a1
+; RV32I-FPELIM-NEXT: lw a1, 20(sp)
 ; RV32I-FPELIM-NEXT: add a0, a0, a1
 ; RV32I-FPELIM-NEXT: ret
 ;
@@ -637,14 +637,14 @@ define i32 @callee_aligned_stack(i32 %a, i32 %b, fp128 %c, i32 %d, i32 %e, i64 %
 ; RV32I-WITHFP-NEXT: sw s0, 8(sp)
 ; RV32I-WITHFP-NEXT: addi s0, sp, 16
 ; RV32I-WITHFP-NEXT: lw a0, 0(a2)
-; RV32I-WITHFP-NEXT: lw a1, 20(s0)
-; RV32I-WITHFP-NEXT: lw a2, 0(s0)
-; RV32I-WITHFP-NEXT: lw a3, 8(s0)
-; RV32I-WITHFP-NEXT: lw a4, 16(s0)
 ; RV32I-WITHFP-NEXT: add a0, a0, a7
-; RV32I-WITHFP-NEXT: add a0, a0, a2
-; RV32I-WITHFP-NEXT: add a0, a0, a3
-; RV32I-WITHFP-NEXT: add a0, a0, a4
+; RV32I-WITHFP-NEXT: lw a1, 0(s0)
+; RV32I-WITHFP-NEXT: add a0, a0, a1
+; RV32I-WITHFP-NEXT: lw a1, 8(s0)
+; RV32I-WITHFP-NEXT: add a0, a0, a1
+; RV32I-WITHFP-NEXT: lw a1, 16(s0)
+; RV32I-WITHFP-NEXT: add a0, a0, a1
+; RV32I-WITHFP-NEXT: lw a1, 20(s0)
 ; RV32I-WITHFP-NEXT: add a0, a0, a1
 ; RV32I-WITHFP-NEXT: lw s0, 8(sp)
 ; RV32I-WITHFP-NEXT: lw ra, 12(sp)
@@ -690,7 +690,8 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-FPELIM-NEXT: addi a0, a0, -328
 ; RV32I-FPELIM-NEXT: sw a0, 36(sp)
 ; RV32I-FPELIM-NEXT: lui a0, 335544
-; RV32I-FPELIM-NEXT: addi t0, a0, 1311
+; RV32I-FPELIM-NEXT: addi a0, a0, 1311
+; RV32I-FPELIM-NEXT: sw a0, 32(sp)
 ; RV32I-FPELIM-NEXT: lui a0, 688509
 ; RV32I-FPELIM-NEXT: addi a5, a0, -2048
 ; RV32I-FPELIM-NEXT: addi a2, sp, 32
@@ -700,7 +701,6 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-FPELIM-NEXT: addi a4, zero, 13
 ; RV32I-FPELIM-NEXT: addi a6, zero, 4
 ; RV32I-FPELIM-NEXT: addi a7, zero, 14
-; RV32I-FPELIM-NEXT: sw t0, 32(sp)
 ; RV32I-FPELIM-NEXT: call callee_aligned_stack
 ; RV32I-FPELIM-NEXT: lw ra, 60(sp)
 ; RV32I-FPELIM-NEXT: addi sp, sp, 64
@@ -733,7 +733,8 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-WITHFP-NEXT: addi a0, a0, -328
 ; RV32I-WITHFP-NEXT: sw a0, -28(s0)
 ; RV32I-WITHFP-NEXT: lui a0, 335544
-; RV32I-WITHFP-NEXT: addi t0, a0, 1311
+; RV32I-WITHFP-NEXT: addi a0, a0, 1311
+; RV32I-WITHFP-NEXT: sw a0, -32(s0)
 ; RV32I-WITHFP-NEXT: lui a0, 688509
 ; RV32I-WITHFP-NEXT: addi a5, a0, -2048
 ; RV32I-WITHFP-NEXT: addi a2, s0, -32
@@ -743,7 +744,6 @@ define void @caller_aligned_stack() nounwind {
 ; RV32I-WITHFP-NEXT: addi a4, zero, 13
 ; RV32I-WITHFP-NEXT: addi a6, zero, 4
 ; RV32I-WITHFP-NEXT: addi a7, zero, 14
-; RV32I-WITHFP-NEXT: sw t0, -32(s0)
 ; RV32I-WITHFP-NEXT: call callee_aligned_stack
 ; RV32I-WITHFP-NEXT: lw s0, 56(sp)
 ; RV32I-WITHFP-NEXT: lw ra, 60(sp)
@@ -787,15 +787,17 @@ define i32 @caller_small_scalar_ret() nounwind {
 ; RV32I-FPELIM: # %bb.0:
 ; RV32I-FPELIM-NEXT: addi sp, sp, -16
 ; RV32I-FPELIM-NEXT: sw ra, 12(sp)
+; RV32I-FPELIM-NEXT: sw s0, 8(sp)
+; RV32I-FPELIM-NEXT: lui a0, 56
+; RV32I-FPELIM-NEXT: addi s0, a0, 580
 ; RV32I-FPELIM-NEXT: call callee_small_scalar_ret
-; RV32I-FPELIM-NEXT: lui a2, 56
-; RV32I-FPELIM-NEXT: addi a2, a2, 580
-; RV32I-FPELIM-NEXT: xor a1, a1, a2
+; RV32I-FPELIM-NEXT: xor a1, a1, s0
 ; RV32I-FPELIM-NEXT: lui a2, 200614
 ; RV32I-FPELIM-NEXT: addi a2, a2, 647
 ; RV32I-FPELIM-NEXT: xor a0, a0, a2
 ; RV32I-FPELIM-NEXT: or a0, a0, a1
 ; RV32I-FPELIM-NEXT: seqz a0, a0
+; RV32I-FPELIM-NEXT: lw s0, 8(sp)
 ; RV32I-FPELIM-NEXT: lw ra, 12(sp)
 ; RV32I-FPELIM-NEXT: addi sp, sp, 16
 ; RV32I-FPELIM-NEXT: ret
@@ -805,16 +807,18 @@ define i32 @caller_small_scalar_ret() nounwind {
 ; RV32I-WITHFP-NEXT: addi sp, sp, -16
 ; RV32I-WITHFP-NEXT: sw ra, 12(sp)
 ; RV32I-WITHFP-NEXT: sw s0, 8(sp)
+; RV32I-WITHFP-NEXT: sw s1, 4(sp)
 ; RV32I-WITHFP-NEXT: addi s0, sp, 16
+; RV32I-WITHFP-NEXT: lui a0, 56
+; RV32I-WITHFP-NEXT: addi s1, a0, 580
 ; RV32I-WITHFP-NEXT: call callee_small_scalar_ret
-; RV32I-WITHFP-NEXT: lui a2, 56
-; RV32I-WITHFP-NEXT: addi a2, a2, 580
-; RV32I-WITHFP-NEXT: xor a1, a1, a2
+; RV32I-WITHFP-NEXT: xor a1, a1, s1
 ; RV32I-WITHFP-NEXT: lui a2, 200614
 ; RV32I-WITHFP-NEXT: addi a2, a2, 647
 ; RV32I-WITHFP-NEXT: xor a0, a0, a2
 ; RV32I-WITHFP-NEXT: or a0, a0, a1
 ; RV32I-WITHFP-NEXT: seqz a0, a0
+; RV32I-WITHFP-NEXT: lw s1, 4(sp)
 ; RV32I-WITHFP-NEXT: lw s0, 8(sp)
 ; RV32I-WITHFP-NEXT: lw ra, 12(sp)
 ; RV32I-WITHFP-NEXT: addi sp, sp, 16
@@ -942,14 +946,14 @@ define void @caller_large_scalar_ret() nounwind {
 define void @callee_large_struct_ret(%struct.large* noalias sret %agg.result) nounwind {
 ; RV32I-FPELIM-LABEL: callee_large_struct_ret:
 ; RV32I-FPELIM: # %bb.0:
-; RV32I-FPELIM-NEXT: addi a1, zero, 1
-; RV32I-FPELIM-NEXT: sw a1, 0(a0)
-; RV32I-FPELIM-NEXT: addi a1, zero, 2
-; RV32I-FPELIM-NEXT: sw a1, 4(a0)
-; RV32I-FPELIM-NEXT: addi a1, zero, 3
-; RV32I-FPELIM-NEXT: sw a1, 8(a0)
 ; RV32I-FPELIM-NEXT: addi a1, zero, 4
 ; RV32I-FPELIM-NEXT: sw a1, 12(a0)
+; RV32I-FPELIM-NEXT: addi a1, zero, 3
+; RV32I-FPELIM-NEXT: sw a1, 8(a0)
+; RV32I-FPELIM-NEXT: addi a1, zero, 2
+; RV32I-FPELIM-NEXT: sw a1, 4(a0)
+; RV32I-FPELIM-NEXT: addi a1, zero, 1
+; RV32I-FPELIM-NEXT: sw a1, 0(a0)
 ; RV32I-FPELIM-NEXT: ret
 ;
 ; RV32I-WITHFP-LABEL: callee_large_struct_ret:
@@ -958,14 +962,14 @@ define void @callee_large_struct_ret(%struct.large* noalias sret %agg.result) no
 ; RV32I-WITHFP-NEXT: sw ra, 12(sp)
 ; RV32I-WITHFP-NEXT: sw s0, 8(sp)
 ; RV32I-WITHFP-NEXT: addi s0, sp, 16
-; RV32I-WITHFP-NEXT: addi a1, zero, 1
-; RV32I-WITHFP-NEXT: sw a1, 0(a0)
-; RV32I-WITHFP-NEXT: addi a1, zero, 2
-; RV32I-WITHFP-NEXT: sw a1, 4(a0)
-; RV32I-WITHFP-NEXT: addi a1, zero, 3
-; RV32I-WITHFP-NEXT: sw a1, 8(a0)
 ; RV32I-WITHFP-NEXT: addi a1, zero, 4
 ; RV32I-WITHFP-NEXT: sw a1, 12(a0)
+; RV32I-WITHFP-NEXT: addi a1, zero, 3
+; RV32I-WITHFP-NEXT: sw a1, 8(a0)
+; RV32I-WITHFP-NEXT: addi a1, zero, 2
+; RV32I-WITHFP-NEXT: sw a1, 4(a0)
+; RV32I-WITHFP-NEXT: addi a1, zero, 1
+; RV32I-WITHFP-NEXT: sw a1, 0(a0)
 ; RV32I-WITHFP-NEXT: lw s0, 8(sp)
 ; RV32I-WITHFP-NEXT: lw ra, 12(sp)
 ; RV32I-WITHFP-NEXT: addi sp, sp, 16
@@ -988,9 +992,9 @@ define i32 @caller_large_struct_ret() nounwind {
 ; RV32I-FPELIM-NEXT: sw ra, 28(sp)
 ; RV32I-FPELIM-NEXT: addi a0, sp, 8
 ; RV32I-FPELIM-NEXT: call callee_large_struct_ret
-; RV32I-FPELIM-NEXT: lw a0, 8(sp)
-; RV32I-FPELIM-NEXT: lw a1, 20(sp)
-; RV32I-FPELIM-NEXT: add a0, a0, a1
+; RV32I-FPELIM-NEXT: lw a0, 20(sp)
+; RV32I-FPELIM-NEXT: lw a1, 8(sp)
+; RV32I-FPELIM-NEXT: add a0, a1, a0
 ; RV32I-FPELIM-NEXT: lw ra, 28(sp)
 ; RV32I-FPELIM-NEXT: addi sp, sp, 32
 ; RV32I-FPELIM-NEXT: ret
@@ -1003,9 +1007,9 @@ define i32 @caller_large_struct_ret() nounwind {
 ; RV32I-WITHFP-NEXT: addi s0, sp, 32
 ; RV32I-WITHFP-NEXT: addi a0, s0, -24
 ; RV32I-WITHFP-NEXT: call callee_large_struct_ret
-; RV32I-WITHFP-NEXT: lw a0, -24(s0)
-; RV32I-WITHFP-NEXT: lw a1, -12(s0)
-; RV32I-WITHFP-NEXT: add a0, a0, a1
+; RV32I-WITHFP-NEXT: lw a0, -12(s0)
+; RV32I-WITHFP-NEXT: lw a1, -24(s0)
+; RV32I-WITHFP-NEXT: add a0, a1, a0
 ; RV32I-WITHFP-NEXT: lw s0, 24(sp)
 ; RV32I-WITHFP-NEXT: lw ra, 28(sp)
 ; RV32I-WITHFP-NEXT: addi sp, sp, 32
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll
index 8ff3de7..ddedab5 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32.ll
@@ -107,15 +107,15 @@ define i32 @caller_float_on_stack() nounwind {
 ; RV32I-FPELIM: # %bb.0:
 ; RV32I-FPELIM-NEXT: addi sp, sp, -16
 ; RV32I-FPELIM-NEXT: sw ra, 12(sp)
-; RV32I-FPELIM-NEXT: lui a1, 264704
+; RV32I-FPELIM-NEXT: lui a0, 264704
+; RV32I-FPELIM-NEXT: sw a0, 0(sp)
 ; RV32I-FPELIM-NEXT: addi a0, zero, 1
-; RV32I-FPELIM-NEXT: addi a2, zero, 2
-; RV32I-FPELIM-NEXT: addi a4, zero, 3
-; RV32I-FPELIM-NEXT: addi a6, zero, 4
-; RV32I-FPELIM-NEXT: sw a1, 0(sp)
 ; RV32I-FPELIM-NEXT: mv a1, zero
+; RV32I-FPELIM-NEXT: addi a2, zero, 2
 ; RV32I-FPELIM-NEXT: mv a3, zero
+; RV32I-FPELIM-NEXT: addi a4, zero, 3
 ; RV32I-FPELIM-NEXT: mv a5, zero
+; RV32I-FPELIM-NEXT: addi a6, zero, 4
 ; RV32I-FPELIM-NEXT: mv a7, zero
 ; RV32I-FPELIM-NEXT: call callee_float_on_stack
 ; RV32I-FPELIM-NEXT: lw ra, 12(sp)
@@ -128,15 +128,15 @@ define i32 @caller_float_on_stack() nounwind {
 ; RV32I-WITHFP-NEXT: sw ra, 12(sp)
 ; RV32I-WITHFP-NEXT: sw s0, 8(sp)
 ; RV32I-WITHFP-NEXT: addi s0, sp, 16
-; RV32I-WITHFP-NEXT: lui a1, 264704
+; RV32I-WITHFP-NEXT: lui a0, 264704
+; RV32I-WITHFP-NEXT: sw a0, 0(sp)
 ; RV32I-WITHFP-NEXT: addi a0, zero, 1
-; RV32I-WITHFP-NEXT: addi a2, zero, 2
-; RV32I-WITHFP-NEXT: addi a4, zero, 3
-; RV32I-WITHFP-NEXT: addi a6, zero, 4
-; RV32I-WITHFP-NEXT: sw a1, 0(sp)
 ; RV32I-WITHFP-NEXT: mv a1, zero
+; RV32I-WITHFP-NEXT: addi a2, zero, 2
 ; RV32I-WITHFP-NEXT: mv a3, zero
+; RV32I-WITHFP-NEXT: addi a4, zero, 3
 ; RV32I-WITHFP-NEXT: mv a5, zero
+; RV32I-WITHFP-NEXT: addi a6, zero, 4
 ; RV32I-WITHFP-NEXT: mv a7, zero
 ; RV32I-WITHFP-NEXT: call callee_float_on_stack
 ; RV32I-WITHFP-NEXT: lw s0, 8(sp)
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
index 5ad002f..37dfa7b 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32d.ll
@@ -38,9 +38,9 @@ define i32 @caller_double_in_fpr() nounwind {
 define i32 @callee_double_in_fpr_exhausted_gprs(i64 %a, i64 %b, i64 %c, i64 %d, i32 %e, double %f) nounwind {
 ; RV32-ILP32D-LABEL: callee_double_in_fpr_exhausted_gprs:
 ; RV32-ILP32D: # %bb.0:
-; RV32-ILP32D-NEXT: lw a0, 0(sp)
-; RV32-ILP32D-NEXT: fcvt.w.d a1, fa0, rtz
-; RV32-ILP32D-NEXT: add a0, a0, a1
+; RV32-ILP32D-NEXT: fcvt.w.d a0, fa0, rtz
+; RV32-ILP32D-NEXT: lw a1, 0(sp)
+; RV32-ILP32D-NEXT: add a0, a1, a0
 ; RV32-ILP32D-NEXT: ret
 %f_fptosi = fptosi double %f to i32
 %1 = add i32 %e, %f_fptosi
@@ -52,18 +52,18 @@ define i32 @caller_double_in_fpr_exhausted_gprs() nounwind {
 ; RV32-ILP32D: # %bb.0:
 ; RV32-ILP32D-NEXT: addi sp, sp, -16
 ; RV32-ILP32D-NEXT: sw ra, 12(sp)
-; RV32-ILP32D-NEXT: addi a1, zero, 5
+; RV32-ILP32D-NEXT: addi a0, zero, 5
+; RV32-ILP32D-NEXT: sw a0, 0(sp)
 ; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI3_0)
 ; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI3_0)
 ; RV32-ILP32D-NEXT: fld fa0, 0(a0)
 ; RV32-ILP32D-NEXT: addi a0, zero, 1
-; RV32-ILP32D-NEXT: addi a2, zero, 2
-; RV32-ILP32D-NEXT: addi a4, zero, 3
-; RV32-ILP32D-NEXT: addi a6, zero, 4
-; RV32-ILP32D-NEXT: sw a1, 0(sp)
 ; RV32-ILP32D-NEXT: mv a1, zero
+; RV32-ILP32D-NEXT: addi a2, zero, 2
 ; RV32-ILP32D-NEXT: mv a3, zero
+; RV32-ILP32D-NEXT: addi a4, zero, 3
 ; RV32-ILP32D-NEXT: mv a5, zero
+; RV32-ILP32D-NEXT: addi a6, zero, 4
 ; RV32-ILP32D-NEXT: mv a7, zero
 ; RV32-ILP32D-NEXT: call callee_double_in_fpr_exhausted_gprs
 ; RV32-ILP32D-NEXT: lw ra, 12(sp)
@@ -82,9 +82,9 @@ define i32 @callee_double_in_gpr_exhausted_fprs(double %a, double %b, double %c,
 ; RV32-ILP32D-NEXT: sw a0, 8(sp)
 ; RV32-ILP32D-NEXT: sw a1, 12(sp)
 ; RV32-ILP32D-NEXT: fld ft0, 8(sp)
-; RV32-ILP32D-NEXT: fcvt.w.d a0, fa7, rtz
-; RV32-ILP32D-NEXT: fcvt.w.d a1, ft0, rtz
-; RV32-ILP32D-NEXT: add a0, a0, a1
+; RV32-ILP32D-NEXT: fcvt.w.d a0, ft0, rtz
+; RV32-ILP32D-NEXT: fcvt.w.d a1, fa7, rtz
+; RV32-ILP32D-NEXT: add a0, a1, a0
 ; RV32-ILP32D-NEXT: addi sp, sp, 16
 ; RV32-ILP32D-NEXT: ret
 %h_fptosi = fptosi double %h to i32
@@ -100,21 +100,21 @@ define i32 @caller_double_in_gpr_exhausted_fprs() nounwind {
 ; RV32-ILP32D-NEXT: sw ra, 12(sp)
 ; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI5_0)
 ; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI5_0)
-; RV32-ILP32D-NEXT: fld fa0, 0(a0)
-; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI5_1)
-; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI5_1)
-; RV32-ILP32D-NEXT: fld fa1, 0(a0)
-; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI5_2)
-; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI5_2)
-; RV32-ILP32D-NEXT: fld fa2, 0(a0)
-; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI5_3)
-; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI5_3)
-; RV32-ILP32D-NEXT: fld fa3, 0(a0)
-; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI5_4)
-; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI5_4)
-; RV32-ILP32D-NEXT: fld fa4, 0(a0)
-; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI5_5)
-; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI5_5)
+; RV32-ILP32D-NEXT: lui a1, %hi(.LCPI5_1)
+; RV32-ILP32D-NEXT: addi a1, a1, %lo(.LCPI5_1)
+; RV32-ILP32D-NEXT: lui a2, %hi(.LCPI5_2)
+; RV32-ILP32D-NEXT: addi a2, a2, %lo(.LCPI5_2)
+; RV32-ILP32D-NEXT: lui a3, %hi(.LCPI5_3)
+; RV32-ILP32D-NEXT: addi a3, a3, %lo(.LCPI5_3)
+; RV32-ILP32D-NEXT: lui a4, %hi(.LCPI5_4)
+; RV32-ILP32D-NEXT: addi a4, a4, %lo(.LCPI5_4)
+; RV32-ILP32D-NEXT: lui a5, %hi(.LCPI5_5)
+; RV32-ILP32D-NEXT: addi a5, a5, %lo(.LCPI5_5)
+; RV32-ILP32D-NEXT: fld fa0, 0(a5)
+; RV32-ILP32D-NEXT: fld fa1, 0(a4)
+; RV32-ILP32D-NEXT: fld fa2, 0(a3)
+; RV32-ILP32D-NEXT: fld fa3, 0(a2)
+; RV32-ILP32D-NEXT: fld fa4, 0(a1)
 ; RV32-ILP32D-NEXT: fld fa5, 0(a0)
 ; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI5_6)
 ; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI5_6)
@@ -122,8 +122,8 @@ define i32 @caller_double_in_gpr_exhausted_fprs() nounwind {
 ; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI5_7)
 ; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI5_7)
 ; RV32-ILP32D-NEXT: fld fa7, 0(a0)
-; RV32-ILP32D-NEXT: lui a1, 262688
 ; RV32-ILP32D-NEXT: mv a0, zero
+; RV32-ILP32D-NEXT: lui a1, 262688
 ; RV32-ILP32D-NEXT: call callee_double_in_gpr_exhausted_fprs
 ; RV32-ILP32D-NEXT: lw ra, 12(sp)
 ; RV32-ILP32D-NEXT: addi sp, sp, 16
@@ -157,39 +157,39 @@ define i32 @caller_double_in_gpr_and_stack_almost_exhausted_gprs_fprs() nounwind
 ; RV32-ILP32D: # %bb.0:
 ; RV32-ILP32D-NEXT: addi sp, sp, -16
 ; RV32-ILP32D-NEXT: sw ra, 12(sp)
-; RV32-ILP32D-NEXT: lui a1, 262816
+; RV32-ILP32D-NEXT: lui a0, 262816
+; RV32-ILP32D-NEXT: sw a0, 0(sp)
 ; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI7_0)
-; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI7_0)
-; RV32-ILP32D-NEXT: fld fa0, 0(a0)
-; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI7_1)
-; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI7_1)
-; RV32-ILP32D-NEXT: fld fa1, 0(a0)
-; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI7_2)
-; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI7_2)
-; RV32-ILP32D-NEXT: fld fa2, 0(a0)
-; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI7_3)
-; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI7_3)
-; RV32-ILP32D-NEXT: fld fa3, 0(a0)
-; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI7_4)
-; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI7_4)
-; RV32-ILP32D-NEXT: fld fa4, 0(a0)
-; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI7_5)
-; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI7_5)
-; RV32-ILP32D-NEXT: fld fa5, 0(a0)
+; RV32-ILP32D-NEXT: addi a6, a0, %lo(.LCPI7_0)
+; RV32-ILP32D-NEXT: lui a1, %hi(.LCPI7_1)
+; RV32-ILP32D-NEXT: addi a1, a1, %lo(.LCPI7_1)
+; RV32-ILP32D-NEXT: lui a2, %hi(.LCPI7_2)
+; RV32-ILP32D-NEXT: addi a2, a2, %lo(.LCPI7_2)
+; RV32-ILP32D-NEXT: lui a3, %hi(.LCPI7_3)
+; RV32-ILP32D-NEXT: addi a3, a3, %lo(.LCPI7_3)
+; RV32-ILP32D-NEXT: lui a4, %hi(.LCPI7_4)
+; RV32-ILP32D-NEXT: addi a4, a4, %lo(.LCPI7_4)
+; RV32-ILP32D-NEXT: lui a5, %hi(.LCPI7_5)
+; RV32-ILP32D-NEXT: addi a5, a5, %lo(.LCPI7_5)
 ; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI7_6)
 ; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI7_6)
-; RV32-ILP32D-NEXT: fld fa6, 0(a0)
+; RV32-ILP32D-NEXT: fld fa0, 0(a0)
+; RV32-ILP32D-NEXT: fld fa1, 0(a5)
+; RV32-ILP32D-NEXT: fld fa2, 0(a4)
+; RV32-ILP32D-NEXT: fld fa3, 0(a3)
+; RV32-ILP32D-NEXT: fld fa4, 0(a2)
+; RV32-ILP32D-NEXT: fld fa5, 0(a1)
+; RV32-ILP32D-NEXT: fld fa6, 0(a6)
 ; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI7_7)
 ; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI7_7)
 ; RV32-ILP32D-NEXT: fld fa7, 0(a0)
 ; RV32-ILP32D-NEXT: addi a0, zero, 1
-; RV32-ILP32D-NEXT: addi a2, zero, 3
-; RV32-ILP32D-NEXT: addi a4, zero, 5
-; RV32-ILP32D-NEXT: addi a6, zero, 7
-; RV32-ILP32D-NEXT: sw a1, 0(sp)
 ; RV32-ILP32D-NEXT: mv a1, zero
+; RV32-ILP32D-NEXT: addi a2, zero, 3
 ; RV32-ILP32D-NEXT: mv a3, zero
+; RV32-ILP32D-NEXT: addi a4, zero, 5
 ; RV32-ILP32D-NEXT: mv a5, zero
+; RV32-ILP32D-NEXT: addi a6, zero, 7
 ; RV32-ILP32D-NEXT: mv a7, zero
 ; RV32-ILP32D-NEXT: call callee_double_in_gpr_and_stack_almost_exhausted_gprs_fprs
 ; RV32-ILP32D-NEXT: lw ra, 12(sp)
@@ -223,38 +223,38 @@ define i32 @caller_double_on_stack_exhausted_gprs_fprs() nounwind {
 ; RV32-ILP32D-NEXT: sw ra, 12(sp)
 ; RV32-ILP32D-NEXT: lui a0, 262816
 ; RV32-ILP32D-NEXT: sw a0, 4(sp)
+; RV32-ILP32D-NEXT: sw zero, 0(sp)
 ; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI9_0)
-; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI9_0)
-; RV32-ILP32D-NEXT: fld fa0, 0(a0)
-; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI9_1)
-; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI9_1)
-; RV32-ILP32D-NEXT: fld fa1, 0(a0)
-; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI9_2)
-; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI9_2)
-; RV32-ILP32D-NEXT: fld fa2, 0(a0)
-; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI9_3)
-; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI9_3)
-; RV32-ILP32D-NEXT: fld fa3, 0(a0)
-; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI9_4)
-; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI9_4)
-; RV32-ILP32D-NEXT: fld fa4, 0(a0)
-; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI9_5)
-; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI9_5)
-; RV32-ILP32D-NEXT: fld fa5, 0(a0)
+; RV32-ILP32D-NEXT: addi a6, a0, %lo(.LCPI9_0)
+; RV32-ILP32D-NEXT: lui a1, %hi(.LCPI9_1)
+; RV32-ILP32D-NEXT: addi a1, a1, %lo(.LCPI9_1)
+; RV32-ILP32D-NEXT: lui a2, %hi(.LCPI9_2)
+; RV32-ILP32D-NEXT: addi a2, a2, %lo(.LCPI9_2)
+; RV32-ILP32D-NEXT: lui a3, %hi(.LCPI9_3)
+; RV32-ILP32D-NEXT: addi a3, a3, %lo(.LCPI9_3)
+; RV32-ILP32D-NEXT: lui a4, %hi(.LCPI9_4)
+; RV32-ILP32D-NEXT: addi a4, a4, %lo(.LCPI9_4)
+; RV32-ILP32D-NEXT: lui a5, %hi(.LCPI9_5)
+; RV32-ILP32D-NEXT: addi a5, a5, %lo(.LCPI9_5)
 ; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI9_6)
 ; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI9_6)
-; RV32-ILP32D-NEXT: fld fa6, 0(a0)
+; RV32-ILP32D-NEXT: fld fa0, 0(a0)
+; RV32-ILP32D-NEXT: fld fa1, 0(a5)
+; RV32-ILP32D-NEXT: fld fa2, 0(a4)
+; RV32-ILP32D-NEXT: fld fa3, 0(a3)
+; RV32-ILP32D-NEXT: fld fa4, 0(a2)
+; RV32-ILP32D-NEXT: fld fa5, 0(a1)
+; RV32-ILP32D-NEXT: fld fa6, 0(a6)
 ; RV32-ILP32D-NEXT: lui a0, %hi(.LCPI9_7)
 ; RV32-ILP32D-NEXT: addi a0, a0, %lo(.LCPI9_7)
 ; RV32-ILP32D-NEXT: fld fa7, 0(a0)
 ; RV32-ILP32D-NEXT: addi a0, zero, 1
-; RV32-ILP32D-NEXT: addi a2, zero, 3
-; RV32-ILP32D-NEXT: addi a4, zero, 5
-; RV32-ILP32D-NEXT: addi a6, zero, 7
-; RV32-ILP32D-NEXT: sw zero, 0(sp)
 ; RV32-ILP32D-NEXT: mv a1, zero
+; RV32-ILP32D-NEXT: addi a2, zero, 3
 ; RV32-ILP32D-NEXT: mv a3, zero
+; RV32-ILP32D-NEXT: addi a4, zero, 5
 ; RV32-ILP32D-NEXT: mv a5, zero
+; RV32-ILP32D-NEXT: addi a6, zero, 7
 ; RV32-ILP32D-NEXT: mv a7, zero
 ; RV32-ILP32D-NEXT: call callee_double_on_stack_exhausted_gprs_fprs
 ; RV32-ILP32D-NEXT: lw ra, 12(sp)
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
index 70b2505..5f3042d 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-ilp32f-ilp32d-common.ll
@@ -41,9 +41,9 @@ define i32 @caller_float_in_fpr() nounwind {
 define i32 @callee_float_in_fpr_exhausted_gprs(i64 %a, i64 %b, i64 %c, i64 %d, i32 %e, float %f) nounwind {
 ; RV32-ILP32FD-LABEL: callee_float_in_fpr_exhausted_gprs:
 ; RV32-ILP32FD: # %bb.0:
-; RV32-ILP32FD-NEXT: lw a0, 0(sp)
-; RV32-ILP32FD-NEXT: fcvt.w.s a1, fa0, rtz
-; RV32-ILP32FD-NEXT: add a0, a0, a1
+; RV32-ILP32FD-NEXT: fcvt.w.s a0, fa0, rtz
+; RV32-ILP32FD-NEXT: lw a1, 0(sp)
+; RV32-ILP32FD-NEXT: add a0, a1, a0
 ; RV32-ILP32FD-NEXT: ret
 %f_fptosi = fptosi float %f to i32
 %1 = add i32 %e, %f_fptosi
@@ -55,18 +55,18 @@ define i32 @caller_float_in_fpr_exhausted_gprs() nounwind {
 ; RV32-ILP32FD: # %bb.0:
 ; RV32-ILP32FD-NEXT: addi sp, sp, -16
 ; RV32-ILP32FD-NEXT: sw ra, 12(sp)
-; RV32-ILP32FD-NEXT: addi a1, zero, 5
+; RV32-ILP32FD-NEXT: addi a0, zero, 5
+; RV32-ILP32FD-NEXT: sw a0, 0(sp)
 ; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI3_0)
 ; RV32-ILP32FD-NEXT: addi a0, a0, %lo(.LCPI3_0)
 ; RV32-ILP32FD-NEXT: flw fa0, 0(a0)
 ; RV32-ILP32FD-NEXT: addi a0, zero, 1
-; RV32-ILP32FD-NEXT: addi a2, zero, 2
-; RV32-ILP32FD-NEXT: addi a4, zero, 3
-; RV32-ILP32FD-NEXT: addi a6, zero, 4
-; RV32-ILP32FD-NEXT: sw a1, 0(sp)
 ; RV32-ILP32FD-NEXT: mv a1, zero
+; RV32-ILP32FD-NEXT: addi a2, zero, 2
 ; RV32-ILP32FD-NEXT: mv a3, zero
+; RV32-ILP32FD-NEXT: addi a4, zero, 3
 ; RV32-ILP32FD-NEXT: mv a5, zero
+; RV32-ILP32FD-NEXT: addi a6, zero, 4
 ; RV32-ILP32FD-NEXT: mv a7, zero
 ; RV32-ILP32FD-NEXT: call callee_float_in_fpr_exhausted_gprs
 ; RV32-ILP32FD-NEXT: lw ra, 12(sp)
@@ -81,10 +81,10 @@ define i32 @caller_float_in_fpr_exhausted_gprs() nounwind {
 define i32 @callee_float_in_gpr_exhausted_fprs(float %a, float %b, float %c, float %d, float %e, float %f, float %g, float %h, float %i) nounwind {
 ; RV32-ILP32FD-LABEL: callee_float_in_gpr_exhausted_fprs:
 ; RV32-ILP32FD: # %bb.0:
+; RV32-ILP32FD-NEXT: fcvt.w.s a1, fa7, rtz
 ; RV32-ILP32FD-NEXT: fmv.w.x ft0, a0
-; RV32-ILP32FD-NEXT: fcvt.w.s a0, fa7, rtz
-; RV32-ILP32FD-NEXT: fcvt.w.s a1, ft0, rtz
-; RV32-ILP32FD-NEXT: add a0, a0, a1
+; RV32-ILP32FD-NEXT: fcvt.w.s a0, ft0, rtz
+; RV32-ILP32FD-NEXT: add a0, a1, a0
 ; RV32-ILP32FD-NEXT: ret
 %h_fptosi = fptosi float %h to i32
 %i_fptosi = fptosi float %i to i32
@@ -99,21 +99,21 @@ define i32 @caller_float_in_gpr_exhausted_fprs() nounwind {
 ; RV32-ILP32FD-NEXT: sw ra, 12(sp)
 ; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI5_0)
 ; RV32-ILP32FD-NEXT: addi a0, a0, %lo(.LCPI5_0)
-; RV32-ILP32FD-NEXT: flw fa0, 0(a0)
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI5_1)
-; RV32-ILP32FD-NEXT: addi a0, a0, %lo(.LCPI5_1)
-; RV32-ILP32FD-NEXT: flw fa1, 0(a0)
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI5_2)
-; RV32-ILP32FD-NEXT: addi a0, a0, %lo(.LCPI5_2)
-; RV32-ILP32FD-NEXT: flw fa2, 0(a0)
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI5_3)
-; RV32-ILP32FD-NEXT: addi a0, a0, %lo(.LCPI5_3)
-; RV32-ILP32FD-NEXT: flw fa3, 0(a0)
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI5_4)
-; RV32-ILP32FD-NEXT: addi a0, a0, %lo(.LCPI5_4)
-; RV32-ILP32FD-NEXT: flw fa4, 0(a0)
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI5_5)
-; RV32-ILP32FD-NEXT: addi a0, a0, %lo(.LCPI5_5)
+; RV32-ILP32FD-NEXT: lui a1, %hi(.LCPI5_1)
+; RV32-ILP32FD-NEXT: addi a1, a1, %lo(.LCPI5_1)
+; RV32-ILP32FD-NEXT: lui a2, %hi(.LCPI5_2)
+; RV32-ILP32FD-NEXT: addi a2, a2, %lo(.LCPI5_2)
+; RV32-ILP32FD-NEXT: lui a3, %hi(.LCPI5_3)
+; RV32-ILP32FD-NEXT: addi a3, a3, %lo(.LCPI5_3)
+; RV32-ILP32FD-NEXT: lui a4, %hi(.LCPI5_4)
+; RV32-ILP32FD-NEXT: addi a4, a4, %lo(.LCPI5_4)
+; RV32-ILP32FD-NEXT: lui a5, %hi(.LCPI5_5)
+; RV32-ILP32FD-NEXT: addi a5, a5, %lo(.LCPI5_5)
+; RV32-ILP32FD-NEXT: flw fa0, 0(a5)
+; RV32-ILP32FD-NEXT: flw fa1, 0(a4)
+; RV32-ILP32FD-NEXT: flw fa2, 0(a3)
+; RV32-ILP32FD-NEXT: flw fa3, 0(a2)
+; RV32-ILP32FD-NEXT: flw fa4, 0(a1)
 ; RV32-ILP32FD-NEXT: flw fa5, 0(a0)
 ; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI5_6)
 ; RV32-ILP32FD-NEXT: addi a0, a0, %lo(.LCPI5_6)
@@ -151,39 +151,39 @@ define i32 @caller_float_on_stack_exhausted_gprs_fprs() nounwind {
 ; RV32-ILP32FD: # %bb.0:
 ; RV32-ILP32FD-NEXT: addi sp, sp, -16
 ; RV32-ILP32FD-NEXT: sw ra, 12(sp)
-; RV32-ILP32FD-NEXT: lui a1, 267520
+; RV32-ILP32FD-NEXT: lui a0, 267520
+; RV32-ILP32FD-NEXT: sw a0, 0(sp)
 ; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI7_0)
-; RV32-ILP32FD-NEXT: addi a0, a0, %lo(.LCPI7_0)
-; RV32-ILP32FD-NEXT: flw fa0, 0(a0)
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI7_1)
-; RV32-ILP32FD-NEXT: addi a0, a0, %lo(.LCPI7_1)
-; RV32-ILP32FD-NEXT: flw fa1, 0(a0)
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI7_2)
-; RV32-ILP32FD-NEXT: addi a0, a0, %lo(.LCPI7_2)
-; RV32-ILP32FD-NEXT: flw fa2, 0(a0)
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI7_3)
-; RV32-ILP32FD-NEXT: addi a0, a0, %lo(.LCPI7_3)
-; RV32-ILP32FD-NEXT: flw fa3, 0(a0)
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI7_4)
-; RV32-ILP32FD-NEXT: addi a0, a0, %lo(.LCPI7_4)
-; RV32-ILP32FD-NEXT: flw fa4, 0(a0)
-; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI7_5)
-; RV32-ILP32FD-NEXT: addi a0, a0, %lo(.LCPI7_5)
-; RV32-ILP32FD-NEXT: flw fa5, 0(a0)
+; RV32-ILP32FD-NEXT: addi a6, a0, %lo(.LCPI7_0)
+; RV32-ILP32FD-NEXT: lui a1, %hi(.LCPI7_1)
+; RV32-ILP32FD-NEXT: addi a1, a1, %lo(.LCPI7_1)
+; RV32-ILP32FD-NEXT: lui a2, %hi(.LCPI7_2)
+; RV32-ILP32FD-NEXT: addi a2, a2, %lo(.LCPI7_2)
+; RV32-ILP32FD-NEXT: lui a3, %hi(.LCPI7_3)
+; RV32-ILP32FD-NEXT: addi a3, a3, %lo(.LCPI7_3)
+; RV32-ILP32FD-NEXT: lui a4, %hi(.LCPI7_4)
+; RV32-ILP32FD-NEXT: addi a4, a4, %lo(.LCPI7_4)
+; RV32-ILP32FD-NEXT: lui a5, %hi(.LCPI7_5)
+; RV32-ILP32FD-NEXT: addi a5, a5, %lo(.LCPI7_5)
 ; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI7_6)
 ; RV32-ILP32FD-NEXT: addi a0, a0, %lo(.LCPI7_6)
-; RV32-ILP32FD-NEXT: flw fa6, 0(a0)
+; RV32-ILP32FD-NEXT: flw fa0, 0(a0)
+; RV32-ILP32FD-NEXT: flw fa1, 0(a5)
+; RV32-ILP32FD-NEXT: flw fa2, 0(a4)
+; RV32-ILP32FD-NEXT: flw fa3, 0(a3)
+; RV32-ILP32FD-NEXT: flw fa4, 0(a2)
+; RV32-ILP32FD-NEXT: flw fa5, 0(a1)
+; RV32-ILP32FD-NEXT: flw fa6, 0(a6)
 ; RV32-ILP32FD-NEXT: lui a0, %hi(.LCPI7_7)
 ; RV32-ILP32FD-NEXT: addi a0, a0, %lo(.LCPI7_7)
 ; RV32-ILP32FD-NEXT: flw fa7, 0(a0)
 ; RV32-ILP32FD-NEXT: addi a0, zero, 1
-; RV32-ILP32FD-NEXT: addi a2, zero, 3
-; RV32-ILP32FD-NEXT: addi a4, zero, 5
-; RV32-ILP32FD-NEXT: addi a6, zero, 7
-; RV32-ILP32FD-NEXT: sw a1, 0(sp)
 ; RV32-ILP32FD-NEXT: mv a1, zero
+; RV32-ILP32FD-NEXT: addi a2, zero, 3
 ; RV32-ILP32FD-NEXT: mv a3, zero
+; RV32-ILP32FD-NEXT: addi a4, zero, 5
 ; RV32-ILP32FD-NEXT: mv a5, zero
+; RV32-ILP32FD-NEXT: addi a6, zero, 7
 ; RV32-ILP32FD-NEXT: mv a7, zero
 ; RV32-ILP32FD-NEXT: call callee_float_on_stack_exhausted_gprs_fprs
 ; RV32-ILP32FD-NEXT: lw ra, 12(sp)
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
index 95cf39e..23bb630 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64-lp64f-lp64d-common.ll
@@ -48,22 +48,22 @@ define i64 @caller_i128_in_regs() nounwind {
 define i32 @callee_many_scalars(i8 %a, i16 %b, i32 %c, i128 %d, i32 %e, i32 %f, i128 %g, i32 %h) nounwind {
 ; RV64I-LABEL: callee_many_scalars:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: lw t0, 8(sp)
-; RV64I-NEXT: ld t1, 0(sp)
-; RV64I-NEXT: andi t2, a0, 255
-; RV64I-NEXT: lui a0, 16
-; RV64I-NEXT: addiw a0, a0, -1
-; RV64I-NEXT: and a0, a1, a0
-; RV64I-NEXT: add a0, t2, a0
+; RV64I-NEXT: ld t0, 0(sp)
+; RV64I-NEXT: xor a4, a4, t0
+; RV64I-NEXT: xor a3, a3, a7
+; RV64I-NEXT: or a3, a3, a4
+; RV64I-NEXT: lui a4, 16
+; RV64I-NEXT: addiw a4, a4, -1
+; RV64I-NEXT: and a1, a1, a4
+; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: add a0, a0, a1
 ; RV64I-NEXT: add a0, a0, a2
-; RV64I-NEXT: xor a1, a4, t1
-; RV64I-NEXT: xor a2, a3, a7
-; RV64I-NEXT: or a1, a2, a1
-; RV64I-NEXT: seqz a1, a1
+; RV64I-NEXT: seqz a1, a3
 ; RV64I-NEXT: add a0, a1, a0
 ; RV64I-NEXT: add a0, a0, a5
 ; RV64I-NEXT: add a0, a0, a6
-; RV64I-NEXT: addw a0, a0, t0
+; RV64I-NEXT: lw a1, 8(sp)
+; RV64I-NEXT: addw a0, a0, a1
 ; RV64I-NEXT: ret
 %a_ext = zext i8 %a to i32
 %b_ext = zext i16 %b to i32
@@ -85,15 +85,15 @@ define i32 @caller_many_scalars() nounwind {
 ; RV64I-NEXT: sd ra, 24(sp)
 ; RV64I-NEXT: addi a0, zero, 8
 ; RV64I-NEXT: sd a0, 8(sp)
+; RV64I-NEXT: sd zero, 0(sp)
 ; RV64I-NEXT: addi a0, zero, 1
 ; RV64I-NEXT: addi a1, zero, 2
 ; RV64I-NEXT: addi a2, zero, 3
 ; RV64I-NEXT: addi a3, zero, 4
+; RV64I-NEXT: mv a4, zero
 ; RV64I-NEXT: addi a5, zero, 5
 ; RV64I-NEXT: addi a6, zero, 6
 ; RV64I-NEXT: addi a7, zero, 7
-; RV64I-NEXT: sd zero, 0(sp)
-; RV64I-NEXT: mv a4, zero
 ; RV64I-NEXT: call callee_many_scalars
 ; RV64I-NEXT: ld ra, 24(sp)
 ; RV64I-NEXT: addi sp, sp, 32
@@ -107,20 +107,20 @@ define i32 @caller_many_scalars() nounwind {
 define i64 @callee_large_scalars(i256 %a, i256 %b) nounwind {
 ; RV64I-LABEL: callee_large_scalars:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: ld a6, 0(a1)
-; RV64I-NEXT: ld a7, 0(a0)
-; RV64I-NEXT: ld a4, 8(a1)
-; RV64I-NEXT: ld a5, 24(a1)
-; RV64I-NEXT: ld a2, 24(a0)
-; RV64I-NEXT: ld a3, 8(a0)
-; RV64I-NEXT: ld a1, 16(a1)
-; RV64I-NEXT: ld a0, 16(a0)
-; RV64I-NEXT: xor a2, a2, a5
-; RV64I-NEXT: xor a3, a3, a4
+; RV64I-NEXT: ld a2, 24(a1)
+; RV64I-NEXT: ld a3, 24(a0)
+; RV64I-NEXT: xor a2, a3, a2
+; RV64I-NEXT: ld a3, 8(a1)
+; RV64I-NEXT: ld a4, 8(a0)
+; RV64I-NEXT: xor a3, a4, a3
 ; RV64I-NEXT: or a2, a3, a2
+; RV64I-NEXT: ld a3, 16(a1)
+; RV64I-NEXT: ld a4, 16(a0)
+; RV64I-NEXT: xor a3, a4, a3
+; RV64I-NEXT: ld a1, 0(a1)
+; RV64I-NEXT: ld a0, 0(a0)
 ; RV64I-NEXT: xor a0, a0, a1
-; RV64I-NEXT: xor a1, a7, a6
-; RV64I-NEXT: or a0, a1, a0
+; RV64I-NEXT: or a0, a0, a3
 ; RV64I-NEXT: or a0, a0, a2
 ; RV64I-NEXT: seqz a0, a0
 ; RV64I-NEXT: ret
@@ -134,18 +134,18 @@ define i64 @caller_large_scalars() nounwind {
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: addi sp, sp, -80
 ; RV64I-NEXT: sd ra, 72(sp)
+; RV64I-NEXT: addi a0, zero, 2
+; RV64I-NEXT: sd a0, 0(sp)
 ; RV64I-NEXT: sd zero, 24(sp)
 ; RV64I-NEXT: sd zero, 16(sp)
 ; RV64I-NEXT: sd zero, 8(sp)
-; RV64I-NEXT: addi a0, zero, 2
-; RV64I-NEXT: sd a0, 0(sp)
 ; RV64I-NEXT: sd zero, 56(sp)
 ; RV64I-NEXT: sd zero, 48(sp)
 ; RV64I-NEXT: sd zero, 40(sp)
-; RV64I-NEXT: addi a2, zero, 1
+; RV64I-NEXT: addi a0, zero, 1
+; RV64I-NEXT: sd a0, 32(sp)
 ; RV64I-NEXT: addi a0, sp, 32
 ; RV64I-NEXT: mv a1, sp
-; RV64I-NEXT: sd a2, 32(sp)
 ; RV64I-NEXT: call callee_large_scalars
 ; RV64I-NEXT: ld ra, 72(sp)
 ; RV64I-NEXT: addi sp, sp, 80
@@ -162,20 +162,20 @@ define i64 @callee_large_scalars_exhausted_regs(i64 %a, i64 %b, i64 %c, i64 %d,
 ; RV64I-LABEL: callee_large_scalars_exhausted_regs:
 ; RV64I: # %bb.0:
 ; RV64I-NEXT: ld a0, 8(sp)
-; RV64I-NEXT: ld a6, 0(a0)
-; RV64I-NEXT: ld t0, 0(a7)
-; RV64I-NEXT: ld a3, 8(a0)
-; RV64I-NEXT: ld a4, 24(a0)
-; RV64I-NEXT: ld a5, 24(a7)
-; RV64I-NEXT: ld a1, 8(a7)
-; RV64I-NEXT: ld a0, 16(a0)
-; RV64I-NEXT: ld a2, 16(a7)
-; RV64I-NEXT: xor a4, a5, a4
-; RV64I-NEXT: xor a1, a1, a3
-; RV64I-NEXT: or a1, a1, a4
-; RV64I-NEXT: xor a0, a2, a0
-; RV64I-NEXT: xor a2, t0, a6
-; RV64I-NEXT: or a0, a2, a0
+; RV64I-NEXT: ld a1, 24(a0)
+; RV64I-NEXT: ld a2, 24(a7)
+; RV64I-NEXT: xor a1, a2, a1
+; RV64I-NEXT: ld a2, 8(a0)
+; RV64I-NEXT: ld a3, 8(a7)
+; RV64I-NEXT: xor a2, a3, a2
+; RV64I-NEXT: or a1, a2, a1
+; RV64I-NEXT: ld a2, 16(a0)
+; RV64I-NEXT: ld a3, 16(a7)
+; RV64I-NEXT: xor a2, a3, a2
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: ld a3, 0(a7)
+; RV64I-NEXT: xor a0, a3, a0
+; RV64I-NEXT: or a0, a0, a2
 ; RV64I-NEXT: or a0, a0, a1
 ; RV64I-NEXT: seqz a0, a0
 ; RV64I-NEXT: ret
@@ -193,15 +193,16 @@ define i64 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV64I-NEXT: sd a0, 8(sp)
 ; RV64I-NEXT: addi a0, zero, 9
 ; RV64I-NEXT: sd a0, 0(sp)
+; RV64I-NEXT: addi a0, zero, 10
+; RV64I-NEXT: sd a0, 16(sp)
 ; RV64I-NEXT: sd zero, 40(sp)
 ; RV64I-NEXT: sd zero, 32(sp)
 ; RV64I-NEXT: sd zero, 24(sp)
-; RV64I-NEXT: addi a0, zero, 10
-; RV64I-NEXT: sd a0, 16(sp)
 ; RV64I-NEXT: sd zero, 72(sp)
 ; RV64I-NEXT: sd zero, 64(sp)
 ; RV64I-NEXT: sd zero, 56(sp)
-; RV64I-NEXT: addi t0, zero, 8
+; RV64I-NEXT: addi a0, zero, 8
+; RV64I-NEXT: sd a0, 48(sp)
 ; RV64I-NEXT: addi a7, sp, 48
 ; RV64I-NEXT: addi a0, zero, 1
 ; RV64I-NEXT: addi a1, zero, 2
@@ -210,7 +211,6 @@ define i64 @caller_large_scalars_exhausted_regs() nounwind {
 ; RV64I-NEXT: addi a4, zero, 5
 ; RV64I-NEXT: addi a5, zero, 6
 ; RV64I-NEXT: addi a6, zero, 7
-; RV64I-NEXT: sd t0, 48(sp)
 ; RV64I-NEXT: call callee_large_scalars_exhausted_regs
 ; RV64I-NEXT: ld ra, 88(sp)
 ; RV64I-NEXT: addi sp, sp, 96
@@ -277,9 +277,9 @@ define i64 @caller_small_coerced_struct() nounwind {
 define i64 @callee_large_struct(%struct.large* byval align 8 %a) nounwind {
 ; RV64I-LABEL: callee_large_struct:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: ld a1, 0(a0)
-; RV64I-NEXT: ld a0, 24(a0)
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: ld a1, 24(a0)
+; RV64I-NEXT: ld a0, 0(a0)
+; RV64I-NEXT: add a0, a0, a1
 ; RV64I-NEXT: ret
 %1 = getelementptr inbounds %struct.large, %struct.large* %a, i64 0, i32 0
 %2 = getelementptr inbounds %struct.large, %struct.large* %a, i64 0, i32 3
@@ -296,16 +296,16 @@ define i64 @caller_large_struct() nounwind {
 ; RV64I-NEXT: sd ra, 72(sp)
 ; RV64I-NEXT: addi a0, zero, 1
 ; RV64I-NEXT: sd a0, 40(sp)
-; RV64I-NEXT: addi a1, zero, 2
-; RV64I-NEXT: sd a1, 48(sp)
-; RV64I-NEXT: addi a2, zero, 3
-; RV64I-NEXT: sd a2, 56(sp)
-; RV64I-NEXT: addi a3, zero, 4
-; RV64I-NEXT: sd a3, 64(sp)
 ; RV64I-NEXT: sd a0, 8(sp)
-; RV64I-NEXT: sd a1, 16(sp)
-; RV64I-NEXT: sd a2, 24(sp)
-; RV64I-NEXT: sd a3, 32(sp)
+; RV64I-NEXT: addi a0, zero, 2
+; RV64I-NEXT: sd a0, 48(sp)
+; RV64I-NEXT: sd a0, 16(sp)
+; RV64I-NEXT: addi a0, zero, 3
+; RV64I-NEXT: sd a0, 56(sp)
+; RV64I-NEXT: sd a0, 24(sp)
+; RV64I-NEXT: addi a0, zero, 4
+; RV64I-NEXT: sd a0, 64(sp)
+; RV64I-NEXT: sd a0, 32(sp)
 ; RV64I-NEXT: addi a0, sp, 8
 ; RV64I-NEXT: call callee_large_struct
 ; RV64I-NEXT: ld ra, 72(sp)
@@ -332,15 +332,15 @@ define i64 @callee_aligned_stack(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i128 %f
 ; should only be 8-byte aligned
 ; RV64I-LABEL: callee_aligned_stack:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: ld a0, 40(sp)
+; RV64I-NEXT: add a0, a5, a7
 ; RV64I-NEXT: ld a1, 0(sp)
-; RV64I-NEXT: ld a2, 16(sp)
-; RV64I-NEXT: ld a3, 32(sp)
-; RV64I-NEXT: add a4, a5, a7
-; RV64I-NEXT: add a1, a4, a1
-; RV64I-NEXT: add a1, a1, a2
-; RV64I-NEXT: add a1, a1, a3
-; RV64I-NEXT: add a0, a1, a0
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ld a1, 16(sp)
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ld a1, 32(sp)
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ld a1, 40(sp)
+; RV64I-NEXT: add a0, a0, a1
 ; RV64I-NEXT: ret
 %f_trunc = trunc i128 %f to i64
 %1 = add i64 %f_trunc, %g
@@ -366,19 +366,19 @@ define void @caller_aligned_stack() nounwind {
 ; RV64I-NEXT: sd a0, 40(sp)
 ; RV64I-NEXT: addi a0, zero, 10
 ; RV64I-NEXT: sd a0, 32(sp)
-; RV64I-NEXT: sd zero, 24(sp)
 ; RV64I-NEXT: addi a0, zero, 9
 ; RV64I-NEXT: sd a0, 16(sp)
-; RV64I-NEXT: addi a6, zero, 8
+; RV64I-NEXT: addi a0, zero, 8
+; RV64I-NEXT: sd a0, 0(sp)
+; RV64I-NEXT: sd zero, 24(sp)
 ; RV64I-NEXT: addi a0, zero, 1
 ; RV64I-NEXT: addi a1, zero, 2
 ; RV64I-NEXT: addi a2, zero, 3
 ; RV64I-NEXT: addi a3, zero, 4
 ; RV64I-NEXT: addi a4, zero, 5
 ; RV64I-NEXT: addi a5, zero, 6
-; RV64I-NEXT: addi a7, zero, 7
-; RV64I-NEXT: sd a6, 0(sp)
 ; RV64I-NEXT: mv a6, zero
+; RV64I-NEXT: addi a7, zero, 7
 ; RV64I-NEXT: call callee_aligned_stack
 ; RV64I-NEXT: ld ra, 56(sp)
 ; RV64I-NEXT: addi sp, sp, 64
@@ -482,18 +482,18 @@ define void @caller_large_scalar_ret() nounwind {
 define void @callee_large_struct_ret(%struct.large* noalias sret %agg.result) nounwind {
 ; RV64I-LABEL: callee_large_struct_ret:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: sw zero, 4(a0)
-; RV64I-NEXT: addi a1, zero, 1
-; RV64I-NEXT: sw a1, 0(a0)
-; RV64I-NEXT: sw zero, 12(a0)
-; RV64I-NEXT: addi a1, zero, 2
-; RV64I-NEXT: sw a1, 8(a0)
-; RV64I-NEXT: sw zero, 20(a0)
+; RV64I-NEXT: addi a1, zero, 4
+; RV64I-NEXT: sw a1, 24(a0)
 ; RV64I-NEXT: addi a1, zero, 3
 ; RV64I-NEXT: sw a1, 16(a0)
+; RV64I-NEXT: addi a1, zero, 2
+; RV64I-NEXT: sw a1, 8(a0)
 ; RV64I-NEXT: sw zero, 28(a0)
-; RV64I-NEXT: addi a1, zero, 4
-; RV64I-NEXT: sw a1, 24(a0)
+; RV64I-NEXT: sw zero, 20(a0)
+; RV64I-NEXT: sw zero, 12(a0)
+; RV64I-NEXT: sw zero, 4(a0)
+; RV64I-NEXT: addi a1, zero, 1
+; RV64I-NEXT: sw a1, 0(a0)
 ; RV64I-NEXT: ret
 %a = getelementptr inbounds %struct.large, %struct.large* %agg.result, i64 0, i32 0
 store i64 1, i64* %a, align 4
@@ -513,9 +513,9 @@ define i64 @caller_large_struct_ret() nounwind {
 ; RV64I-NEXT: sd ra, 40(sp)
 ; RV64I-NEXT: addi a0, sp, 8
 ; RV64I-NEXT: call callee_large_struct_ret
-; RV64I-NEXT: ld a0, 8(sp)
-; RV64I-NEXT: ld a1, 32(sp)
-; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: ld a0, 32(sp)
+; RV64I-NEXT: ld a1, 8(sp)
+; RV64I-NEXT: add a0, a1, a0
 ; RV64I-NEXT: ld ra, 40(sp)
 ; RV64I-NEXT: addi sp, sp, 48
 ; RV64I-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll b/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll
index 3135fcd..3e6941c 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-lp64.ll
@@ -108,15 +108,15 @@ define i64 @caller_float_on_stack() nounwind {
 ; RV64I-FPELIM: # %bb.0:
 ; RV64I-FPELIM-NEXT: addi sp, sp, -16
 ; RV64I-FPELIM-NEXT: sd ra, 8(sp)
-; RV64I-FPELIM-NEXT: lui a1, 264704
+; RV64I-FPELIM-NEXT: lui a0, 264704
+; RV64I-FPELIM-NEXT: sd a0, 0(sp)
 ; RV64I-FPELIM-NEXT: addi a0, zero, 1
-; RV64I-FPELIM-NEXT: addi a2, zero, 2
-; RV64I-FPELIM-NEXT: addi a4, zero, 3
-; RV64I-FPELIM-NEXT: addi a6, zero, 4
-; RV64I-FPELIM-NEXT: sd a1, 0(sp)
 ; RV64I-FPELIM-NEXT: mv a1, zero
+; RV64I-FPELIM-NEXT: addi a2, zero, 2
 ; RV64I-FPELIM-NEXT: mv a3, zero
+; RV64I-FPELIM-NEXT: addi a4, zero, 3
 ; RV64I-FPELIM-NEXT: mv a5, zero
+; RV64I-FPELIM-NEXT: addi a6, zero, 4
 ; RV64I-FPELIM-NEXT: mv a7, zero
 ; RV64I-FPELIM-NEXT: call callee_float_on_stack
 ; RV64I-FPELIM-NEXT: ld ra, 8(sp)
@@ -129,15 +129,15 @@ define i64 @caller_float_on_stack() nounwind {
 ; RV64I-WITHFP-NEXT: sd ra, 24(sp)
 ; RV64I-WITHFP-NEXT: sd s0, 16(sp)
 ; RV64I-WITHFP-NEXT: addi s0, sp, 32
-; RV64I-WITHFP-NEXT: lui a1, 264704
+; RV64I-WITHFP-NEXT: lui a0, 264704
+; RV64I-WITHFP-NEXT: sd a0, 0(sp)
 ; RV64I-WITHFP-NEXT: addi a0, zero, 1
-; RV64I-WITHFP-NEXT: addi a2, zero, 2
-; RV64I-WITHFP-NEXT: addi a4, zero, 3
-; RV64I-WITHFP-NEXT: addi a6, zero, 4
-; RV64I-WITHFP-NEXT: sd a1, 0(sp)
 ; RV64I-WITHFP-NEXT: mv a1, zero
+; RV64I-WITHFP-NEXT: addi a2, zero, 2
 ; RV64I-WITHFP-NEXT: mv a3, zero
+; RV64I-WITHFP-NEXT: addi a4, zero, 3
 ; RV64I-WITHFP-NEXT: mv a5, zero
+; RV64I-WITHFP-NEXT: addi a6, zero, 4
 ; RV64I-WITHFP-NEXT: mv a7, zero
 ; RV64I-WITHFP-NEXT: call callee_float_on_stack
 ; RV64I-WITHFP-NEXT: ld s0, 16(sp)
diff --git a/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll b/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
index bc86428..9a68f00 100644
--- a/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
+++ b/llvm/test/CodeGen/RISCV/calling-conv-rv32f-ilp32.ll
@@ -33,15 +33,15 @@ define float @caller_onstack_f32_noop(float %a) nounwind {
 ; RV32IF-NEXT: addi sp, sp, -16
 ; RV32IF-NEXT: sw ra, 12(sp)
 ; RV32IF-NEXT: sw a0, 4(sp)
-; RV32IF-NEXT: lui a1, 264704
+; RV32IF-NEXT: lui a0, 264704
+; RV32IF-NEXT: sw a0, 0(sp)
 ; RV32IF-NEXT: addi a0, zero, 1
-; RV32IF-NEXT: addi a2, zero, 2
-; RV32IF-NEXT: addi a4, zero, 3
-; RV32IF-NEXT: addi a6, zero, 4
-; RV32IF-NEXT: sw a1, 0(sp)
 ; RV32IF-NEXT: mv a1, zero
+; RV32IF-NEXT: addi a2, zero, 2
 ; RV32IF-NEXT: mv a3, zero
+; RV32IF-NEXT: addi a4, zero, 3
 ; RV32IF-NEXT: mv a5, zero
+; RV32IF-NEXT: addi a6, zero, 4
 ; RV32IF-NEXT: mv a7, zero
 ; RV32IF-NEXT: call onstack_f32_noop
 ; RV32IF-NEXT: lw ra, 12(sp)
@@ -56,19 +56,19 @@ define float @caller_onstack_f32_fadd(float %a, float %b) nounwind {
 ; RV32IF: # %bb.0:
 ; RV32IF-NEXT: addi sp, sp, -16
 ; RV32IF-NEXT: sw ra, 12(sp)
-; RV32IF-NEXT: fmv.w.x ft0, a1
-; RV32IF-NEXT: fmv.w.x ft1, a0
-; RV32IF-NEXT: fadd.s ft2, ft1, ft0
-; RV32IF-NEXT: fsub.s ft0, ft0, ft1
-; RV32IF-NEXT: fsw ft0, 4(sp)
+; RV32IF-NEXT: fmv.w.x ft0, a0
+; RV32IF-NEXT: fmv.w.x ft1, a1
+; RV32IF-NEXT: fsub.s ft2, ft1, ft0
+; RV32IF-NEXT: fsw ft2, 4(sp)
+; RV32IF-NEXT: fadd.s ft0, ft0, ft1
+; RV32IF-NEXT: fsw ft0, 0(sp)
 ; RV32IF-NEXT: addi a0, zero, 1
-; RV32IF-NEXT: addi a2, zero, 2
-; RV32IF-NEXT: addi a4, zero, 3
-; RV32IF-NEXT: addi a6, zero, 4
-; RV32IF-NEXT: fsw ft2, 0(sp)
 ; RV32IF-NEXT: mv a1, zero
+; RV32IF-NEXT: addi a2, zero, 2
 ; RV32IF-NEXT: mv a3, zero
+; RV32IF-NEXT: addi a4, zero, 3
 ; RV32IF-NEXT: mv a5, zero
+; RV32IF-NEXT: addi a6, zero, 4
 ; RV32IF-NEXT: mv a7, zero
 ; RV32IF-NEXT: call onstack_f32_noop
 ; RV32IF-NEXT: lw ra, 12(sp)
diff --git a/llvm/test/CodeGen/RISCV/codemodel-lowering.ll b/llvm/test/CodeGen/RISCV/codemodel-lowering.ll
index 6430a22..0706a61 100644
--- a/llvm/test/CodeGen/RISCV/codemodel-lowering.ll
+++ b/llvm/test/CodeGen/RISCV/codemodel-lowering.ll
@@ -61,9 +61,9 @@ define signext i32 @lower_blockaddress_displ(i32 signext %w) nounwind {
 ; RV32I-SMALL-NEXT: sw ra, 12(sp)
 ; RV32I-SMALL-NEXT: lui a1, %hi(.Ltmp0)
 ; RV32I-SMALL-NEXT: addi a1, a1, %lo(.Ltmp0)
-; RV32I-SMALL-NEXT: addi a2, zero, 101
 ; RV32I-SMALL-NEXT: sw a1, 8(sp)
-; RV32I-SMALL-NEXT: blt a0, a2, .LBB2_3
+; RV32I-SMALL-NEXT: addi a1, zero, 101
+; RV32I-SMALL-NEXT: blt a0, a1, .LBB2_3
 ; RV32I-SMALL-NEXT: # %bb.1: # %if.then
 ; RV32I-SMALL-NEXT: lw a0, 8(sp)
 ; RV32I-SMALL-NEXT: jr a0
@@ -86,9 +86,9 @@ define signext i32 @lower_blockaddress_displ(i32 signext %w) nounwind {
 ; RV32I-MEDIUM-NEXT: # Label of block must be emitted
 ; RV32I-MEDIUM-NEXT: auipc a1, %pcrel_hi(.Ltmp0)
 ; RV32I-MEDIUM-NEXT: addi a1, a1, %pcrel_lo(.LBB2_5)
-; RV32I-MEDIUM-NEXT: addi a2, zero, 101
 ; RV32I-MEDIUM-NEXT: sw a1, 8(sp)
-; RV32I-MEDIUM-NEXT: blt a0, a2, .LBB2_3
+; RV32I-MEDIUM-NEXT: addi a1, zero, 101
+; RV32I-MEDIUM-NEXT: blt a0, a1, .LBB2_3
 ; RV32I-MEDIUM-NEXT: # %bb.1: # %if.then
 ; RV32I-MEDIUM-NEXT: lw a0, 8(sp)
 ; RV32I-MEDIUM-NEXT: jr a0
@@ -131,11 +131,11 @@ indirectgoto:
 define float @lower_constantpool(float %a) nounwind {
 ; RV32I-SMALL-LABEL: lower_constantpool:
 ; RV32I-SMALL: # %bb.0:
-; RV32I-SMALL-NEXT: lui a1, %hi(.LCPI3_0)
-; RV32I-SMALL-NEXT: addi a1, a1, %lo(.LCPI3_0)
-; RV32I-SMALL-NEXT: flw ft0, 0(a1)
-; RV32I-SMALL-NEXT: fmv.w.x ft1, a0
-; RV32I-SMALL-NEXT: fadd.s ft0, ft1, ft0
+; RV32I-SMALL-NEXT: fmv.w.x ft0, a0
+; RV32I-SMALL-NEXT: lui a0, %hi(.LCPI3_0)
+; RV32I-SMALL-NEXT: addi a0, a0, %lo(.LCPI3_0)
+; RV32I-SMALL-NEXT: flw ft1, 0(a0)
+; RV32I-SMALL-NEXT: fadd.s ft0, ft0, ft1
 ; RV32I-SMALL-NEXT: fmv.x.w a0, ft0
 ; RV32I-SMALL-NEXT: ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/compress.ll b/llvm/test/CodeGen/RISCV/compress.ll
index b58adb8..6ee215d 100644
--- a/llvm/test/CodeGen/RISCV/compress.ll
+++ b/llvm/test/CodeGen/RISCV/compress.ll
@@ -19,10 +19,10 @@ define i32 @simple_arith(i32 %a, i32 %b) nounwind {
 ; RV32IC-LABEL: simple_arith:
-; RV32IC: addi a2, a0, 1
+; RV32IC: c.srai a1, 9
+; RV32IC-NEXT: addi a2, a0, 1
 ; RV32IC-NEXT: c.andi a2, 11
 ; RV32IC-NEXT: c.slli a2, 7
-; RV32IC-NEXT: c.srai a1, 9
 ; RV32IC-NEXT: c.add a1, a2
 ; RV32IC-NEXT: sub a0, a1, a0
 ; RV32IC-NEXT: c.jr ra
diff --git a/llvm/test/CodeGen/RISCV/div.ll b/llvm/test/CodeGen/RISCV/div.ll
index e3504b9..1fd0084 100644
--- a/llvm/test/CodeGen/RISCV/div.ll
+++ b/llvm/test/CodeGen/RISCV/div.ll
@@ -457,8 +457,8 @@ define i64 @sdiv64_sext_operands(i32 %a, i32 %b) nounwind {
 ;
 ; RV64IM-LABEL: sdiv64_sext_operands:
 ; RV64IM: # %bb.0:
-; RV64IM-NEXT: sext.w a0, a0
 ; RV64IM-NEXT: sext.w a1, a1
+; RV64IM-NEXT: sext.w a0, a0
 ; RV64IM-NEXT: div a0, a0, a1
 ; RV64IM-NEXT: ret
 %1 = sext i32 %a to i64
diff --git a/llvm/test/CodeGen/RISCV/double-arith.ll b/llvm/test/CodeGen/RISCV/double-arith.ll
index ad68dca..2b69621 100644
--- a/llvm/test/CodeGen/RISCV/double-arith.ll
+++ b/llvm/test/CodeGen/RISCV/double-arith.ll
@@ -473,13 +473,13 @@ define double @fmsub_d(double %a, double %b, double %c) nounwind {
 ;
 ; RV64IFD-LABEL: fmsub_d:
 ; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: lui a3, %hi(.LCPI15_0)
-; RV64IFD-NEXT: addi a3, a3, %lo(.LCPI15_0)
-; RV64IFD-NEXT: fld ft0, 0(a3)
+; RV64IFD-NEXT: fmv.d.x ft0, a2
+; RV64IFD-NEXT: lui a2, %hi(.LCPI15_0)
+; RV64IFD-NEXT: addi a2, a2, %lo(.LCPI15_0)
+; RV64IFD-NEXT: fld ft1, 0(a2)
+; RV64IFD-NEXT: fadd.d ft0, ft0, ft1
 ; RV64IFD-NEXT: fmv.d.x ft1, a1
 ; RV64IFD-NEXT: fmv.d.x ft2, a0
-; RV64IFD-NEXT: fmv.d.x ft3, a2
-; RV64IFD-NEXT: fadd.d ft0, ft3, ft0
 ; RV64IFD-NEXT: fmsub.d ft0, ft2, ft1, ft0
 ; RV64IFD-NEXT: fmv.x.d a0, ft0
 ; RV64IFD-NEXT: ret
@@ -496,18 +496,18 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 ; RV32IFD-NEXT: sw a2, 8(sp)
 ; RV32IFD-NEXT: sw a3, 12(sp)
 ; RV32IFD-NEXT: fld ft0, 8(sp)
-; RV32IFD-NEXT: sw a4, 8(sp)
-; RV32IFD-NEXT: sw a5, 12(sp)
-; RV32IFD-NEXT: fld ft1, 8(sp)
 ; RV32IFD-NEXT: sw a0, 8(sp)
 ; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: sw a4, 8(sp)
+; RV32IFD-NEXT: sw a5, 12(sp)
 ; RV32IFD-NEXT: fld ft2, 8(sp)
 ; RV32IFD-NEXT: lui a0, %hi(.LCPI16_0)
 ; RV32IFD-NEXT: addi a0, a0, %lo(.LCPI16_0)
 ; RV32IFD-NEXT: fld ft3, 0(a0)
 ; RV32IFD-NEXT: fadd.d ft2, ft2, ft3
 ; RV32IFD-NEXT: fadd.d ft1, ft1, ft3
-; RV32IFD-NEXT: fnmadd.d ft0, ft2, ft0, ft1
+; RV32IFD-NEXT: fnmadd.d ft0, ft1, ft0, ft2
 ; RV32IFD-NEXT: fsd ft0, 8(sp)
 ; RV32IFD-NEXT: lw a0, 8(sp)
 ; RV32IFD-NEXT: lw a1, 12(sp)
@@ -516,15 +516,15 @@ define double @fnmadd_d(double %a, double %b, double %c) nounwind {
 ;
 ; RV64IFD-LABEL: fnmadd_d:
 ; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: lui a3, %hi(.LCPI16_0)
-; RV64IFD-NEXT: addi a3, a3, %lo(.LCPI16_0)
-; RV64IFD-NEXT: fld ft0, 0(a3)
-; RV64IFD-NEXT: fmv.d.x ft1, a1
-; RV64IFD-NEXT: fmv.d.x ft2, a2
-; RV64IFD-NEXT: fmv.d.x ft3, a0
-; RV64IFD-NEXT: fadd.d ft3, ft3, ft0
-; RV64IFD-NEXT: fadd.d ft0, ft2, ft0
-; RV64IFD-NEXT: fnmadd.d ft0, ft3, ft1, ft0
+; RV64IFD-NEXT: fmv.d.x ft0, a2
+; RV64IFD-NEXT: lui a2, %hi(.LCPI16_0)
+; RV64IFD-NEXT: addi a2, a2, %lo(.LCPI16_0)
+; RV64IFD-NEXT: fld ft1, 0(a2)
+; RV64IFD-NEXT: fadd.d ft0, ft0, ft1
+; RV64IFD-NEXT: fmv.d.x ft2, a0
+; RV64IFD-NEXT: fadd.d ft1, ft2, ft1
+; RV64IFD-NEXT: fmv.d.x ft2, a1
+; RV64IFD-NEXT: fnmadd.d ft0, ft1, ft2, ft0
 ; RV64IFD-NEXT: fmv.x.d a0, ft0
 ; RV64IFD-NEXT: ret
 %a_ = fadd double 0.0, %a
@@ -561,13 +561,13 @@ define double @fnmsub_d(double %a, double %b, double %c) nounwind {
 ;
 ; RV64IFD-LABEL: fnmsub_d:
 ; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: lui a3, %hi(.LCPI17_0)
-; RV64IFD-NEXT: addi a3, a3, %lo(.LCPI17_0)
-; RV64IFD-NEXT: fld ft0, 0(a3)
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: lui a0, %hi(.LCPI17_0)
+; RV64IFD-NEXT: addi a0, a0, %lo(.LCPI17_0)
+; RV64IFD-NEXT: fld ft1, 0(a0)
+; RV64IFD-NEXT: fadd.d ft0, ft0, ft1
 ; RV64IFD-NEXT: fmv.d.x ft1, a2
 ; RV64IFD-NEXT: fmv.d.x ft2, a1
-; RV64IFD-NEXT: fmv.d.x ft3, a0
-; RV64IFD-NEXT: fadd.d ft0, ft3, ft0
 ; RV64IFD-NEXT: fnmsub.d ft0, ft0, ft2, ft1
 ; RV64IFD-NEXT: fmv.x.d a0, ft0
 ; RV64IFD-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
index f80e140..626a239 100644
--- a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
+++ b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
@@ -118,9 +118,9 @@ define double @fcopysign_fneg(double %a, double %b) nounwind {
 ;
 ; RV64I-LABEL: fcopysign_fneg:
 ; RV64I: # %bb.0:
-; RV64I-NEXT: not a1, a1
 ; RV64I-NEXT: addi a2, zero, -1
 ; RV64I-NEXT: slli a2, a2, 63
+; RV64I-NEXT: not a1, a1
 ; RV64I-NEXT: and a1, a1, a2
 ; RV64I-NEXT: addi a2, a2, -1
 ; RV64I-NEXT: and a0, a0, a2
diff --git a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
index 356f632..1c8b867 100644
--- a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
@@ -385,11 +385,11 @@ define void @br_fcmp_ord(double %a, double %b) nounwind {
 ; RV64IFD: # %bb.0:
 ; RV64IFD-NEXT: addi sp, sp, -16
 ; RV64IFD-NEXT: sd ra, 8(sp)
-; RV64IFD-NEXT: fmv.d.x ft0, a0
-; RV64IFD-NEXT: fmv.d.x ft1, a1
-; RV64IFD-NEXT: feq.d a0, ft1, ft1
+; RV64IFD-NEXT: fmv.d.x ft0, a1
 ; RV64IFD-NEXT: feq.d a1, ft0, ft0
-; RV64IFD-NEXT: and a0, a1, a0
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: feq.d a0, ft0, ft0
+; RV64IFD-NEXT: and a0, a0, a1
 ; RV64IFD-NEXT: bnez a0, .LBB8_2
 ; RV64IFD-NEXT: # %bb.1: # %if.else
 ; RV64IFD-NEXT: ld ra, 8(sp)
@@ -712,11 +712,11 @@ define void @br_fcmp_uno(double %a, double %b) nounwind {
 ; RV64IFD: # %bb.0:
 ; RV64IFD-NEXT: addi sp, sp, -16
 ; RV64IFD-NEXT: sd ra, 8(sp)
-; RV64IFD-NEXT: fmv.d.x ft0, a0
-; RV64IFD-NEXT: fmv.d.x ft1, a1
-; RV64IFD-NEXT: feq.d a0, ft1, ft1
+; RV64IFD-NEXT: fmv.d.x ft0, a1
 ; RV64IFD-NEXT: feq.d a1, ft0, ft0
-; RV64IFD-NEXT: and a0, a1, a0
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: feq.d a0, ft0, ft0
+; RV64IFD-NEXT: and a0, a0, a1
 ; RV64IFD-NEXT: seqz a0, a0
 ; RV64IFD-NEXT: bnez a0, .LBB15_2
 ; RV64IFD-NEXT: # %bb.1: # %if.else
diff --git a/llvm/test/CodeGen/RISCV/double-calling-conv.ll b/llvm/test/CodeGen/RISCV/double-calling-conv.ll
index 837cb81..f3cb0fe 100644
--- a/llvm/test/CodeGen/RISCV/double-calling-conv.ll
+++ b/llvm/test/CodeGen/RISCV/double-calling-conv.ll
@@ -76,16 +76,16 @@ define double @caller_double_split_reg_stack() nounwind {
 ; RV32IFD-NEXT: addi sp, sp, -16
 ; RV32IFD-NEXT: sw ra, 12(sp)
 ; RV32IFD-NEXT: lui a0, 262510
-; RV32IFD-NEXT: addi a2, a0, 327
+; RV32IFD-NEXT: addi a0, a0, 327
+; RV32IFD-NEXT: sw a0, 0(sp)
 ; RV32IFD-NEXT: lui a0, 262446
 ; RV32IFD-NEXT: addi a6, a0, 327
 ; RV32IFD-NEXT: lui a0, 713032
 ; RV32IFD-NEXT: addi a5, a0, -1311
 ; RV32IFD-NEXT: addi a0, zero, 1
 ; RV32IFD-NEXT: addi a1, zero, 2
-; RV32IFD-NEXT: addi a3, zero, 3
-; RV32IFD-NEXT: sw a2, 0(sp)
 ; RV32IFD-NEXT: mv a2, zero
+; RV32IFD-NEXT: addi a3, zero, 3
 ; RV32IFD-NEXT: mv a4, zero
 ; RV32IFD-NEXT: mv a7, a5
 ; RV32IFD-NEXT: call callee_double_split_reg_stack
@@ -120,20 +120,20 @@ define double @caller_double_stack() nounwind {
 ; RV32IFD-NEXT: lui a0, 262510
 ; RV32IFD-NEXT: addi a0, a0, 327
 ; RV32IFD-NEXT: sw a0, 4(sp)
-; RV32IFD-NEXT: lui a0, 713032
-; RV32IFD-NEXT: addi a1, a0, -1311
-; RV32IFD-NEXT: sw a1, 0(sp)
 ; RV32IFD-NEXT: lui a0, 262574
 ; RV32IFD-NEXT: addi a0, a0, 327
 ; RV32IFD-NEXT: sw a0, 12(sp)
+; RV32IFD-NEXT: lui a0, 713032
+; RV32IFD-NEXT: addi a0, a0, -1311
+; RV32IFD-NEXT: sw a0, 0(sp)
+; RV32IFD-NEXT: sw a0, 8(sp)
 ; RV32IFD-NEXT: addi a0, zero, 1
-; RV32IFD-NEXT: addi a2, zero, 2
-; RV32IFD-NEXT: addi a4, zero, 3
-; RV32IFD-NEXT: addi a6, zero, 4
-; RV32IFD-NEXT: sw a1, 8(sp)
 ; RV32IFD-NEXT: mv a1, zero
+; RV32IFD-NEXT: addi a2, zero, 2
 ; RV32IFD-NEXT: mv a3, zero
+; RV32IFD-NEXT: addi a4, zero, 3
 ; RV32IFD-NEXT: mv a5, zero
+; RV32IFD-NEXT: addi a6, zero, 4
 ; RV32IFD-NEXT: mv a7, zero
 ; RV32IFD-NEXT: call callee_double_stack
 ; RV32IFD-NEXT: lw ra, 28(sp)
diff --git a/llvm/test/CodeGen/RISCV/double-convert.ll b/llvm/test/CodeGen/RISCV/double-convert.ll
index f7abdea..1d30fda 100644
--- a/llvm/test/CodeGen/RISCV/double-convert.ll
+++ b/llvm/test/CodeGen/RISCV/double-convert.ll
@@ -257,9 +257,9 @@ define double @fmv_d_x(i64 %a, i64 %b) nounwind {
 ;
 ; RV64IFD-LABEL: fmv_d_x:
 ; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: fmv.d.x ft0, a0
-; RV64IFD-NEXT: fmv.d.x ft1, a1
-; RV64IFD-NEXT: fadd.d ft0, ft0, ft1
+; RV64IFD-NEXT: fmv.d.x ft0, a1
+; RV64IFD-NEXT: fmv.d.x ft1, a0
+; RV64IFD-NEXT: fadd.d ft0, ft1, ft0
 ; RV64IFD-NEXT: fmv.x.d a0, ft0
 ; RV64IFD-NEXT: ret
 %1 = bitcast i64 %a to double
diff --git a/llvm/test/CodeGen/RISCV/double-fcmp.ll b/llvm/test/CodeGen/RISCV/double-fcmp.ll
index 2887522..0046b59 100644
--- a/llvm/test/CodeGen/RISCV/double-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-fcmp.ll
@@ -197,11 +197,11 @@ define i32 @fcmp_ord(double %a, double %b) nounwind {
 ;
 ; RV64IFD-LABEL: fcmp_ord:
 ; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: fmv.d.x ft0, a0
-; RV64IFD-NEXT: fmv.d.x ft1, a1
-; RV64IFD-NEXT: feq.d a0, ft1, ft1
+; RV64IFD-NEXT: fmv.d.x ft0, a1
 ; RV64IFD-NEXT: feq.d a1, ft0, ft0
-; RV64IFD-NEXT: and a0, a1, a0
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: feq.d a0, ft0, ft0
+; RV64IFD-NEXT: and a0, a0, a1
 ; RV64IFD-NEXT: ret
 %1 = fcmp ord double %a, %b
 %2 = zext i1 %1 to i32
@@ -397,11 +397,11 @@ define i32 @fcmp_uno(double %a, double %b) nounwind {
 ;
 ; RV64IFD-LABEL: fcmp_uno:
 ; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: fmv.d.x ft0, a0
-; RV64IFD-NEXT: fmv.d.x ft1, a1
-; RV64IFD-NEXT: feq.d a0, ft1, ft1
+; RV64IFD-NEXT: fmv.d.x ft0, a1
 ; RV64IFD-NEXT: feq.d a1, ft0, ft0
-; RV64IFD-NEXT: and a0, a1, a0
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: feq.d a0, ft0, ft0
+; RV64IFD-NEXT: and a0, a0, a1
 ; RV64IFD-NEXT: seqz a0, a0
 ; RV64IFD-NEXT: ret
 %1 = fcmp uno double %a, %b
diff --git a/llvm/test/CodeGen/RISCV/double-imm.ll b/llvm/test/CodeGen/RISCV/double-imm.ll
index 3cd39796..7f83564 100644
--- a/llvm/test/CodeGen/RISCV/double-imm.ll
+++ b/llvm/test/CodeGen/RISCV/double-imm.ll
@@ -49,11 +49,11 @@ define double @double_imm_op(double %a) nounwind {
 ;
 ; RV64IFD-LABEL: double_imm_op:
 ; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: lui a1, %hi(.LCPI1_0)
-; RV64IFD-NEXT: addi a1, a1, %lo(.LCPI1_0)
-; RV64IFD-NEXT: fld ft0, 0(a1)
-; RV64IFD-NEXT: fmv.d.x ft1, a0
-; RV64IFD-NEXT: fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT: fmv.d.x ft0, a0
+; RV64IFD-NEXT: lui a0, %hi(.LCPI1_0)
+; RV64IFD-NEXT: addi a0, a0, %lo(.LCPI1_0)
+; RV64IFD-NEXT: fld ft1, 0(a0)
+; RV64IFD-NEXT: fadd.d ft0, ft0, ft1
 ; RV64IFD-NEXT: fmv.x.d a0, ft0
 ; RV64IFD-NEXT: ret
 %1 = fadd double %a, 1.0
diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
index b887388..2932745 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -111,24 +111,28 @@ define double @sincos_f64(double %a) nounwind {
 ; RV32IFD-NEXT: sw ra, 28(sp)
 ; RV32IFD-NEXT: sw s0, 24(sp)
 ; RV32IFD-NEXT: sw s1, 20(sp)
+; RV32IFD-NEXT: sw s2, 16(sp)
+; RV32IFD-NEXT: sw s3, 12(sp)
 ; RV32IFD-NEXT: mv s0, a1
 ; RV32IFD-NEXT: mv s1, a0
 ; RV32IFD-NEXT: call sin
-; RV32IFD-NEXT: sw a0, 8(sp)
-; RV32IFD-NEXT: sw a1, 12(sp)
-; RV32IFD-NEXT: fld ft0, 8(sp)
-; RV32IFD-NEXT: fsd ft0, 0(sp)
+; RV32IFD-NEXT: mv s2, a0
+; RV32IFD-NEXT: mv s3, a1
 ; RV32IFD-NEXT: mv a0, s1
 ; RV32IFD-NEXT: mv a1, s0
 ; RV32IFD-NEXT: call cos
-; RV32IFD-NEXT: sw a0, 8(sp)
-; RV32IFD-NEXT: sw a1, 12(sp)
-; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: sw a0, 0(sp)
+; RV32IFD-NEXT: sw a1, 4(sp)
+; RV32IFD-NEXT: fld ft0, 0(sp)
+; RV32IFD-NEXT: sw s2, 0(sp)
+; RV32IFD-NEXT: sw s3, 4(sp)
 ; RV32IFD-NEXT: fld ft1, 0(sp)
 ; RV32IFD-NEXT: fadd.d ft0, ft1, ft0
-; RV32IFD-NEXT: fsd ft0, 8(sp)
-; RV32IFD-NEXT: lw a0, 8(sp)
-; RV32IFD-NEXT: lw a1, 12(sp)
+; RV32IFD-NEXT: fsd ft0, 0(sp)
+; RV32IFD-NEXT: lw a0, 0(sp)
+; RV32IFD-NEXT: lw a1, 4(sp)
+; RV32IFD-NEXT: lw s3, 12(sp)
+; RV32IFD-NEXT: lw s2, 16(sp)
 ; RV32IFD-NEXT: lw s1, 20(sp)
 ; RV32IFD-NEXT: lw s0, 24(sp)
 ; RV32IFD-NEXT: lw ra, 28(sp)
@@ -140,16 +144,17 @@ define double @sincos_f64(double %a) nounwind {
 ; RV64IFD-NEXT: addi sp, sp, -32
 ; RV64IFD-NEXT: sd ra, 24(sp)
 ; RV64IFD-NEXT: sd s0, 16(sp)
+; RV64IFD-NEXT: sd s1, 8(sp)
 ; RV64IFD-NEXT: mv s0, a0
 ; RV64IFD-NEXT: call sin
-; RV64IFD-NEXT: fmv.d.x ft0, a0
-; RV64IFD-NEXT: fsd ft0, 8(sp)
+; RV64IFD-NEXT: mv s1, a0
 ; RV64IFD-NEXT: mv a0, s0
 ; RV64IFD-NEXT: call cos
 ; RV64IFD-NEXT: fmv.d.x ft0, a0
-; RV64IFD-NEXT: fld ft1, 8(sp)
+; RV64IFD-NEXT: fmv.d.x ft1, s1
 ; RV64IFD-NEXT: fadd.d ft0, ft1, ft0
 ; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ld s1, 8(sp)
 ; RV64IFD-NEXT: ld s0, 16(sp)
 ; RV64IFD-NEXT: ld ra, 24(sp)
 ; RV64IFD-NEXT: addi sp, sp, 32
@@ -345,17 +350,17 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fmuladd_f64:
 ; RV32IFD: # %bb.0:
 ; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw a4, 8(sp)
-; RV32IFD-NEXT: sw a5, 12(sp)
-; RV32IFD-NEXT: fld ft0, 8(sp)
 ; RV32IFD-NEXT: sw a2, 8(sp)
 ; RV32IFD-NEXT: sw a3, 12(sp)
-; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
 ; RV32IFD-NEXT: sw a0, 8(sp)
 ; RV32IFD-NEXT: sw a1, 12(sp)
-; RV32IFD-NEXT: fld ft2, 8(sp)
-; RV32IFD-NEXT: fmul.d ft1, ft2, ft1
-; RV32IFD-NEXT: fadd.d ft0, ft1, ft0
+; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: fmul.d ft0, ft1, ft0
+; RV32IFD-NEXT: sw a4, 8(sp)
+; RV32IFD-NEXT: sw a5, 12(sp)
+; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: fadd.d ft0, ft0, ft1
 ; RV32IFD-NEXT: fsd ft0, 8(sp)
 ; RV32IFD-NEXT: lw a0, 8(sp)
 ; RV32IFD-NEXT: lw a1, 12(sp)
@@ -364,11 +369,11 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
 ;
 ; RV64IFD-LABEL: fmuladd_f64:
 ; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: fmv.d.x ft0, a2
-; RV64IFD-NEXT: fmv.d.x ft1, a1
-; RV64IFD-NEXT: fmv.d.x ft2, a0
-; RV64IFD-NEXT: fmul.d ft1, ft2, ft1
-; RV64IFD-NEXT: fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT: fmv.d.x ft0, a1
+; RV64IFD-NEXT: fmv.d.x ft1, a0
+; RV64IFD-NEXT: fmul.d ft0, ft1, ft0
+; RV64IFD-NEXT: 
fmv.d.x ft1, a2 +; RV64IFD-NEXT: fadd.d ft0, ft0, ft1 ; RV64IFD-NEXT: fmv.x.d a0, ft0 ; RV64IFD-NEXT: ret %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c) diff --git a/llvm/test/CodeGen/RISCV/double-mem.ll b/llvm/test/CodeGen/RISCV/double-mem.ll index ad6a3af..76be198 100644 --- a/llvm/test/CodeGen/RISCV/double-mem.ll +++ b/llvm/test/CodeGen/RISCV/double-mem.ll @@ -8,9 +8,9 @@ define double @fld(double *%a) nounwind { ; RV32IFD-LABEL: fld: ; RV32IFD: # %bb.0: ; RV32IFD-NEXT: addi sp, sp, -16 -; RV32IFD-NEXT: fld ft0, 0(a0) -; RV32IFD-NEXT: fld ft1, 24(a0) -; RV32IFD-NEXT: fadd.d ft0, ft0, ft1 +; RV32IFD-NEXT: fld ft0, 24(a0) +; RV32IFD-NEXT: fld ft1, 0(a0) +; RV32IFD-NEXT: fadd.d ft0, ft1, ft0 ; RV32IFD-NEXT: fsd ft0, 8(sp) ; RV32IFD-NEXT: lw a0, 8(sp) ; RV32IFD-NEXT: lw a1, 12(sp) @@ -19,9 +19,9 @@ define double @fld(double *%a) nounwind { ; ; RV64IFD-LABEL: fld: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: fld ft0, 0(a0) -; RV64IFD-NEXT: fld ft1, 24(a0) -; RV64IFD-NEXT: fadd.d ft0, ft0, ft1 +; RV64IFD-NEXT: fld ft0, 24(a0) +; RV64IFD-NEXT: fld ft1, 0(a0) +; RV64IFD-NEXT: fadd.d ft0, ft1, ft0 ; RV64IFD-NEXT: fmv.x.d a0, ft0 ; RV64IFD-NEXT: ret %1 = load double, double* %a @@ -44,8 +44,8 @@ define void @fsd(double *%a, double %b, double %c) nounwind { ; RV32IFD-NEXT: sw a2, 12(sp) ; RV32IFD-NEXT: fld ft1, 8(sp) ; RV32IFD-NEXT: fadd.d ft0, ft1, ft0 -; RV32IFD-NEXT: fsd ft0, 0(a0) ; RV32IFD-NEXT: fsd ft0, 64(a0) +; RV32IFD-NEXT: fsd ft0, 0(a0) ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret ; @@ -54,8 +54,8 @@ define void @fsd(double *%a, double %b, double %c) nounwind { ; RV64IFD-NEXT: fmv.d.x ft0, a2 ; RV64IFD-NEXT: fmv.d.x ft1, a1 ; RV64IFD-NEXT: fadd.d ft0, ft1, ft0 -; RV64IFD-NEXT: fsd ft0, 0(a0) ; RV64IFD-NEXT: fsd ft0, 64(a0) +; RV64IFD-NEXT: fsd ft0, 0(a0) ; RV64IFD-NEXT: ret ; Use %b and %c in an FP op to ensure floating point registers are used, even ; for the soft float ABI @@ -100,10 +100,10 @@ define double @fld_fsd_global(double %a, double %b) nounwind { ; RV64IFD-NEXT: lui a0, %hi(G) ; RV64IFD-NEXT: fld ft1, %lo(G)(a0) ; RV64IFD-NEXT: fsd ft0, %lo(G)(a0) -; RV64IFD-NEXT: addi a1, a0, %lo(G) -; RV64IFD-NEXT: fld ft1, 72(a1) +; RV64IFD-NEXT: addi a0, a0, %lo(G) +; RV64IFD-NEXT: fld ft1, 72(a0) +; RV64IFD-NEXT: fsd ft0, 72(a0) ; RV64IFD-NEXT: fmv.x.d a0, ft0 -; RV64IFD-NEXT: fsd ft0, 72(a1) ; RV64IFD-NEXT: ret ; Use %a and %b in an FP op to ensure floating point registers are used, even ; for the soft float ABI @@ -136,14 +136,14 @@ define double @fld_fsd_constant(double %a) nounwind { ; ; RV64IFD-LABEL: fld_fsd_constant: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: lui a1, 56 -; RV64IFD-NEXT: addiw a1, a1, -1353 -; RV64IFD-NEXT: slli a1, a1, 14 -; RV64IFD-NEXT: fld ft0, -273(a1) -; RV64IFD-NEXT: fmv.d.x ft1, a0 -; RV64IFD-NEXT: fadd.d ft0, ft1, ft0 +; RV64IFD-NEXT: fmv.d.x ft0, a0 +; RV64IFD-NEXT: lui a0, 56 +; RV64IFD-NEXT: addiw a0, a0, -1353 +; RV64IFD-NEXT: slli a0, a0, 14 +; RV64IFD-NEXT: fld ft1, -273(a0) +; RV64IFD-NEXT: fadd.d ft0, ft0, ft1 +; RV64IFD-NEXT: fsd ft0, -273(a0) ; RV64IFD-NEXT: fmv.x.d a0, ft0 -; RV64IFD-NEXT: fsd ft0, -273(a1) ; RV64IFD-NEXT: ret %1 = inttoptr i32 3735928559 to double* %2 = load volatile double, double* %1 @@ -159,18 +159,22 @@ define double @fld_stack(double %a) nounwind { ; RV32IFD: # %bb.0: ; RV32IFD-NEXT: addi sp, sp, -32 ; RV32IFD-NEXT: sw ra, 28(sp) -; RV32IFD-NEXT: sw a0, 8(sp) -; RV32IFD-NEXT: sw a1, 12(sp) -; RV32IFD-NEXT: fld ft0, 8(sp) -; RV32IFD-NEXT: fsd ft0, 0(sp) -; RV32IFD-NEXT: addi a0, sp, 16 +; RV32IFD-NEXT: sw s0, 
24(sp) +; RV32IFD-NEXT: sw s1, 20(sp) +; RV32IFD-NEXT: mv s0, a1 +; RV32IFD-NEXT: mv s1, a0 +; RV32IFD-NEXT: addi a0, sp, 8 ; RV32IFD-NEXT: call notdead -; RV32IFD-NEXT: fld ft0, 16(sp) -; RV32IFD-NEXT: fld ft1, 0(sp) -; RV32IFD-NEXT: fadd.d ft0, ft0, ft1 -; RV32IFD-NEXT: fsd ft0, 8(sp) -; RV32IFD-NEXT: lw a0, 8(sp) -; RV32IFD-NEXT: lw a1, 12(sp) +; RV32IFD-NEXT: sw s1, 0(sp) +; RV32IFD-NEXT: sw s0, 4(sp) +; RV32IFD-NEXT: fld ft0, 0(sp) +; RV32IFD-NEXT: fld ft1, 8(sp) +; RV32IFD-NEXT: fadd.d ft0, ft1, ft0 +; RV32IFD-NEXT: fsd ft0, 0(sp) +; RV32IFD-NEXT: lw a0, 0(sp) +; RV32IFD-NEXT: lw a1, 4(sp) +; RV32IFD-NEXT: lw s1, 20(sp) +; RV32IFD-NEXT: lw s0, 24(sp) ; RV32IFD-NEXT: lw ra, 28(sp) ; RV32IFD-NEXT: addi sp, sp, 32 ; RV32IFD-NEXT: ret @@ -179,14 +183,15 @@ define double @fld_stack(double %a) nounwind { ; RV64IFD: # %bb.0: ; RV64IFD-NEXT: addi sp, sp, -32 ; RV64IFD-NEXT: sd ra, 24(sp) -; RV64IFD-NEXT: fmv.d.x ft0, a0 -; RV64IFD-NEXT: fsd ft0, 8(sp) -; RV64IFD-NEXT: addi a0, sp, 16 +; RV64IFD-NEXT: sd s0, 16(sp) +; RV64IFD-NEXT: mv s0, a0 +; RV64IFD-NEXT: addi a0, sp, 8 ; RV64IFD-NEXT: call notdead -; RV64IFD-NEXT: fld ft0, 16(sp) +; RV64IFD-NEXT: fmv.d.x ft0, s0 ; RV64IFD-NEXT: fld ft1, 8(sp) -; RV64IFD-NEXT: fadd.d ft0, ft0, ft1 +; RV64IFD-NEXT: fadd.d ft0, ft1, ft0 ; RV64IFD-NEXT: fmv.x.d a0, ft0 +; RV64IFD-NEXT: ld s0, 16(sp) ; RV64IFD-NEXT: ld ra, 24(sp) ; RV64IFD-NEXT: addi sp, sp, 32 ; RV64IFD-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/double-previous-failure.ll b/llvm/test/CodeGen/RISCV/double-previous-failure.ll index eb58ba4..168c6c2 100644 --- a/llvm/test/CodeGen/RISCV/double-previous-failure.ll +++ b/llvm/test/CodeGen/RISCV/double-previous-failure.ll @@ -17,8 +17,8 @@ define i32 @main() nounwind { ; RV32IFD: # %bb.0: # %entry ; RV32IFD-NEXT: addi sp, sp, -16 ; RV32IFD-NEXT: sw ra, 12(sp) -; RV32IFD-NEXT: lui a1, 262144 ; RV32IFD-NEXT: mv a0, zero +; RV32IFD-NEXT: lui a1, 262144 ; RV32IFD-NEXT: call test ; RV32IFD-NEXT: sw a0, 0(sp) ; RV32IFD-NEXT: sw a1, 4(sp) diff --git a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll index 12789c0..321adce 100644 --- a/llvm/test/CodeGen/RISCV/double-select-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/double-select-fcmp.ll @@ -298,23 +298,23 @@ define double @select_fcmp_ueq(double %a, double %b) nounwind { ; RV32IFD-LABEL: select_fcmp_ueq: ; RV32IFD: # %bb.0: ; RV32IFD-NEXT: addi sp, sp, -16 -; RV32IFD-NEXT: sw a2, 8(sp) -; RV32IFD-NEXT: sw a3, 12(sp) -; RV32IFD-NEXT: fld ft0, 8(sp) ; RV32IFD-NEXT: sw a0, 8(sp) ; RV32IFD-NEXT: sw a1, 12(sp) +; RV32IFD-NEXT: fld ft0, 8(sp) +; RV32IFD-NEXT: sw a2, 8(sp) +; RV32IFD-NEXT: sw a3, 12(sp) ; RV32IFD-NEXT: fld ft1, 8(sp) -; RV32IFD-NEXT: feq.d a0, ft1, ft0 +; RV32IFD-NEXT: feq.d a0, ft1, ft1 ; RV32IFD-NEXT: feq.d a1, ft0, ft0 -; RV32IFD-NEXT: feq.d a2, ft1, ft1 -; RV32IFD-NEXT: and a1, a2, a1 -; RV32IFD-NEXT: seqz a1, a1 -; RV32IFD-NEXT: or a0, a0, a1 +; RV32IFD-NEXT: and a0, a1, a0 +; RV32IFD-NEXT: seqz a0, a0 +; RV32IFD-NEXT: feq.d a1, ft0, ft1 +; RV32IFD-NEXT: or a0, a1, a0 ; RV32IFD-NEXT: bnez a0, .LBB8_2 ; RV32IFD-NEXT: # %bb.1: -; RV32IFD-NEXT: fmv.d ft1, ft0 +; RV32IFD-NEXT: fmv.d ft0, ft1 ; RV32IFD-NEXT: .LBB8_2: -; RV32IFD-NEXT: fsd ft1, 8(sp) +; RV32IFD-NEXT: fsd ft0, 8(sp) ; RV32IFD-NEXT: lw a0, 8(sp) ; RV32IFD-NEXT: lw a1, 12(sp) ; RV32IFD-NEXT: addi sp, sp, 16 @@ -322,14 +322,14 @@ define double @select_fcmp_ueq(double %a, double %b) nounwind { ; ; RV64IFD-LABEL: select_fcmp_ueq: ; RV64IFD: # %bb.0: -; RV64IFD-NEXT: fmv.d.x ft1, a1 ; 
RV64IFD-NEXT: fmv.d.x ft0, a0 -; RV64IFD-NEXT: feq.d a0, ft0, ft1 -; RV64IFD-NEXT: feq.d a1, ft1, ft1 -; RV64IFD-NEXT: feq.d a2, ft0, ft0 -; RV64IFD-NEXT: and a1, a2, a1 -; RV64IFD-NEXT: seqz a1, a1 -; RV64IFD-NEXT: or a0, a0, a1 +; RV64IFD-NEXT: fmv.d.x ft1, a1 +; RV64IFD-NEXT: feq.d a0, ft1, ft1 +; RV64IFD-NEXT: feq.d a1, ft0, ft0 +; RV64IFD-NEXT: and a0, a1, a0 +; RV64IFD-NEXT: seqz a0, a0 +; RV64IFD-NEXT: feq.d a1, ft0, ft1 +; RV64IFD-NEXT: or a0, a1, a0 ; RV64IFD-NEXT: bnez a0, .LBB8_2 ; RV64IFD-NEXT: # %bb.1: ; RV64IFD-NEXT: fmv.d ft0, ft1 @@ -604,12 +604,12 @@ define i32 @i32_select_fcmp_oeq(double %a, double %b, i32 %c, i32 %d) nounwind { ; RV32IFD-NEXT: sw a0, 8(sp) ; RV32IFD-NEXT: sw a1, 12(sp) ; RV32IFD-NEXT: fld ft1, 8(sp) -; RV32IFD-NEXT: feq.d a1, ft1, ft0 -; RV32IFD-NEXT: mv a0, a4 -; RV32IFD-NEXT: bnez a1, .LBB16_2 +; RV32IFD-NEXT: feq.d a0, ft1, ft0 +; RV32IFD-NEXT: bnez a0, .LBB16_2 ; RV32IFD-NEXT: # %bb.1: -; RV32IFD-NEXT: mv a0, a5 +; RV32IFD-NEXT: mv a4, a5 ; RV32IFD-NEXT: .LBB16_2: +; RV32IFD-NEXT: mv a0, a4 ; RV32IFD-NEXT: addi sp, sp, 16 ; RV32IFD-NEXT: ret ; @@ -617,12 +617,12 @@ define i32 @i32_select_fcmp_oeq(double %a, double %b, i32 %c, i32 %d) nounwind { ; RV64IFD: # %bb.0: ; RV64IFD-NEXT: fmv.d.x ft0, a1 ; RV64IFD-NEXT: fmv.d.x ft1, a0 -; RV64IFD-NEXT: feq.d a1, ft1, ft0 -; RV64IFD-NEXT: mv a0, a2 -; RV64IFD-NEXT: bnez a1, .LBB16_2 +; RV64IFD-NEXT: feq.d a0, ft1, ft0 +; RV64IFD-NEXT: bnez a0, .LBB16_2 ; RV64IFD-NEXT: # %bb.1: -; RV64IFD-NEXT: mv a0, a3 +; RV64IFD-NEXT: mv a2, a3 ; RV64IFD-NEXT: .LBB16_2: +; RV64IFD-NEXT: mv a0, a2 ; RV64IFD-NEXT: ret %1 = fcmp oeq double %a, %b %2 = select i1 %1, i32 %c, i32 %d diff --git a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll index a595cd8..28a9e12 100644 --- a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll +++ b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll @@ -37,10 +37,10 @@ define double @func(double %d, i32 %n) nounwind { ; RV64IFD: # %bb.0: # %entry ; RV64IFD-NEXT: addi sp, sp, -16 ; RV64IFD-NEXT: sd ra, 8(sp) -; RV64IFD-NEXT: slli a2, a1, 32 -; RV64IFD-NEXT: srli a2, a2, 32 ; RV64IFD-NEXT: fmv.d.x ft0, a0 -; RV64IFD-NEXT: beqz a2, .LBB0_2 +; RV64IFD-NEXT: slli a0, a1, 32 +; RV64IFD-NEXT: srli a0, a0, 32 +; RV64IFD-NEXT: beqz a0, .LBB0_2 ; RV64IFD-NEXT: # %bb.1: # %if.else ; RV64IFD-NEXT: addi a1, a1, -1 ; RV64IFD-NEXT: fmv.x.d a0, ft0 diff --git a/llvm/test/CodeGen/RISCV/float-arith.ll b/llvm/test/CodeGen/RISCV/float-arith.ll index 5244a69..a668b7e 100644 --- a/llvm/test/CodeGen/RISCV/float-arith.ll +++ b/llvm/test/CodeGen/RISCV/float-arith.ll @@ -339,26 +339,26 @@ define float @fmadd_s(float %a, float %b, float %c) nounwind { define float @fmsub_s(float %a, float %b, float %c) nounwind { ; RV32IF-LABEL: fmsub_s: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a3, %hi(.LCPI15_0) -; RV32IF-NEXT: addi a3, a3, %lo(.LCPI15_0) -; RV32IF-NEXT: flw ft0, 0(a3) +; RV32IF-NEXT: fmv.w.x ft0, a2 +; RV32IF-NEXT: lui a2, %hi(.LCPI15_0) +; RV32IF-NEXT: addi a2, a2, %lo(.LCPI15_0) +; RV32IF-NEXT: flw ft1, 0(a2) +; RV32IF-NEXT: fadd.s ft0, ft0, ft1 ; RV32IF-NEXT: fmv.w.x ft1, a1 ; RV32IF-NEXT: fmv.w.x ft2, a0 -; RV32IF-NEXT: fmv.w.x ft3, a2 -; RV32IF-NEXT: fadd.s ft0, ft3, ft0 ; RV32IF-NEXT: fmsub.s ft0, ft2, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: fmsub_s: ; RV64IF: # %bb.0: -; RV64IF-NEXT: lui a3, %hi(.LCPI15_0) -; RV64IF-NEXT: addi a3, a3, %lo(.LCPI15_0) -; RV64IF-NEXT: flw ft0, 0(a3) +; RV64IF-NEXT: fmv.w.x ft0, a2 
+; RV64IF-NEXT: lui a2, %hi(.LCPI15_0) +; RV64IF-NEXT: addi a2, a2, %lo(.LCPI15_0) +; RV64IF-NEXT: flw ft1, 0(a2) +; RV64IF-NEXT: fadd.s ft0, ft0, ft1 ; RV64IF-NEXT: fmv.w.x ft1, a1 ; RV64IF-NEXT: fmv.w.x ft2, a0 -; RV64IF-NEXT: fmv.w.x ft3, a2 -; RV64IF-NEXT: fadd.s ft0, ft3, ft0 ; RV64IF-NEXT: fmsub.s ft0, ft2, ft1, ft0 ; RV64IF-NEXT: fmv.x.w a0, ft0 ; RV64IF-NEXT: ret @@ -371,29 +371,29 @@ define float @fmsub_s(float %a, float %b, float %c) nounwind { define float @fnmadd_s(float %a, float %b, float %c) nounwind { ; RV32IF-LABEL: fnmadd_s: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a3, %hi(.LCPI16_0) -; RV32IF-NEXT: addi a3, a3, %lo(.LCPI16_0) -; RV32IF-NEXT: flw ft0, 0(a3) -; RV32IF-NEXT: fmv.w.x ft1, a1 -; RV32IF-NEXT: fmv.w.x ft2, a2 -; RV32IF-NEXT: fmv.w.x ft3, a0 -; RV32IF-NEXT: fadd.s ft3, ft3, ft0 -; RV32IF-NEXT: fadd.s ft0, ft2, ft0 -; RV32IF-NEXT: fnmadd.s ft0, ft3, ft1, ft0 +; RV32IF-NEXT: fmv.w.x ft0, a2 +; RV32IF-NEXT: lui a2, %hi(.LCPI16_0) +; RV32IF-NEXT: addi a2, a2, %lo(.LCPI16_0) +; RV32IF-NEXT: flw ft1, 0(a2) +; RV32IF-NEXT: fadd.s ft0, ft0, ft1 +; RV32IF-NEXT: fmv.w.x ft2, a0 +; RV32IF-NEXT: fadd.s ft1, ft2, ft1 +; RV32IF-NEXT: fmv.w.x ft2, a1 +; RV32IF-NEXT: fnmadd.s ft0, ft1, ft2, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: fnmadd_s: ; RV64IF: # %bb.0: -; RV64IF-NEXT: lui a3, %hi(.LCPI16_0) -; RV64IF-NEXT: addi a3, a3, %lo(.LCPI16_0) -; RV64IF-NEXT: flw ft0, 0(a3) -; RV64IF-NEXT: fmv.w.x ft1, a1 -; RV64IF-NEXT: fmv.w.x ft2, a2 -; RV64IF-NEXT: fmv.w.x ft3, a0 -; RV64IF-NEXT: fadd.s ft3, ft3, ft0 -; RV64IF-NEXT: fadd.s ft0, ft2, ft0 -; RV64IF-NEXT: fnmadd.s ft0, ft3, ft1, ft0 +; RV64IF-NEXT: fmv.w.x ft0, a2 +; RV64IF-NEXT: lui a2, %hi(.LCPI16_0) +; RV64IF-NEXT: addi a2, a2, %lo(.LCPI16_0) +; RV64IF-NEXT: flw ft1, 0(a2) +; RV64IF-NEXT: fadd.s ft0, ft0, ft1 +; RV64IF-NEXT: fmv.w.x ft2, a0 +; RV64IF-NEXT: fadd.s ft1, ft2, ft1 +; RV64IF-NEXT: fmv.w.x ft2, a1 +; RV64IF-NEXT: fnmadd.s ft0, ft1, ft2, ft0 ; RV64IF-NEXT: fmv.x.w a0, ft0 ; RV64IF-NEXT: ret %a_ = fadd float 0.0, %a @@ -407,26 +407,26 @@ define float @fnmadd_s(float %a, float %b, float %c) nounwind { define float @fnmsub_s(float %a, float %b, float %c) nounwind { ; RV32IF-LABEL: fnmsub_s: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a3, %hi(.LCPI17_0) -; RV32IF-NEXT: addi a3, a3, %lo(.LCPI17_0) -; RV32IF-NEXT: flw ft0, 0(a3) +; RV32IF-NEXT: fmv.w.x ft0, a0 +; RV32IF-NEXT: lui a0, %hi(.LCPI17_0) +; RV32IF-NEXT: addi a0, a0, %lo(.LCPI17_0) +; RV32IF-NEXT: flw ft1, 0(a0) +; RV32IF-NEXT: fadd.s ft0, ft0, ft1 ; RV32IF-NEXT: fmv.w.x ft1, a2 ; RV32IF-NEXT: fmv.w.x ft2, a1 -; RV32IF-NEXT: fmv.w.x ft3, a0 -; RV32IF-NEXT: fadd.s ft0, ft3, ft0 ; RV32IF-NEXT: fnmsub.s ft0, ft0, ft2, ft1 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: fnmsub_s: ; RV64IF: # %bb.0: -; RV64IF-NEXT: lui a3, %hi(.LCPI17_0) -; RV64IF-NEXT: addi a3, a3, %lo(.LCPI17_0) -; RV64IF-NEXT: flw ft0, 0(a3) +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: lui a0, %hi(.LCPI17_0) +; RV64IF-NEXT: addi a0, a0, %lo(.LCPI17_0) +; RV64IF-NEXT: flw ft1, 0(a0) +; RV64IF-NEXT: fadd.s ft0, ft0, ft1 ; RV64IF-NEXT: fmv.w.x ft1, a2 ; RV64IF-NEXT: fmv.w.x ft2, a1 -; RV64IF-NEXT: fmv.w.x ft3, a0 -; RV64IF-NEXT: fadd.s ft0, ft3, ft0 ; RV64IF-NEXT: fnmsub.s ft0, ft0, ft2, ft1 ; RV64IF-NEXT: fmv.x.w a0, ft0 ; RV64IF-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll index a1e5b32..425da73 100644 --- a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll +++ 
b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll @@ -337,11 +337,11 @@ define void @br_fcmp_ord(float %a, float %b) nounwind { ; RV32IF: # %bb.0: ; RV32IF-NEXT: addi sp, sp, -16 ; RV32IF-NEXT: sw ra, 12(sp) -; RV32IF-NEXT: fmv.w.x ft0, a0 -; RV32IF-NEXT: fmv.w.x ft1, a1 -; RV32IF-NEXT: feq.s a0, ft1, ft1 +; RV32IF-NEXT: fmv.w.x ft0, a1 ; RV32IF-NEXT: feq.s a1, ft0, ft0 -; RV32IF-NEXT: and a0, a1, a0 +; RV32IF-NEXT: fmv.w.x ft0, a0 +; RV32IF-NEXT: feq.s a0, ft0, ft0 +; RV32IF-NEXT: and a0, a0, a1 ; RV32IF-NEXT: bnez a0, .LBB8_2 ; RV32IF-NEXT: # %bb.1: # %if.else ; RV32IF-NEXT: lw ra, 12(sp) @@ -354,11 +354,11 @@ define void @br_fcmp_ord(float %a, float %b) nounwind { ; RV64IF: # %bb.0: ; RV64IF-NEXT: addi sp, sp, -16 ; RV64IF-NEXT: sd ra, 8(sp) -; RV64IF-NEXT: fmv.w.x ft0, a0 -; RV64IF-NEXT: fmv.w.x ft1, a1 -; RV64IF-NEXT: feq.s a0, ft1, ft1 +; RV64IF-NEXT: fmv.w.x ft0, a1 ; RV64IF-NEXT: feq.s a1, ft0, ft0 -; RV64IF-NEXT: and a0, a1, a0 +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: feq.s a0, ft0, ft0 +; RV64IF-NEXT: and a0, a0, a1 ; RV64IF-NEXT: bnez a0, .LBB8_2 ; RV64IF-NEXT: # %bb.1: # %if.else ; RV64IF-NEXT: ld ra, 8(sp) @@ -635,11 +635,11 @@ define void @br_fcmp_uno(float %a, float %b) nounwind { ; RV32IF: # %bb.0: ; RV32IF-NEXT: addi sp, sp, -16 ; RV32IF-NEXT: sw ra, 12(sp) -; RV32IF-NEXT: fmv.w.x ft0, a0 -; RV32IF-NEXT: fmv.w.x ft1, a1 -; RV32IF-NEXT: feq.s a0, ft1, ft1 +; RV32IF-NEXT: fmv.w.x ft0, a1 ; RV32IF-NEXT: feq.s a1, ft0, ft0 -; RV32IF-NEXT: and a0, a1, a0 +; RV32IF-NEXT: fmv.w.x ft0, a0 +; RV32IF-NEXT: feq.s a0, ft0, ft0 +; RV32IF-NEXT: and a0, a0, a1 ; RV32IF-NEXT: seqz a0, a0 ; RV32IF-NEXT: bnez a0, .LBB15_2 ; RV32IF-NEXT: # %bb.1: # %if.else @@ -653,11 +653,11 @@ define void @br_fcmp_uno(float %a, float %b) nounwind { ; RV64IF: # %bb.0: ; RV64IF-NEXT: addi sp, sp, -16 ; RV64IF-NEXT: sd ra, 8(sp) -; RV64IF-NEXT: fmv.w.x ft0, a0 -; RV64IF-NEXT: fmv.w.x ft1, a1 -; RV64IF-NEXT: feq.s a0, ft1, ft1 +; RV64IF-NEXT: fmv.w.x ft0, a1 ; RV64IF-NEXT: feq.s a1, ft0, ft0 -; RV64IF-NEXT: and a0, a1, a0 +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: feq.s a0, ft0, ft0 +; RV64IF-NEXT: and a0, a0, a1 ; RV64IF-NEXT: seqz a0, a0 ; RV64IF-NEXT: bnez a0, .LBB15_2 ; RV64IF-NEXT: # %bb.1: # %if.else diff --git a/llvm/test/CodeGen/RISCV/float-convert.ll b/llvm/test/CodeGen/RISCV/float-convert.ll index 35a71c4..296e9d4 100644 --- a/llvm/test/CodeGen/RISCV/float-convert.ll +++ b/llvm/test/CodeGen/RISCV/float-convert.ll @@ -97,17 +97,17 @@ define float @fcvt_s_wu(i32 %a) nounwind { define float @fmv_w_x(i32 %a, i32 %b) nounwind { ; RV32IF-LABEL: fmv_w_x: ; RV32IF: # %bb.0: -; RV32IF-NEXT: fmv.w.x ft0, a0 -; RV32IF-NEXT: fmv.w.x ft1, a1 -; RV32IF-NEXT: fadd.s ft0, ft0, ft1 +; RV32IF-NEXT: fmv.w.x ft0, a1 +; RV32IF-NEXT: fmv.w.x ft1, a0 +; RV32IF-NEXT: fadd.s ft0, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: fmv_w_x: ; RV64IF: # %bb.0: -; RV64IF-NEXT: fmv.w.x ft0, a0 -; RV64IF-NEXT: fmv.w.x ft1, a1 -; RV64IF-NEXT: fadd.s ft0, ft0, ft1 +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 ; RV64IF-NEXT: fmv.x.w a0, ft0 ; RV64IF-NEXT: ret ; Ensure fmv.w.x is generated even for a soft float calling convention diff --git a/llvm/test/CodeGen/RISCV/float-fcmp.ll b/llvm/test/CodeGen/RISCV/float-fcmp.ll index 3a44020..43a849b 100644 --- a/llvm/test/CodeGen/RISCV/float-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/float-fcmp.ll @@ -146,20 +146,20 @@ define i32 @fcmp_one(float %a, float %b) nounwind { define i32 @fcmp_ord(float %a, float %b) 
nounwind { ; RV32IF-LABEL: fcmp_ord: ; RV32IF: # %bb.0: -; RV32IF-NEXT: fmv.w.x ft0, a0 -; RV32IF-NEXT: fmv.w.x ft1, a1 -; RV32IF-NEXT: feq.s a0, ft1, ft1 +; RV32IF-NEXT: fmv.w.x ft0, a1 ; RV32IF-NEXT: feq.s a1, ft0, ft0 -; RV32IF-NEXT: and a0, a1, a0 +; RV32IF-NEXT: fmv.w.x ft0, a0 +; RV32IF-NEXT: feq.s a0, ft0, ft0 +; RV32IF-NEXT: and a0, a0, a1 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: fcmp_ord: ; RV64IF: # %bb.0: -; RV64IF-NEXT: fmv.w.x ft0, a0 -; RV64IF-NEXT: fmv.w.x ft1, a1 -; RV64IF-NEXT: feq.s a0, ft1, ft1 +; RV64IF-NEXT: fmv.w.x ft0, a1 ; RV64IF-NEXT: feq.s a1, ft0, ft0 -; RV64IF-NEXT: and a0, a1, a0 +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: feq.s a0, ft0, ft0 +; RV64IF-NEXT: and a0, a0, a1 ; RV64IF-NEXT: ret %1 = fcmp ord float %a, %b %2 = zext i1 %1 to i32 @@ -303,21 +303,21 @@ define i32 @fcmp_une(float %a, float %b) nounwind { define i32 @fcmp_uno(float %a, float %b) nounwind { ; RV32IF-LABEL: fcmp_uno: ; RV32IF: # %bb.0: -; RV32IF-NEXT: fmv.w.x ft0, a0 -; RV32IF-NEXT: fmv.w.x ft1, a1 -; RV32IF-NEXT: feq.s a0, ft1, ft1 +; RV32IF-NEXT: fmv.w.x ft0, a1 ; RV32IF-NEXT: feq.s a1, ft0, ft0 -; RV32IF-NEXT: and a0, a1, a0 +; RV32IF-NEXT: fmv.w.x ft0, a0 +; RV32IF-NEXT: feq.s a0, ft0, ft0 +; RV32IF-NEXT: and a0, a0, a1 ; RV32IF-NEXT: seqz a0, a0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: fcmp_uno: ; RV64IF: # %bb.0: -; RV64IF-NEXT: fmv.w.x ft0, a0 -; RV64IF-NEXT: fmv.w.x ft1, a1 -; RV64IF-NEXT: feq.s a0, ft1, ft1 +; RV64IF-NEXT: fmv.w.x ft0, a1 ; RV64IF-NEXT: feq.s a1, ft0, ft0 -; RV64IF-NEXT: and a0, a1, a0 +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: feq.s a0, ft0, ft0 +; RV64IF-NEXT: and a0, a0, a1 ; RV64IF-NEXT: seqz a0, a0 ; RV64IF-NEXT: ret %1 = fcmp uno float %a, %b diff --git a/llvm/test/CodeGen/RISCV/float-imm.ll b/llvm/test/CodeGen/RISCV/float-imm.ll index e0ec464..09d9c3c 100644 --- a/llvm/test/CodeGen/RISCV/float-imm.ll +++ b/llvm/test/CodeGen/RISCV/float-imm.ll @@ -26,21 +26,21 @@ define float @float_imm_op(float %a) nounwind { ; TODO: addi should be folded in to the flw ; RV32IF-LABEL: float_imm_op: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a1, %hi(.LCPI1_0) -; RV32IF-NEXT: addi a1, a1, %lo(.LCPI1_0) -; RV32IF-NEXT: flw ft0, 0(a1) -; RV32IF-NEXT: fmv.w.x ft1, a0 -; RV32IF-NEXT: fadd.s ft0, ft1, ft0 +; RV32IF-NEXT: fmv.w.x ft0, a0 +; RV32IF-NEXT: lui a0, %hi(.LCPI1_0) +; RV32IF-NEXT: addi a0, a0, %lo(.LCPI1_0) +; RV32IF-NEXT: flw ft1, 0(a0) +; RV32IF-NEXT: fadd.s ft0, ft0, ft1 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: float_imm_op: ; RV64IF: # %bb.0: -; RV64IF-NEXT: lui a1, %hi(.LCPI1_0) -; RV64IF-NEXT: addi a1, a1, %lo(.LCPI1_0) -; RV64IF-NEXT: flw ft0, 0(a1) -; RV64IF-NEXT: fmv.w.x ft1, a0 -; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: lui a0, %hi(.LCPI1_0) +; RV64IF-NEXT: addi a0, a0, %lo(.LCPI1_0) +; RV64IF-NEXT: flw ft1, 0(a0) +; RV64IF-NEXT: fadd.s ft0, ft0, ft1 ; RV64IF-NEXT: fmv.x.w a0, ft0 ; RV64IF-NEXT: ret %1 = fadd float %a, 1.0 diff --git a/llvm/test/CodeGen/RISCV/float-intrinsics.ll b/llvm/test/CodeGen/RISCV/float-intrinsics.ll index 68bb95b..7ac2df6 100644 --- a/llvm/test/CodeGen/RISCV/float-intrinsics.ll +++ b/llvm/test/CodeGen/RISCV/float-intrinsics.ll @@ -108,16 +108,17 @@ define float @sincos_f32(float %a) nounwind { ; RV32IF-NEXT: addi sp, sp, -16 ; RV32IF-NEXT: sw ra, 12(sp) ; RV32IF-NEXT: sw s0, 8(sp) +; RV32IF-NEXT: sw s1, 4(sp) ; RV32IF-NEXT: mv s0, a0 ; RV32IF-NEXT: call sinf -; RV32IF-NEXT: fmv.w.x ft0, a0 -; RV32IF-NEXT: fsw ft0, 4(sp) +; RV32IF-NEXT: mv s1, a0 ; RV32IF-NEXT: mv a0, s0 ; 
RV32IF-NEXT: call cosf ; RV32IF-NEXT: fmv.w.x ft0, a0 -; RV32IF-NEXT: flw ft1, 4(sp) +; RV32IF-NEXT: fmv.w.x ft1, s1 ; RV32IF-NEXT: fadd.s ft0, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 +; RV32IF-NEXT: lw s1, 4(sp) ; RV32IF-NEXT: lw s0, 8(sp) ; RV32IF-NEXT: lw ra, 12(sp) ; RV32IF-NEXT: addi sp, sp, 16 @@ -128,16 +129,17 @@ define float @sincos_f32(float %a) nounwind { ; RV64IF-NEXT: addi sp, sp, -32 ; RV64IF-NEXT: sd ra, 24(sp) ; RV64IF-NEXT: sd s0, 16(sp) +; RV64IF-NEXT: sd s1, 8(sp) ; RV64IF-NEXT: mv s0, a0 ; RV64IF-NEXT: call sinf -; RV64IF-NEXT: fmv.w.x ft0, a0 -; RV64IF-NEXT: fsw ft0, 12(sp) +; RV64IF-NEXT: mv s1, a0 ; RV64IF-NEXT: mv a0, s0 ; RV64IF-NEXT: call cosf ; RV64IF-NEXT: fmv.w.x ft0, a0 -; RV64IF-NEXT: flw ft1, 12(sp) +; RV64IF-NEXT: fmv.w.x ft1, s1 ; RV64IF-NEXT: fadd.s ft0, ft1, ft0 ; RV64IF-NEXT: fmv.x.w a0, ft0 +; RV64IF-NEXT: ld s1, 8(sp) ; RV64IF-NEXT: ld s0, 16(sp) ; RV64IF-NEXT: ld ra, 24(sp) ; RV64IF-NEXT: addi sp, sp, 32 @@ -322,21 +324,21 @@ define float @fmuladd_f32(float %a, float %b, float %c) nounwind { ; Use of fmadd depends on TargetLowering::isFMAFasterthanFMulAndFAdd ; RV32IF-LABEL: fmuladd_f32: ; RV32IF: # %bb.0: -; RV32IF-NEXT: fmv.w.x ft0, a2 -; RV32IF-NEXT: fmv.w.x ft1, a1 -; RV32IF-NEXT: fmv.w.x ft2, a0 -; RV32IF-NEXT: fmul.s ft1, ft2, ft1 -; RV32IF-NEXT: fadd.s ft0, ft1, ft0 +; RV32IF-NEXT: fmv.w.x ft0, a1 +; RV32IF-NEXT: fmv.w.x ft1, a0 +; RV32IF-NEXT: fmul.s ft0, ft1, ft0 +; RV32IF-NEXT: fmv.w.x ft1, a2 +; RV32IF-NEXT: fadd.s ft0, ft0, ft1 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: fmuladd_f32: ; RV64IF: # %bb.0: -; RV64IF-NEXT: fmv.w.x ft0, a2 -; RV64IF-NEXT: fmv.w.x ft1, a1 -; RV64IF-NEXT: fmv.w.x ft2, a0 -; RV64IF-NEXT: fmul.s ft1, ft2, ft1 -; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fmul.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.w.x ft1, a2 +; RV64IF-NEXT: fadd.s ft0, ft0, ft1 ; RV64IF-NEXT: fmv.x.w a0, ft0 ; RV64IF-NEXT: ret %1 = call float @llvm.fmuladd.f32(float %a, float %b, float %c) diff --git a/llvm/test/CodeGen/RISCV/float-mem.ll b/llvm/test/CodeGen/RISCV/float-mem.ll index c989235..2d1a7a7 100644 --- a/llvm/test/CodeGen/RISCV/float-mem.ll +++ b/llvm/test/CodeGen/RISCV/float-mem.ll @@ -7,17 +7,17 @@ define float @flw(float *%a) nounwind { ; RV32IF-LABEL: flw: ; RV32IF: # %bb.0: -; RV32IF-NEXT: flw ft0, 0(a0) -; RV32IF-NEXT: flw ft1, 12(a0) -; RV32IF-NEXT: fadd.s ft0, ft0, ft1 +; RV32IF-NEXT: flw ft0, 12(a0) +; RV32IF-NEXT: flw ft1, 0(a0) +; RV32IF-NEXT: fadd.s ft0, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: flw: ; RV64IF: # %bb.0: -; RV64IF-NEXT: flw ft0, 0(a0) -; RV64IF-NEXT: flw ft1, 12(a0) -; RV64IF-NEXT: fadd.s ft0, ft0, ft1 +; RV64IF-NEXT: flw ft0, 12(a0) +; RV64IF-NEXT: flw ft1, 0(a0) +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 ; RV64IF-NEXT: fmv.x.w a0, ft0 ; RV64IF-NEXT: ret %1 = load float, float* %a @@ -37,8 +37,8 @@ define void @fsw(float *%a, float %b, float %c) nounwind { ; RV32IF-NEXT: fmv.w.x ft0, a2 ; RV32IF-NEXT: fmv.w.x ft1, a1 ; RV32IF-NEXT: fadd.s ft0, ft1, ft0 -; RV32IF-NEXT: fsw ft0, 0(a0) ; RV32IF-NEXT: fsw ft0, 32(a0) +; RV32IF-NEXT: fsw ft0, 0(a0) ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: fsw: @@ -46,8 +46,8 @@ define void @fsw(float *%a, float %b, float %c) nounwind { ; RV64IF-NEXT: fmv.w.x ft0, a2 ; RV64IF-NEXT: fmv.w.x ft1, a1 ; RV64IF-NEXT: fadd.s ft0, ft1, ft0 -; RV64IF-NEXT: fsw ft0, 0(a0) ; RV64IF-NEXT: fsw ft0, 32(a0) +; RV64IF-NEXT: fsw ft0, 0(a0) ; RV64IF-NEXT: ret %1 = fadd float %b, %c 
store float %1, float* %a @@ -70,10 +70,10 @@ define float @flw_fsw_global(float %a, float %b) nounwind { ; RV32IF-NEXT: lui a0, %hi(G) ; RV32IF-NEXT: flw ft1, %lo(G)(a0) ; RV32IF-NEXT: fsw ft0, %lo(G)(a0) -; RV32IF-NEXT: addi a1, a0, %lo(G) -; RV32IF-NEXT: flw ft1, 36(a1) +; RV32IF-NEXT: addi a0, a0, %lo(G) +; RV32IF-NEXT: flw ft1, 36(a0) +; RV32IF-NEXT: fsw ft0, 36(a0) ; RV32IF-NEXT: fmv.x.w a0, ft0 -; RV32IF-NEXT: fsw ft0, 36(a1) ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: flw_fsw_global: @@ -84,10 +84,10 @@ define float @flw_fsw_global(float %a, float %b) nounwind { ; RV64IF-NEXT: lui a0, %hi(G) ; RV64IF-NEXT: flw ft1, %lo(G)(a0) ; RV64IF-NEXT: fsw ft0, %lo(G)(a0) -; RV64IF-NEXT: addi a1, a0, %lo(G) -; RV64IF-NEXT: flw ft1, 36(a1) +; RV64IF-NEXT: addi a0, a0, %lo(G) +; RV64IF-NEXT: flw ft1, 36(a0) +; RV64IF-NEXT: fsw ft0, 36(a0) ; RV64IF-NEXT: fmv.x.w a0, ft0 -; RV64IF-NEXT: fsw ft0, 36(a1) ; RV64IF-NEXT: ret %1 = fadd float %a, %b %2 = load volatile float, float* @G @@ -102,24 +102,24 @@ define float @flw_fsw_global(float %a, float %b) nounwind { define float @flw_fsw_constant(float %a) nounwind { ; RV32IF-LABEL: flw_fsw_constant: ; RV32IF: # %bb.0: -; RV32IF-NEXT: lui a1, 912092 -; RV32IF-NEXT: flw ft0, -273(a1) -; RV32IF-NEXT: fmv.w.x ft1, a0 -; RV32IF-NEXT: fadd.s ft0, ft1, ft0 +; RV32IF-NEXT: fmv.w.x ft0, a0 +; RV32IF-NEXT: lui a0, 912092 +; RV32IF-NEXT: flw ft1, -273(a0) +; RV32IF-NEXT: fadd.s ft0, ft0, ft1 +; RV32IF-NEXT: fsw ft0, -273(a0) ; RV32IF-NEXT: fmv.x.w a0, ft0 -; RV32IF-NEXT: fsw ft0, -273(a1) ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: flw_fsw_constant: ; RV64IF: # %bb.0: -; RV64IF-NEXT: lui a1, 56 -; RV64IF-NEXT: addiw a1, a1, -1353 -; RV64IF-NEXT: slli a1, a1, 14 -; RV64IF-NEXT: flw ft0, -273(a1) -; RV64IF-NEXT: fmv.w.x ft1, a0 -; RV64IF-NEXT: fadd.s ft0, ft1, ft0 +; RV64IF-NEXT: fmv.w.x ft0, a0 +; RV64IF-NEXT: lui a0, 56 +; RV64IF-NEXT: addiw a0, a0, -1353 +; RV64IF-NEXT: slli a0, a0, 14 +; RV64IF-NEXT: flw ft1, -273(a0) +; RV64IF-NEXT: fadd.s ft0, ft0, ft1 +; RV64IF-NEXT: fsw ft0, -273(a0) ; RV64IF-NEXT: fmv.x.w a0, ft0 -; RV64IF-NEXT: fsw ft0, -273(a1) ; RV64IF-NEXT: ret %1 = inttoptr i32 3735928559 to float* %2 = load volatile float, float* %1 @@ -135,32 +135,34 @@ define float @flw_stack(float %a) nounwind { ; RV32IF: # %bb.0: ; RV32IF-NEXT: addi sp, sp, -16 ; RV32IF-NEXT: sw ra, 12(sp) -; RV32IF-NEXT: fmv.w.x ft0, a0 -; RV32IF-NEXT: fsw ft0, 4(sp) -; RV32IF-NEXT: addi a0, sp, 8 +; RV32IF-NEXT: sw s0, 8(sp) +; RV32IF-NEXT: mv s0, a0 +; RV32IF-NEXT: addi a0, sp, 4 ; RV32IF-NEXT: call notdead -; RV32IF-NEXT: flw ft0, 8(sp) +; RV32IF-NEXT: fmv.w.x ft0, s0 ; RV32IF-NEXT: flw ft1, 4(sp) -; RV32IF-NEXT: fadd.s ft0, ft0, ft1 +; RV32IF-NEXT: fadd.s ft0, ft1, ft0 ; RV32IF-NEXT: fmv.x.w a0, ft0 +; RV32IF-NEXT: lw s0, 8(sp) ; RV32IF-NEXT: lw ra, 12(sp) ; RV32IF-NEXT: addi sp, sp, 16 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: flw_stack: ; RV64IF: # %bb.0: -; RV64IF-NEXT: addi sp, sp, -16 -; RV64IF-NEXT: sd ra, 8(sp) -; RV64IF-NEXT: fmv.w.x ft0, a0 -; RV64IF-NEXT: fsw ft0, 0(sp) -; RV64IF-NEXT: addi a0, sp, 4 +; RV64IF-NEXT: addi sp, sp, -32 +; RV64IF-NEXT: sd ra, 24(sp) +; RV64IF-NEXT: sd s0, 16(sp) +; RV64IF-NEXT: mv s0, a0 +; RV64IF-NEXT: addi a0, sp, 12 ; RV64IF-NEXT: call notdead -; RV64IF-NEXT: flw ft0, 4(sp) -; RV64IF-NEXT: flw ft1, 0(sp) -; RV64IF-NEXT: fadd.s ft0, ft0, ft1 +; RV64IF-NEXT: fmv.w.x ft0, s0 +; RV64IF-NEXT: flw ft1, 12(sp) +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 ; RV64IF-NEXT: fmv.x.w a0, ft0 -; RV64IF-NEXT: ld ra, 8(sp) -; RV64IF-NEXT: addi sp, sp, 16 +; RV64IF-NEXT: ld s0, 
16(sp) +; RV64IF-NEXT: ld ra, 24(sp) +; RV64IF-NEXT: addi sp, sp, 32 ; RV64IF-NEXT: ret %1 = alloca float, align 4 %2 = bitcast float* %1 to i8* diff --git a/llvm/test/CodeGen/RISCV/float-select-fcmp.ll b/llvm/test/CodeGen/RISCV/float-select-fcmp.ll index 142e4f8..ec81c53 100644 --- a/llvm/test/CodeGen/RISCV/float-select-fcmp.ll +++ b/llvm/test/CodeGen/RISCV/float-select-fcmp.ll @@ -240,14 +240,14 @@ define float @select_fcmp_ord(float %a, float %b) nounwind { define float @select_fcmp_ueq(float %a, float %b) nounwind { ; RV32IF-LABEL: select_fcmp_ueq: ; RV32IF: # %bb.0: -; RV32IF-NEXT: fmv.w.x ft1, a1 ; RV32IF-NEXT: fmv.w.x ft0, a0 -; RV32IF-NEXT: feq.s a0, ft0, ft1 -; RV32IF-NEXT: feq.s a1, ft1, ft1 -; RV32IF-NEXT: feq.s a2, ft0, ft0 -; RV32IF-NEXT: and a1, a2, a1 -; RV32IF-NEXT: seqz a1, a1 -; RV32IF-NEXT: or a0, a0, a1 +; RV32IF-NEXT: fmv.w.x ft1, a1 +; RV32IF-NEXT: feq.s a0, ft1, ft1 +; RV32IF-NEXT: feq.s a1, ft0, ft0 +; RV32IF-NEXT: and a0, a1, a0 +; RV32IF-NEXT: seqz a0, a0 +; RV32IF-NEXT: feq.s a1, ft0, ft1 +; RV32IF-NEXT: or a0, a1, a0 ; RV32IF-NEXT: bnez a0, .LBB8_2 ; RV32IF-NEXT: # %bb.1: ; RV32IF-NEXT: fmv.s ft0, ft1 @@ -257,14 +257,14 @@ define float @select_fcmp_ueq(float %a, float %b) nounwind { ; ; RV64IF-LABEL: select_fcmp_ueq: ; RV64IF: # %bb.0: -; RV64IF-NEXT: fmv.w.x ft1, a1 ; RV64IF-NEXT: fmv.w.x ft0, a0 -; RV64IF-NEXT: feq.s a0, ft0, ft1 -; RV64IF-NEXT: feq.s a1, ft1, ft1 -; RV64IF-NEXT: feq.s a2, ft0, ft0 -; RV64IF-NEXT: and a1, a2, a1 -; RV64IF-NEXT: seqz a1, a1 -; RV64IF-NEXT: or a0, a0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a1 +; RV64IF-NEXT: feq.s a0, ft1, ft1 +; RV64IF-NEXT: feq.s a1, ft0, ft0 +; RV64IF-NEXT: and a0, a1, a0 +; RV64IF-NEXT: seqz a0, a0 +; RV64IF-NEXT: feq.s a1, ft0, ft1 +; RV64IF-NEXT: or a0, a1, a0 ; RV64IF-NEXT: bnez a0, .LBB8_2 ; RV64IF-NEXT: # %bb.1: ; RV64IF-NEXT: fmv.s ft0, ft1 @@ -486,24 +486,24 @@ define i32 @i32_select_fcmp_oeq(float %a, float %b, i32 %c, i32 %d) nounwind { ; RV32IF: # %bb.0: ; RV32IF-NEXT: fmv.w.x ft0, a1 ; RV32IF-NEXT: fmv.w.x ft1, a0 -; RV32IF-NEXT: feq.s a1, ft1, ft0 -; RV32IF-NEXT: mv a0, a2 -; RV32IF-NEXT: bnez a1, .LBB16_2 +; RV32IF-NEXT: feq.s a0, ft1, ft0 +; RV32IF-NEXT: bnez a0, .LBB16_2 ; RV32IF-NEXT: # %bb.1: -; RV32IF-NEXT: mv a0, a3 +; RV32IF-NEXT: mv a2, a3 ; RV32IF-NEXT: .LBB16_2: +; RV32IF-NEXT: mv a0, a2 ; RV32IF-NEXT: ret ; ; RV64IF-LABEL: i32_select_fcmp_oeq: ; RV64IF: # %bb.0: ; RV64IF-NEXT: fmv.w.x ft0, a1 ; RV64IF-NEXT: fmv.w.x ft1, a0 -; RV64IF-NEXT: feq.s a1, ft1, ft0 -; RV64IF-NEXT: mv a0, a2 -; RV64IF-NEXT: bnez a1, .LBB16_2 +; RV64IF-NEXT: feq.s a0, ft1, ft0 +; RV64IF-NEXT: bnez a0, .LBB16_2 ; RV64IF-NEXT: # %bb.1: -; RV64IF-NEXT: mv a0, a3 +; RV64IF-NEXT: mv a2, a3 ; RV64IF-NEXT: .LBB16_2: +; RV64IF-NEXT: mv a0, a2 ; RV64IF-NEXT: ret %1 = fcmp oeq float %a, %b %2 = select i1 %1, i32 %c, i32 %d diff --git a/llvm/test/CodeGen/RISCV/fp128.ll b/llvm/test/CodeGen/RISCV/fp128.ll index 91b1702..a928d69 100644 --- a/llvm/test/CodeGen/RISCV/fp128.ll +++ b/llvm/test/CodeGen/RISCV/fp128.ll @@ -13,28 +13,28 @@ define i32 @test_load_and_cmp() nounwind { ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -48 ; RV32I-NEXT: sw ra, 44(sp) -; RV32I-NEXT: lui a0, %hi(x) -; RV32I-NEXT: addi a1, a0, %lo(x) -; RV32I-NEXT: lw a6, 4(a1) -; RV32I-NEXT: lw a7, 8(a1) -; RV32I-NEXT: lw a1, 12(a1) -; RV32I-NEXT: lw a0, %lo(x)(a0) -; RV32I-NEXT: lui a4, %hi(y) -; RV32I-NEXT: addi a5, a4, %lo(y) -; RV32I-NEXT: lw a2, 4(a5) -; RV32I-NEXT: lw a3, 8(a5) -; RV32I-NEXT: lw a5, 12(a5) -; RV32I-NEXT: lw a4, %lo(y)(a4) -; RV32I-NEXT: sw a4, 
8(sp) -; RV32I-NEXT: sw a0, 24(sp) -; RV32I-NEXT: sw a5, 20(sp) -; RV32I-NEXT: sw a3, 16(sp) -; RV32I-NEXT: sw a2, 12(sp) +; RV32I-NEXT: lui a0, %hi(y) +; RV32I-NEXT: lw a1, %lo(y)(a0) +; RV32I-NEXT: sw a1, 8(sp) +; RV32I-NEXT: lui a1, %hi(x) +; RV32I-NEXT: lw a2, %lo(x)(a1) +; RV32I-NEXT: sw a2, 24(sp) +; RV32I-NEXT: addi a0, a0, %lo(y) +; RV32I-NEXT: lw a2, 12(a0) +; RV32I-NEXT: sw a2, 20(sp) +; RV32I-NEXT: lw a2, 8(a0) +; RV32I-NEXT: sw a2, 16(sp) +; RV32I-NEXT: lw a0, 4(a0) +; RV32I-NEXT: sw a0, 12(sp) +; RV32I-NEXT: addi a0, a1, %lo(x) +; RV32I-NEXT: lw a1, 12(a0) ; RV32I-NEXT: sw a1, 36(sp) -; RV32I-NEXT: sw a7, 32(sp) +; RV32I-NEXT: lw a1, 8(a0) +; RV32I-NEXT: sw a1, 32(sp) +; RV32I-NEXT: lw a0, 4(a0) +; RV32I-NEXT: sw a0, 28(sp) ; RV32I-NEXT: addi a0, sp, 24 ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: sw a6, 28(sp) ; RV32I-NEXT: call __netf2 ; RV32I-NEXT: snez a0, a0 ; RV32I-NEXT: lw ra, 44(sp) @@ -52,39 +52,39 @@ define i32 @test_add_and_fptosi() nounwind { ; RV32I: # %bb.0: ; RV32I-NEXT: addi sp, sp, -80 ; RV32I-NEXT: sw ra, 76(sp) -; RV32I-NEXT: lui a0, %hi(x) -; RV32I-NEXT: addi a1, a0, %lo(x) -; RV32I-NEXT: lw a6, 4(a1) -; RV32I-NEXT: lw a7, 8(a1) -; RV32I-NEXT: lw a1, 12(a1) -; RV32I-NEXT: lw a0, %lo(x)(a0) -; RV32I-NEXT: lui a4, %hi(y) -; RV32I-NEXT: addi a5, a4, %lo(y) -; RV32I-NEXT: lw a3, 4(a5) -; RV32I-NEXT: lw a2, 8(a5) -; RV32I-NEXT: lw a5, 12(a5) -; RV32I-NEXT: lw a4, %lo(y)(a4) -; RV32I-NEXT: sw a4, 24(sp) -; RV32I-NEXT: sw a0, 40(sp) -; RV32I-NEXT: sw a5, 36(sp) +; RV32I-NEXT: lui a0, %hi(y) +; RV32I-NEXT: lw a1, %lo(y)(a0) +; RV32I-NEXT: sw a1, 24(sp) +; RV32I-NEXT: lui a1, %hi(x) +; RV32I-NEXT: lw a2, %lo(x)(a1) +; RV32I-NEXT: sw a2, 40(sp) +; RV32I-NEXT: addi a0, a0, %lo(y) +; RV32I-NEXT: lw a2, 12(a0) +; RV32I-NEXT: sw a2, 36(sp) +; RV32I-NEXT: lw a2, 8(a0) ; RV32I-NEXT: sw a2, 32(sp) -; RV32I-NEXT: sw a3, 28(sp) +; RV32I-NEXT: lw a0, 4(a0) +; RV32I-NEXT: sw a0, 28(sp) +; RV32I-NEXT: addi a0, a1, %lo(x) +; RV32I-NEXT: lw a1, 12(a0) ; RV32I-NEXT: sw a1, 52(sp) -; RV32I-NEXT: sw a7, 48(sp) +; RV32I-NEXT: lw a1, 8(a0) +; RV32I-NEXT: sw a1, 48(sp) +; RV32I-NEXT: lw a0, 4(a0) +; RV32I-NEXT: sw a0, 44(sp) ; RV32I-NEXT: addi a0, sp, 56 ; RV32I-NEXT: addi a1, sp, 40 ; RV32I-NEXT: addi a2, sp, 24 -; RV32I-NEXT: sw a6, 44(sp) ; RV32I-NEXT: call __addtf3 -; RV32I-NEXT: lw a1, 56(sp) +; RV32I-NEXT: lw a0, 68(sp) +; RV32I-NEXT: sw a0, 20(sp) +; RV32I-NEXT: lw a0, 64(sp) +; RV32I-NEXT: sw a0, 16(sp) ; RV32I-NEXT: lw a0, 60(sp) -; RV32I-NEXT: lw a2, 64(sp) -; RV32I-NEXT: lw a3, 68(sp) -; RV32I-NEXT: sw a3, 20(sp) -; RV32I-NEXT: sw a2, 16(sp) ; RV32I-NEXT: sw a0, 12(sp) +; RV32I-NEXT: lw a0, 56(sp) +; RV32I-NEXT: sw a0, 8(sp) ; RV32I-NEXT: addi a0, sp, 8 -; RV32I-NEXT: sw a1, 8(sp) ; RV32I-NEXT: call __fixtfsi ; RV32I-NEXT: lw ra, 76(sp) ; RV32I-NEXT: addi sp, sp, 80 diff --git a/llvm/test/CodeGen/RISCV/frame-info.ll b/llvm/test/CodeGen/RISCV/frame-info.ll index 754e8f5..9022fc3 100644 --- a/llvm/test/CodeGen/RISCV/frame-info.ll +++ b/llvm/test/CodeGen/RISCV/frame-info.ll @@ -38,12 +38,12 @@ define void @foo(i32 signext %size) { ; RV64-NEXT: .cfi_offset s0, -16 ; RV64-NEXT: addi s0, sp, 16 ; RV64-NEXT: .cfi_def_cfa s0, 0 -; RV64-NEXT: slli a0, a0, 32 -; RV64-NEXT: srli a0, a0, 32 -; RV64-NEXT: addi a0, a0, 15 ; RV64-NEXT: addi a1, zero, 1 ; RV64-NEXT: slli a1, a1, 33 ; RV64-NEXT: addi a1, a1, -16 +; RV64-NEXT: slli a0, a0, 32 +; RV64-NEXT: srli a0, a0, 32 +; RV64-NEXT: addi a0, a0, 15 ; RV64-NEXT: and a0, a0, a1 ; RV64-NEXT: sub a0, sp, a0 ; RV64-NEXT: mv sp, a0 diff --git 
a/llvm/test/CodeGen/RISCV/get-setcc-result-type.ll b/llvm/test/CodeGen/RISCV/get-setcc-result-type.ll index 21923f0..fe326b3 100644 --- a/llvm/test/CodeGen/RISCV/get-setcc-result-type.ll +++ b/llvm/test/CodeGen/RISCV/get-setcc-result-type.ll @@ -6,21 +6,21 @@ define void @getSetCCResultType(<4 x i32>* %p, <4 x i32>* %q) nounwind { ; RV32I-LABEL: getSetCCResultType: ; RV32I: # %bb.0: # %entry ; RV32I-NEXT: lw a1, 12(a0) -; RV32I-NEXT: lw a2, 8(a0) -; RV32I-NEXT: lw a3, 4(a0) -; RV32I-NEXT: lw a4, 0(a0) ; RV32I-NEXT: seqz a1, a1 -; RV32I-NEXT: seqz a2, a2 -; RV32I-NEXT: seqz a3, a3 -; RV32I-NEXT: seqz a4, a4 -; RV32I-NEXT: neg a4, a4 -; RV32I-NEXT: neg a3, a3 -; RV32I-NEXT: neg a2, a2 ; RV32I-NEXT: neg a1, a1 ; RV32I-NEXT: sw a1, 12(a0) -; RV32I-NEXT: sw a2, 8(a0) -; RV32I-NEXT: sw a3, 4(a0) -; RV32I-NEXT: sw a4, 0(a0) +; RV32I-NEXT: lw a1, 8(a0) +; RV32I-NEXT: seqz a1, a1 +; RV32I-NEXT: neg a1, a1 +; RV32I-NEXT: sw a1, 8(a0) +; RV32I-NEXT: lw a1, 4(a0) +; RV32I-NEXT: seqz a1, a1 +; RV32I-NEXT: neg a1, a1 +; RV32I-NEXT: sw a1, 4(a0) +; RV32I-NEXT: lw a1, 0(a0) +; RV32I-NEXT: seqz a1, a1 +; RV32I-NEXT: neg a1, a1 +; RV32I-NEXT: sw a1, 0(a0) ; RV32I-NEXT: ret entry: %0 = load <4 x i32>, <4 x i32>* %p, align 16 diff --git a/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll b/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll index df8b311..b00873c 100644 --- a/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll +++ b/llvm/test/CodeGen/RISCV/hoist-global-addr-base.ll @@ -12,10 +12,10 @@ define dso_local void @multiple_stores() local_unnamed_addr nounwind { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: lui a0, %hi(s) ; CHECK-NEXT: addi a0, a0, %lo(s) -; CHECK-NEXT: addi a1, zero, 10 -; CHECK-NEXT: sw a1, 160(a0) ; CHECK-NEXT: addi a1, zero, 20 ; CHECK-NEXT: sw a1, 164(a0) +; CHECK-NEXT: addi a1, zero, 10 +; CHECK-NEXT: sw a1, 160(a0) ; CHECK-NEXT: ret entry: store i32 10, i32* getelementptr inbounds (%struct.S, %struct.S* @s, i32 0, i32 1), align 4 diff --git a/llvm/test/CodeGen/RISCV/imm-cse.ll b/llvm/test/CodeGen/RISCV/imm-cse.ll index a1ddcd6..1af27f0 100644 --- a/llvm/test/CodeGen/RISCV/imm-cse.ll +++ b/llvm/test/CodeGen/RISCV/imm-cse.ll @@ -10,19 +10,19 @@ define void @imm32_cse() nounwind { ; RV32I-LABEL: imm32_cse: ; RV32I: # %bb.0: -; RV32I-NEXT: lui a0, %hi(src) -; RV32I-NEXT: lw a1, %lo(src)(a0) -; RV32I-NEXT: lui a2, 1 -; RV32I-NEXT: addi a2, a2, 1 -; RV32I-NEXT: add a1, a1, a2 +; RV32I-NEXT: lui a0, 1 +; RV32I-NEXT: addi a0, a0, 1 +; RV32I-NEXT: lui a1, %hi(src) +; RV32I-NEXT: lw a2, %lo(src)(a1) +; RV32I-NEXT: add a2, a2, a0 ; RV32I-NEXT: lui a3, %hi(dst) -; RV32I-NEXT: sw a1, %lo(dst)(a3) -; RV32I-NEXT: lw a1, %lo(src)(a0) -; RV32I-NEXT: add a1, a1, a2 -; RV32I-NEXT: addi a1, a1, 1 -; RV32I-NEXT: sw a1, %lo(dst)(a3) -; RV32I-NEXT: lw a0, %lo(src)(a0) -; RV32I-NEXT: add a0, a0, a2 +; RV32I-NEXT: sw a2, %lo(dst)(a3) +; RV32I-NEXT: lw a2, %lo(src)(a1) +; RV32I-NEXT: add a2, a2, a0 +; RV32I-NEXT: addi a2, a2, 1 +; RV32I-NEXT: sw a2, %lo(dst)(a3) +; RV32I-NEXT: lw a1, %lo(src)(a1) +; RV32I-NEXT: add a0, a1, a0 ; RV32I-NEXT: addi a0, a0, 2 ; RV32I-NEXT: sw a0, %lo(dst)(a3) ; RV32I-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/imm.ll b/llvm/test/CodeGen/RISCV/imm.ll index ce254f2..b733b59 100644 --- a/llvm/test/CodeGen/RISCV/imm.ll +++ b/llvm/test/CodeGen/RISCV/imm.ll @@ -142,8 +142,8 @@ define i64 @imm64_2() nounwind { define i64 @imm64_3() nounwind { ; RV32I-LABEL: imm64_3: ; RV32I: # %bb.0: -; RV32I-NEXT: addi a1, zero, 1 ; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: addi a1, zero, 1 ; RV32I-NEXT: 
ret ; ; RV64I-LABEL: imm64_3: @@ -157,8 +157,8 @@ define i64 @imm64_3() nounwind { define i64 @imm64_4() nounwind { ; RV32I-LABEL: imm64_4: ; RV32I: # %bb.0: -; RV32I-NEXT: lui a1, 524288 ; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: lui a1, 524288 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm64_4: @@ -172,8 +172,8 @@ define i64 @imm64_4() nounwind { define i64 @imm64_5() nounwind { ; RV32I-LABEL: imm64_5: ; RV32I: # %bb.0: -; RV32I-NEXT: lui a1, 524288 ; RV32I-NEXT: mv a0, zero +; RV32I-NEXT: lui a1, 524288 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm64_5: @@ -249,7 +249,7 @@ define i64 @imm64_9() nounwind { ; RV32I-LABEL: imm64_9: ; RV32I: # %bb.0: ; RV32I-NEXT: addi a0, zero, -1 -; RV32I-NEXT: addi a1, zero, -1 +; RV32I-NEXT: addi a1, zero, -1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: imm64_9: diff --git a/llvm/test/CodeGen/RISCV/indirectbr.ll b/llvm/test/CodeGen/RISCV/indirectbr.ll index 01201e8..e734de3 100644 --- a/llvm/test/CodeGen/RISCV/indirectbr.ll +++ b/llvm/test/CodeGen/RISCV/indirectbr.ll @@ -8,7 +8,7 @@ define i32 @indirectbr(i8* %target) nounwind { ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) ; RV32I-NEXT: jr a0 -; RV32I-NEXT: .LBB0_1: # %test_label +; RV32I-NEXT: .LBB0_1: ; RV32I-NEXT: mv a0, zero ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 @@ -26,7 +26,7 @@ define i32 @indirectbr_with_offset(i8* %a) nounwind { ; RV32I-NEXT: addi sp, sp, -16 ; RV32I-NEXT: sw ra, 12(sp) ; RV32I-NEXT: jr 1380(a0) -; RV32I-NEXT: .LBB1_1: # %test_label +; RV32I-NEXT: .LBB1_1: ; RV32I-NEXT: mv a0, zero ; RV32I-NEXT: lw ra, 12(sp) ; RV32I-NEXT: addi sp, sp, 16 diff --git a/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll b/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll index 37ce0f8..6d4f8da 100644 --- a/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll +++ b/llvm/test/CodeGen/RISCV/inline-asm-d-constraint-f.ll @@ -26,11 +26,11 @@ define double @constraint_f_double(double %a) nounwind { ; ; RV64F-LABEL: constraint_f_double: ; RV64F: # %bb.0: -; RV64F-NEXT: lui a1, %hi(gd) -; RV64F-NEXT: fld ft0, %lo(gd)(a1) -; RV64F-NEXT: fmv.d.x ft1, a0 +; RV64F-NEXT: fmv.d.x ft0, a0 +; RV64F-NEXT: lui a0, %hi(gd) +; RV64F-NEXT: fld ft1, %lo(gd)(a0) ; RV64F-NEXT: #APP -; RV64F-NEXT: fadd.d ft0, ft1, ft0 +; RV64F-NEXT: fadd.d ft0, ft0, ft1 ; RV64F-NEXT: #NO_APP ; RV64F-NEXT: fmv.x.d a0, ft0 ; RV64F-NEXT: ret @@ -59,9 +59,9 @@ define double @constraint_f_double_abi_name(double %a) nounwind { ; ; RV64F-LABEL: constraint_f_double_abi_name: ; RV64F: # %bb.0: -; RV64F-NEXT: lui a1, %hi(gd) -; RV64F-NEXT: fld fs0, %lo(gd)(a1) ; RV64F-NEXT: fmv.d.x fa1, a0 +; RV64F-NEXT: lui a0, %hi(gd) +; RV64F-NEXT: fld fs0, %lo(gd)(a0) ; RV64F-NEXT: #APP ; RV64F-NEXT: fadd.d ft0, fa1, fs0 ; RV64F-NEXT: #NO_APP diff --git a/llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll b/llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll index 25bce62..c91b1ae 100644 --- a/llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll +++ b/llvm/test/CodeGen/RISCV/inline-asm-f-constraint-f.ll @@ -9,22 +9,22 @@ define float @constraint_f_float(float %a) nounwind { ; RV32F-LABEL: constraint_f_float: ; RV32F: # %bb.0: -; RV32F-NEXT: lui a1, %hi(gf) -; RV32F-NEXT: flw ft0, %lo(gf)(a1) -; RV32F-NEXT: fmv.w.x ft1, a0 +; RV32F-NEXT: fmv.w.x ft0, a0 +; RV32F-NEXT: lui a0, %hi(gf) +; RV32F-NEXT: flw ft1, %lo(gf)(a0) ; RV32F-NEXT: #APP -; RV32F-NEXT: fadd.s ft0, ft1, ft0 +; RV32F-NEXT: fadd.s ft0, ft0, ft1 ; RV32F-NEXT: #NO_APP ; RV32F-NEXT: fmv.x.w a0, ft0 ; RV32F-NEXT: ret ; ; RV64F-LABEL: constraint_f_float: ; RV64F: # %bb.0: -; RV64F-NEXT: 
lui a1, %hi(gf) -; RV64F-NEXT: flw ft0, %lo(gf)(a1) -; RV64F-NEXT: fmv.w.x ft1, a0 +; RV64F-NEXT: fmv.w.x ft0, a0 +; RV64F-NEXT: lui a0, %hi(gf) +; RV64F-NEXT: flw ft1, %lo(gf)(a0) ; RV64F-NEXT: #APP -; RV64F-NEXT: fadd.s ft0, ft1, ft0 +; RV64F-NEXT: fadd.s ft0, ft0, ft1 ; RV64F-NEXT: #NO_APP ; RV64F-NEXT: fmv.x.w a0, ft0 ; RV64F-NEXT: ret @@ -36,9 +36,9 @@ define float @constraint_f_float(float %a) nounwind { define float @constraint_f_float_abi_name(float %a) nounwind { ; RV32F-LABEL: constraint_f_float_abi_name: ; RV32F: # %bb.0: -; RV32F-NEXT: lui a1, %hi(gf) -; RV32F-NEXT: flw fs0, %lo(gf)(a1) ; RV32F-NEXT: fmv.w.x fa0, a0 +; RV32F-NEXT: lui a0, %hi(gf) +; RV32F-NEXT: flw fs0, %lo(gf)(a0) ; RV32F-NEXT: #APP ; RV32F-NEXT: fadd.s ft0, fa0, fs0 ; RV32F-NEXT: #NO_APP @@ -47,9 +47,9 @@ define float @constraint_f_float_abi_name(float %a) nounwind { ; ; RV64F-LABEL: constraint_f_float_abi_name: ; RV64F: # %bb.0: -; RV64F-NEXT: lui a1, %hi(gf) -; RV64F-NEXT: flw fs0, %lo(gf)(a1) ; RV64F-NEXT: fmv.w.x fa0, a0 +; RV64F-NEXT: lui a0, %hi(gf) +; RV64F-NEXT: flw fs0, %lo(gf)(a0) ; RV64F-NEXT: #APP ; RV64F-NEXT: fadd.s ft0, fa0, fs0 ; RV64F-NEXT: #NO_APP diff --git a/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll b/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll index 3b3ee61..91b6249 100644 --- a/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll +++ b/llvm/test/CodeGen/RISCV/interrupt-attr-nocall.ll @@ -213,11 +213,11 @@ define void @foo_float() nounwind #0 { ; CHECK-RV32IF-NEXT: sw a0, 12(sp) ; CHECK-RV32IF-NEXT: fsw ft0, 8(sp) ; CHECK-RV32IF-NEXT: fsw ft1, 4(sp) -; CHECK-RV32IF-NEXT: lui a0, %hi(e) -; CHECK-RV32IF-NEXT: flw ft0, %lo(e)(a0) ; CHECK-RV32IF-NEXT: lui a0, %hi(f) -; CHECK-RV32IF-NEXT: flw ft1, %lo(f)(a0) -; CHECK-RV32IF-NEXT: fadd.s ft0, ft0, ft1 +; CHECK-RV32IF-NEXT: flw ft0, %lo(f)(a0) +; CHECK-RV32IF-NEXT: lui a0, %hi(e) +; CHECK-RV32IF-NEXT: flw ft1, %lo(e)(a0) +; CHECK-RV32IF-NEXT: fadd.s ft0, ft1, ft0 ; CHECK-RV32IF-NEXT: lui a0, %hi(d) ; CHECK-RV32IF-NEXT: fsw ft0, %lo(d)(a0) ; CHECK-RV32IF-NEXT: flw ft1, 4(sp) @@ -232,11 +232,11 @@ define void @foo_float() nounwind #0 { ; CHECK-RV32IFD-NEXT: sw a0, 28(sp) ; CHECK-RV32IFD-NEXT: fsd ft0, 16(sp) ; CHECK-RV32IFD-NEXT: fsd ft1, 8(sp) -; CHECK-RV32IFD-NEXT: lui a0, %hi(e) -; CHECK-RV32IFD-NEXT: flw ft0, %lo(e)(a0) ; CHECK-RV32IFD-NEXT: lui a0, %hi(f) -; CHECK-RV32IFD-NEXT: flw ft1, %lo(f)(a0) -; CHECK-RV32IFD-NEXT: fadd.s ft0, ft0, ft1 +; CHECK-RV32IFD-NEXT: flw ft0, %lo(f)(a0) +; CHECK-RV32IFD-NEXT: lui a0, %hi(e) +; CHECK-RV32IFD-NEXT: flw ft1, %lo(e)(a0) +; CHECK-RV32IFD-NEXT: fadd.s ft0, ft1, ft0 ; CHECK-RV32IFD-NEXT: lui a0, %hi(d) ; CHECK-RV32IFD-NEXT: fsw ft0, %lo(d)(a0) ; CHECK-RV32IFD-NEXT: fld ft1, 8(sp) @@ -312,11 +312,11 @@ define void @foo_fp_float() nounwind #1 { ; CHECK-RV32IF-NEXT: fsw ft0, 16(sp) ; CHECK-RV32IF-NEXT: fsw ft1, 12(sp) ; CHECK-RV32IF-NEXT: addi s0, sp, 32 -; CHECK-RV32IF-NEXT: lui a0, %hi(e) -; CHECK-RV32IF-NEXT: flw ft0, %lo(e)(a0) ; CHECK-RV32IF-NEXT: lui a0, %hi(f) -; CHECK-RV32IF-NEXT: flw ft1, %lo(f)(a0) -; CHECK-RV32IF-NEXT: fadd.s ft0, ft0, ft1 +; CHECK-RV32IF-NEXT: flw ft0, %lo(f)(a0) +; CHECK-RV32IF-NEXT: lui a0, %hi(e) +; CHECK-RV32IF-NEXT: flw ft1, %lo(e)(a0) +; CHECK-RV32IF-NEXT: fadd.s ft0, ft1, ft0 ; CHECK-RV32IF-NEXT: lui a0, %hi(d) ; CHECK-RV32IF-NEXT: fsw ft0, %lo(d)(a0) ; CHECK-RV32IF-NEXT: flw ft1, 12(sp) @@ -336,11 +336,11 @@ define void @foo_fp_float() nounwind #1 { ; CHECK-RV32IFD-NEXT: fsd ft0, 8(sp) ; CHECK-RV32IFD-NEXT: fsd ft1, 0(sp) ; CHECK-RV32IFD-NEXT: addi s0, sp, 32 -; 
CHECK-RV32IFD-NEXT: lui a0, %hi(e) -; CHECK-RV32IFD-NEXT: flw ft0, %lo(e)(a0) ; CHECK-RV32IFD-NEXT: lui a0, %hi(f) -; CHECK-RV32IFD-NEXT: flw ft1, %lo(f)(a0) -; CHECK-RV32IFD-NEXT: fadd.s ft0, ft0, ft1 +; CHECK-RV32IFD-NEXT: flw ft0, %lo(f)(a0) +; CHECK-RV32IFD-NEXT: lui a0, %hi(e) +; CHECK-RV32IFD-NEXT: flw ft1, %lo(e)(a0) +; CHECK-RV32IFD-NEXT: fadd.s ft0, ft1, ft0 ; CHECK-RV32IFD-NEXT: lui a0, %hi(d) ; CHECK-RV32IFD-NEXT: fsw ft0, %lo(d)(a0) ; CHECK-RV32IFD-NEXT: fld ft1, 0(sp) @@ -534,11 +534,11 @@ define void @foo_double() nounwind #0 { ; CHECK-RV32IFD-NEXT: sw a0, 28(sp) ; CHECK-RV32IFD-NEXT: fsd ft0, 16(sp) ; CHECK-RV32IFD-NEXT: fsd ft1, 8(sp) -; CHECK-RV32IFD-NEXT: lui a0, %hi(h) -; CHECK-RV32IFD-NEXT: fld ft0, %lo(h)(a0) ; CHECK-RV32IFD-NEXT: lui a0, %hi(i) -; CHECK-RV32IFD-NEXT: fld ft1, %lo(i)(a0) -; CHECK-RV32IFD-NEXT: fadd.d ft0, ft0, ft1 +; CHECK-RV32IFD-NEXT: fld ft0, %lo(i)(a0) +; CHECK-RV32IFD-NEXT: lui a0, %hi(h) +; CHECK-RV32IFD-NEXT: fld ft1, %lo(h)(a0) +; CHECK-RV32IFD-NEXT: fadd.d ft0, ft1, ft0 ; CHECK-RV32IFD-NEXT: lui a0, %hi(g) ; CHECK-RV32IFD-NEXT: fsd ft0, %lo(g)(a0) ; CHECK-RV32IFD-NEXT: fld ft1, 8(sp) @@ -738,11 +738,11 @@ define void @foo_fp_double() nounwind #1 { ; CHECK-RV32IFD-NEXT: fsd ft0, 8(sp) ; CHECK-RV32IFD-NEXT: fsd ft1, 0(sp) ; CHECK-RV32IFD-NEXT: addi s0, sp, 32 -; CHECK-RV32IFD-NEXT: lui a0, %hi(h) -; CHECK-RV32IFD-NEXT: fld ft0, %lo(h)(a0) ; CHECK-RV32IFD-NEXT: lui a0, %hi(i) -; CHECK-RV32IFD-NEXT: fld ft1, %lo(i)(a0) -; CHECK-RV32IFD-NEXT: fadd.d ft0, ft0, ft1 +; CHECK-RV32IFD-NEXT: fld ft0, %lo(i)(a0) +; CHECK-RV32IFD-NEXT: lui a0, %hi(h) +; CHECK-RV32IFD-NEXT: fld ft1, %lo(h)(a0) +; CHECK-RV32IFD-NEXT: fadd.d ft0, ft1, ft0 ; CHECK-RV32IFD-NEXT: lui a0, %hi(g) ; CHECK-RV32IFD-NEXT: fsd ft0, %lo(g)(a0) ; CHECK-RV32IFD-NEXT: fld ft1, 0(sp) diff --git a/llvm/test/CodeGen/RISCV/legalize-fneg.ll b/llvm/test/CodeGen/RISCV/legalize-fneg.ll index 80f0a98..d2d0b9e 100644 --- a/llvm/test/CodeGen/RISCV/legalize-fneg.ll +++ b/llvm/test/CodeGen/RISCV/legalize-fneg.ll @@ -15,9 +15,9 @@ define void @test1(float* %a, float* %b) nounwind { ; ; RV64-LABEL: test1: ; RV64: # %bb.0: # %entry -; RV64-NEXT: lw a1, 0(a1) ; RV64-NEXT: addi a2, zero, 1 ; RV64-NEXT: slli a2, a2, 31 +; RV64-NEXT: lw a1, 0(a1) ; RV64-NEXT: xor a1, a1, a2 ; RV64-NEXT: sw a1, 0(a0) ; RV64-NEXT: ret @@ -33,17 +33,17 @@ define void @test2(double* %a, double* %b) nounwind { ; RV32: # %bb.0: # %entry ; RV32-NEXT: lw a2, 4(a1) ; RV32-NEXT: lw a1, 0(a1) -; RV32-NEXT: lui a3, 524288 -; RV32-NEXT: xor a2, a2, a3 ; RV32-NEXT: sw a1, 0(a0) -; RV32-NEXT: sw a2, 4(a0) +; RV32-NEXT: lui a1, 524288 +; RV32-NEXT: xor a1, a2, a1 +; RV32-NEXT: sw a1, 4(a0) ; RV32-NEXT: ret ; ; RV64-LABEL: test2: ; RV64: # %bb.0: # %entry -; RV64-NEXT: ld a1, 0(a1) ; RV64-NEXT: addi a2, zero, -1 ; RV64-NEXT: slli a2, a2, 63 +; RV64-NEXT: ld a1, 0(a1) ; RV64-NEXT: xor a1, a1, a2 ; RV64-NEXT: sd a1, 0(a0) ; RV64-NEXT: ret @@ -57,27 +57,27 @@ entry: define void @test3(fp128* %a, fp128* %b) nounwind { ; RV32-LABEL: test3: ; RV32: # %bb.0: # %entry -; RV32-NEXT: lw a2, 4(a1) -; RV32-NEXT: lw a3, 12(a1) -; RV32-NEXT: lw a4, 8(a1) -; RV32-NEXT: lw a1, 0(a1) -; RV32-NEXT: lui a5, 524288 -; RV32-NEXT: xor a3, a3, a5 -; RV32-NEXT: sw a4, 8(a0) -; RV32-NEXT: sw a1, 0(a0) -; RV32-NEXT: sw a2, 4(a0) -; RV32-NEXT: sw a3, 12(a0) +; RV32-NEXT: lw a2, 12(a1) +; RV32-NEXT: lw a3, 4(a1) +; RV32-NEXT: lw a4, 0(a1) +; RV32-NEXT: lw a1, 8(a1) +; RV32-NEXT: sw a1, 8(a0) +; RV32-NEXT: sw a4, 0(a0) +; RV32-NEXT: sw a3, 4(a0) +; RV32-NEXT: lui a1, 
524288 +; RV32-NEXT: xor a1, a2, a1 +; RV32-NEXT: sw a1, 12(a0) ; RV32-NEXT: ret ; ; RV64-LABEL: test3: ; RV64: # %bb.0: # %entry ; RV64-NEXT: ld a2, 8(a1) ; RV64-NEXT: ld a1, 0(a1) -; RV64-NEXT: addi a3, zero, -1 -; RV64-NEXT: slli a3, a3, 63 -; RV64-NEXT: xor a2, a2, a3 ; RV64-NEXT: sd a1, 0(a0) -; RV64-NEXT: sd a2, 8(a0) +; RV64-NEXT: addi a1, zero, -1 +; RV64-NEXT: slli a1, a1, 63 +; RV64-NEXT: xor a1, a2, a1 +; RV64-NEXT: sd a1, 8(a0) ; RV64-NEXT: ret entry: %0 = load fp128, fp128* %b diff --git a/llvm/test/CodeGen/RISCV/lsr-legaladdimm.ll b/llvm/test/CodeGen/RISCV/lsr-legaladdimm.ll index dbe9eb9..2f4c821 100644 --- a/llvm/test/CodeGen/RISCV/lsr-legaladdimm.ll +++ b/llvm/test/CodeGen/RISCV/lsr-legaladdimm.ll @@ -11,21 +11,21 @@ define i32 @main() nounwind { ; RV32I-LABEL: main: ; RV32I: # %bb.0: # %entry -; RV32I-NEXT: mv a0, zero -; RV32I-NEXT: lui a1, %hi(b) -; RV32I-NEXT: addi a1, a1, %lo(b) -; RV32I-NEXT: lui a2, %hi(a) -; RV32I-NEXT: addi a2, a2, %lo(a) -; RV32I-NEXT: lui a3, 1 +; RV32I-NEXT: lui a0, %hi(b) +; RV32I-NEXT: addi a0, a0, %lo(b) +; RV32I-NEXT: lui a1, %hi(a) +; RV32I-NEXT: addi a1, a1, %lo(a) +; RV32I-NEXT: lui a2, 1 +; RV32I-NEXT: mv a3, zero ; RV32I-NEXT: .LBB0_1: # %for.body ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 -; RV32I-NEXT: addi a4, a0, -2048 -; RV32I-NEXT: sw a4, 0(a2) -; RV32I-NEXT: sw a0, 0(a1) -; RV32I-NEXT: addi a0, a0, 1 +; RV32I-NEXT: addi a4, a3, -2048 +; RV32I-NEXT: sw a4, 0(a1) ; RV32I-NEXT: addi a1, a1, 4 -; RV32I-NEXT: addi a2, a2, 4 -; RV32I-NEXT: bne a0, a3, .LBB0_1 +; RV32I-NEXT: sw a3, 0(a0) +; RV32I-NEXT: addi a0, a0, 4 +; RV32I-NEXT: addi a3, a3, 1 +; RV32I-NEXT: bne a3, a2, .LBB0_1 ; RV32I-NEXT: # %bb.2: # %for.end ; RV32I-NEXT: mv a0, zero ; RV32I-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/mem.ll b/llvm/test/CodeGen/RISCV/mem.ll index 5a2eeee..f873e84 100644 --- a/llvm/test/CodeGen/RISCV/mem.ll +++ b/llvm/test/CodeGen/RISCV/mem.ll @@ -7,9 +7,8 @@ define i32 @lb(i8 *%a) nounwind { ; RV32I-LABEL: lb: ; RV32I: # %bb.0: -; RV32I-NEXT: lb a1, 1(a0) -; RV32I-NEXT: lb a0, 0(a0) -; RV32I-NEXT: mv a0, a1 +; RV32I-NEXT: lb a1, 0(a0) +; RV32I-NEXT: lb a0, 1(a0) ; RV32I-NEXT: ret %1 = getelementptr i8, i8* %a, i32 1 %2 = load i8, i8* %1 @@ -22,9 +21,8 @@ define i32 @lb(i8 *%a) nounwind { define i32 @lh(i16 *%a) nounwind { ; RV32I-LABEL: lh: ; RV32I: # %bb.0: -; RV32I-NEXT: lh a1, 4(a0) -; RV32I-NEXT: lh a0, 0(a0) -; RV32I-NEXT: mv a0, a1 +; RV32I-NEXT: lh a1, 0(a0) +; RV32I-NEXT: lh a0, 4(a0) ; RV32I-NEXT: ret %1 = getelementptr i16, i16* %a, i32 2 %2 = load i16, i16* %1 @@ -37,9 +35,8 @@ define i32 @lh(i16 *%a) nounwind { define i32 @lw(i32 *%a) nounwind { ; RV32I-LABEL: lw: ; RV32I: # %bb.0: -; RV32I-NEXT: lw a1, 12(a0) -; RV32I-NEXT: lw a0, 0(a0) -; RV32I-NEXT: mv a0, a1 +; RV32I-NEXT: lw a1, 0(a0) +; RV32I-NEXT: lw a0, 12(a0) ; RV32I-NEXT: ret %1 = getelementptr i32, i32* %a, i32 3 %2 = load i32, i32* %1 @@ -50,9 +47,9 @@ define i32 @lw(i32 *%a) nounwind { define i32 @lbu(i8 *%a) nounwind { ; RV32I-LABEL: lbu: ; RV32I: # %bb.0: -; RV32I-NEXT: lbu a1, 4(a0) -; RV32I-NEXT: lbu a0, 0(a0) -; RV32I-NEXT: add a0, a1, a0 +; RV32I-NEXT: lbu a1, 0(a0) +; RV32I-NEXT: lbu a0, 4(a0) +; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: ret %1 = getelementptr i8, i8* %a, i32 4 %2 = load i8, i8* %1 @@ -66,9 +63,9 @@ define i32 @lbu(i8 *%a) nounwind { define i32 @lhu(i16 *%a) nounwind { ; RV32I-LABEL: lhu: ; RV32I: # %bb.0: -; RV32I-NEXT: lhu a1, 10(a0) -; RV32I-NEXT: lhu a0, 0(a0) -; RV32I-NEXT: add a0, a1, a0 +; RV32I-NEXT: lhu a1, 0(a0) +; RV32I-NEXT: 
lhu a0, 10(a0) +; RV32I-NEXT: add a0, a0, a1 ; RV32I-NEXT: ret %1 = getelementptr i16, i16* %a, i32 5 %2 = load i16, i16* %1 @@ -84,8 +81,8 @@ define i32 @lhu(i16 *%a) nounwind { define void @sb(i8 *%a, i8 %b) nounwind { ; RV32I-LABEL: sb: ; RV32I: # %bb.0: -; RV32I-NEXT: sb a1, 0(a0) ; RV32I-NEXT: sb a1, 6(a0) +; RV32I-NEXT: sb a1, 0(a0) ; RV32I-NEXT: ret store i8 %b, i8* %a %1 = getelementptr i8, i8* %a, i32 6 @@ -96,8 +93,8 @@ define void @sb(i8 *%a, i8 %b) nounwind { define void @sh(i16 *%a, i16 %b) nounwind { ; RV32I-LABEL: sh: ; RV32I: # %bb.0: -; RV32I-NEXT: sh a1, 0(a0) ; RV32I-NEXT: sh a1, 14(a0) +; RV32I-NEXT: sh a1, 0(a0) ; RV32I-NEXT: ret store i16 %b, i16* %a %1 = getelementptr i16, i16* %a, i32 7 @@ -108,8 +105,8 @@ define void @sh(i16 *%a, i16 %b) nounwind { define void @sw(i32 *%a, i32 %b) nounwind { ; RV32I-LABEL: sw: ; RV32I: # %bb.0: -; RV32I-NEXT: sw a1, 0(a0) ; RV32I-NEXT: sw a1, 32(a0) +; RV32I-NEXT: sw a1, 0(a0) ; RV32I-NEXT: ret store i32 %b, i32* %a %1 = getelementptr i32, i32* %a, i32 8 @@ -121,10 +118,10 @@ define void @sw(i32 *%a, i32 %b) nounwind { define i32 @load_sext_zext_anyext_i1(i1 *%a) nounwind { ; RV32I-LABEL: load_sext_zext_anyext_i1: ; RV32I: # %bb.0: +; RV32I-NEXT: lb a1, 0(a0) ; RV32I-NEXT: lbu a1, 1(a0) -; RV32I-NEXT: lbu a2, 2(a0) -; RV32I-NEXT: lb a0, 0(a0) -; RV32I-NEXT: sub a0, a2, a1 +; RV32I-NEXT: lbu a0, 2(a0) +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: ret ; sextload i1 %1 = getelementptr i1, i1* %a, i32 1 @@ -143,10 +140,10 @@ define i32 @load_sext_zext_anyext_i1(i1 *%a) nounwind { define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind { ; RV32I-LABEL: load_sext_zext_anyext_i1_i16: ; RV32I: # %bb.0: +; RV32I-NEXT: lb a1, 0(a0) ; RV32I-NEXT: lbu a1, 1(a0) -; RV32I-NEXT: lbu a2, 2(a0) -; RV32I-NEXT: lb a0, 0(a0) -; RV32I-NEXT: sub a0, a2, a1 +; RV32I-NEXT: lbu a0, 2(a0) +; RV32I-NEXT: sub a0, a0, a1 ; RV32I-NEXT: ret ; sextload i1 %1 = getelementptr i1, i1* %a, i32 1 diff --git a/llvm/test/CodeGen/RISCV/mem64.ll b/llvm/test/CodeGen/RISCV/mem64.ll index 522786b..32669e8 100644 --- a/llvm/test/CodeGen/RISCV/mem64.ll +++ b/llvm/test/CodeGen/RISCV/mem64.ll @@ -7,9 +7,8 @@ define i64 @lb(i8 *%a) nounwind { ; RV64I-LABEL: lb: ; RV64I: # %bb.0: -; RV64I-NEXT: lb a1, 1(a0) -; RV64I-NEXT: lb a0, 0(a0) -; RV64I-NEXT: mv a0, a1 +; RV64I-NEXT: lb a1, 0(a0) +; RV64I-NEXT: lb a0, 1(a0) ; RV64I-NEXT: ret %1 = getelementptr i8, i8* %a, i32 1 %2 = load i8, i8* %1 @@ -22,9 +21,8 @@ define i64 @lb(i8 *%a) nounwind { define i64 @lh(i16 *%a) nounwind { ; RV64I-LABEL: lh: ; RV64I: # %bb.0: -; RV64I-NEXT: lh a1, 4(a0) -; RV64I-NEXT: lh a0, 0(a0) -; RV64I-NEXT: mv a0, a1 +; RV64I-NEXT: lh a1, 0(a0) +; RV64I-NEXT: lh a0, 4(a0) ; RV64I-NEXT: ret %1 = getelementptr i16, i16* %a, i32 2 %2 = load i16, i16* %1 @@ -37,9 +35,8 @@ define i64 @lh(i16 *%a) nounwind { define i64 @lw(i32 *%a) nounwind { ; RV64I-LABEL: lw: ; RV64I: # %bb.0: -; RV64I-NEXT: lw a1, 12(a0) -; RV64I-NEXT: lw a0, 0(a0) -; RV64I-NEXT: mv a0, a1 +; RV64I-NEXT: lw a1, 0(a0) +; RV64I-NEXT: lw a0, 12(a0) ; RV64I-NEXT: ret %1 = getelementptr i32, i32* %a, i32 3 %2 = load i32, i32* %1 @@ -52,9 +49,9 @@ define i64 @lw(i32 *%a) nounwind { define i64 @lbu(i8 *%a) nounwind { ; RV64I-LABEL: lbu: ; RV64I: # %bb.0: -; RV64I-NEXT: lbu a1, 4(a0) -; RV64I-NEXT: lbu a0, 0(a0) -; RV64I-NEXT: add a0, a1, a0 +; RV64I-NEXT: lbu a1, 0(a0) +; RV64I-NEXT: lbu a0, 4(a0) +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: ret %1 = getelementptr i8, i8* %a, i32 4 %2 = load i8, i8* %1 @@ -68,9 +65,9 @@ define i64 @lbu(i8 *%a) nounwind 
{ define i64 @lhu(i16 *%a) nounwind { ; RV64I-LABEL: lhu: ; RV64I: # %bb.0: -; RV64I-NEXT: lhu a1, 10(a0) -; RV64I-NEXT: lhu a0, 0(a0) -; RV64I-NEXT: add a0, a1, a0 +; RV64I-NEXT: lhu a1, 0(a0) +; RV64I-NEXT: lhu a0, 10(a0) +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: ret %1 = getelementptr i16, i16* %a, i32 5 %2 = load i16, i16* %1 @@ -84,9 +81,9 @@ define i64 @lhu(i16 *%a) nounwind { define i64 @lwu(i32 *%a) nounwind { ; RV64I-LABEL: lwu: ; RV64I: # %bb.0: -; RV64I-NEXT: lwu a1, 24(a0) -; RV64I-NEXT: lwu a0, 0(a0) -; RV64I-NEXT: add a0, a1, a0 +; RV64I-NEXT: lwu a1, 0(a0) +; RV64I-NEXT: lwu a0, 24(a0) +; RV64I-NEXT: add a0, a0, a1 ; RV64I-NEXT: ret %1 = getelementptr i32, i32* %a, i32 6 %2 = load i32, i32* %1 @@ -102,8 +99,8 @@ define i64 @lwu(i32 *%a) nounwind { define void @sb(i8 *%a, i8 %b) nounwind { ; RV64I-LABEL: sb: ; RV64I: # %bb.0: -; RV64I-NEXT: sb a1, 0(a0) ; RV64I-NEXT: sb a1, 7(a0) +; RV64I-NEXT: sb a1, 0(a0) ; RV64I-NEXT: ret store i8 %b, i8* %a %1 = getelementptr i8, i8* %a, i32 7 @@ -114,8 +111,8 @@ define void @sb(i8 *%a, i8 %b) nounwind { define void @sh(i16 *%a, i16 %b) nounwind { ; RV64I-LABEL: sh: ; RV64I: # %bb.0: -; RV64I-NEXT: sh a1, 0(a0) ; RV64I-NEXT: sh a1, 16(a0) +; RV64I-NEXT: sh a1, 0(a0) ; RV64I-NEXT: ret store i16 %b, i16* %a %1 = getelementptr i16, i16* %a, i32 8 @@ -126,8 +123,8 @@ define void @sh(i16 *%a, i16 %b) nounwind { define void @sw(i32 *%a, i32 %b) nounwind { ; RV64I-LABEL: sw: ; RV64I: # %bb.0: -; RV64I-NEXT: sw a1, 0(a0) ; RV64I-NEXT: sw a1, 36(a0) +; RV64I-NEXT: sw a1, 0(a0) ; RV64I-NEXT: ret store i32 %b, i32* %a %1 = getelementptr i32, i32* %a, i32 9 @@ -140,9 +137,8 @@ define void @sw(i32 *%a, i32 %b) nounwind { define i64 @ld(i64 *%a) nounwind { ; RV64I-LABEL: ld: ; RV64I: # %bb.0: -; RV64I-NEXT: ld a1, 80(a0) -; RV64I-NEXT: ld a0, 0(a0) -; RV64I-NEXT: mv a0, a1 +; RV64I-NEXT: ld a1, 0(a0) +; RV64I-NEXT: ld a0, 80(a0) ; RV64I-NEXT: ret %1 = getelementptr i64, i64* %a, i32 10 %2 = load i64, i64* %1 @@ -153,8 +149,8 @@ define i64 @ld(i64 *%a) nounwind { define void @sd(i64 *%a, i64 %b) nounwind { ; RV64I-LABEL: sd: ; RV64I: # %bb.0: -; RV64I-NEXT: sd a1, 0(a0) ; RV64I-NEXT: sd a1, 88(a0) +; RV64I-NEXT: sd a1, 0(a0) ; RV64I-NEXT: ret store i64 %b, i64* %a %1 = getelementptr i64, i64* %a, i32 11 @@ -166,10 +162,10 @@ define void @sd(i64 *%a, i64 %b) nounwind { define i64 @load_sext_zext_anyext_i1(i1 *%a) nounwind { ; RV64I-LABEL: load_sext_zext_anyext_i1: ; RV64I: # %bb.0: +; RV64I-NEXT: lb a1, 0(a0) ; RV64I-NEXT: lbu a1, 1(a0) -; RV64I-NEXT: lbu a2, 2(a0) -; RV64I-NEXT: lb a0, 0(a0) -; RV64I-NEXT: sub a0, a2, a1 +; RV64I-NEXT: lbu a0, 2(a0) +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; sextload i1 %1 = getelementptr i1, i1* %a, i32 1 @@ -188,10 +184,10 @@ define i64 @load_sext_zext_anyext_i1(i1 *%a) nounwind { define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind { ; RV64I-LABEL: load_sext_zext_anyext_i1_i16: ; RV64I: # %bb.0: +; RV64I-NEXT: lb a1, 0(a0) ; RV64I-NEXT: lbu a1, 1(a0) -; RV64I-NEXT: lbu a2, 2(a0) -; RV64I-NEXT: lb a0, 0(a0) -; RV64I-NEXT: sub a0, a2, a1 +; RV64I-NEXT: lbu a0, 2(a0) +; RV64I-NEXT: sub a0, a0, a1 ; RV64I-NEXT: ret ; sextload i1 %1 = getelementptr i1, i1* %a, i32 1 diff --git a/llvm/test/CodeGen/RISCV/mul.ll b/llvm/test/CodeGen/RISCV/mul.ll index 2448580..9bf95be 100644 --- a/llvm/test/CodeGen/RISCV/mul.ll +++ b/llvm/test/CodeGen/RISCV/mul.ll @@ -247,8 +247,8 @@ define i32 @mulhs(i32 %a, i32 %b) nounwind { ; ; RV64IM-LABEL: mulhs: ; RV64IM: # %bb.0: -; RV64IM-NEXT: sext.w a0, a0 ; RV64IM-NEXT: sext.w a1, a1 
+; RV64IM-NEXT: sext.w a0, a0 ; RV64IM-NEXT: mul a0, a0, a1 ; RV64IM-NEXT: srli a0, a0, 32 ; RV64IM-NEXT: ret diff --git a/llvm/test/CodeGen/RISCV/remat.ll b/llvm/test/CodeGen/RISCV/remat.ll index d8267e7..41cb90b 100644 --- a/llvm/test/CodeGen/RISCV/remat.ll +++ b/llvm/test/CodeGen/RISCV/remat.ll @@ -37,19 +37,19 @@ define i32 @test() nounwind { ; RV32I-NEXT: sw s9, 20(sp) ; RV32I-NEXT: sw s10, 16(sp) ; RV32I-NEXT: sw s11, 12(sp) -; RV32I-NEXT: lui s6, %hi(a) -; RV32I-NEXT: lw a0, %lo(a)(s6) +; RV32I-NEXT: lui s9, %hi(a) +; RV32I-NEXT: lw a0, %lo(a)(s9) ; RV32I-NEXT: beqz a0, .LBB0_11 ; RV32I-NEXT: # %bb.1: # %for.body.preheader ; RV32I-NEXT: lui s2, %hi(l) ; RV32I-NEXT: lui s3, %hi(k) ; RV32I-NEXT: lui s4, %hi(j) -; RV32I-NEXT: lui s5, %hi(i) -; RV32I-NEXT: lui s1, %hi(d) -; RV32I-NEXT: lui s0, %hi(e) -; RV32I-NEXT: lui s7, %hi(f) -; RV32I-NEXT: lui s8, %hi(g) -; RV32I-NEXT: lui s9, %hi(h) +; RV32I-NEXT: lui s6, %hi(i) +; RV32I-NEXT: lui s5, %hi(h) +; RV32I-NEXT: lui s7, %hi(g) +; RV32I-NEXT: lui s8, %hi(f) +; RV32I-NEXT: lui s1, %hi(e) +; RV32I-NEXT: lui s0, %hi(d) ; RV32I-NEXT: lui s10, %hi(c) ; RV32I-NEXT: lui s11, %hi(b) ; RV32I-NEXT: lw a1, %lo(l)(s2) @@ -57,19 +57,19 @@ define i32 @test() nounwind { ; RV32I-NEXT: j .LBB0_5 ; RV32I-NEXT: .LBB0_2: # %for.inc ; RV32I-NEXT: # in Loop: Header=BB0_5 Depth=1 -; RV32I-NEXT: lw a0, %lo(a)(s6) +; RV32I-NEXT: lw a0, %lo(a)(s9) ; RV32I-NEXT: addi a0, a0, -1 -; RV32I-NEXT: sw a0, %lo(a)(s6) +; RV32I-NEXT: sw a0, %lo(a)(s9) ; RV32I-NEXT: beqz a0, .LBB0_11 ; RV32I-NEXT: # %bb.3: # %for.body ; RV32I-NEXT: # in Loop: Header=BB0_5 Depth=1 ; RV32I-NEXT: lw a1, %lo(l)(s2) ; RV32I-NEXT: beqz a1, .LBB0_5 ; RV32I-NEXT: .LBB0_4: # %if.then -; RV32I-NEXT: lw a1, %lo(b)(s11) +; RV32I-NEXT: lw a4, %lo(e)(s1) +; RV32I-NEXT: lw a3, %lo(d)(s0) ; RV32I-NEXT: lw a2, %lo(c)(s10) -; RV32I-NEXT: lw a3, %lo(d)(s1) -; RV32I-NEXT: lw a4, %lo(e)(s0) +; RV32I-NEXT: lw a1, %lo(b)(s11) ; RV32I-NEXT: addi a5, zero, 32 ; RV32I-NEXT: call foo ; RV32I-NEXT: .LBB0_5: # %if.end @@ -78,11 +78,11 @@ define i32 @test() nounwind { ; RV32I-NEXT: beqz a0, .LBB0_7 ; RV32I-NEXT: # %bb.6: # %if.then3 ; RV32I-NEXT: # in Loop: Header=BB0_5 Depth=1 -; RV32I-NEXT: lw a0, %lo(b)(s11) +; RV32I-NEXT: lw a4, %lo(f)(s8) +; RV32I-NEXT: lw a3, %lo(e)(s1) +; RV32I-NEXT: lw a2, %lo(d)(s0) ; RV32I-NEXT: lw a1, %lo(c)(s10) -; RV32I-NEXT: lw a2, %lo(d)(s1) -; RV32I-NEXT: lw a3, %lo(e)(s0) -; RV32I-NEXT: lw a4, %lo(f)(s7) +; RV32I-NEXT: lw a0, %lo(b)(s11) ; RV32I-NEXT: addi a5, zero, 64 ; RV32I-NEXT: call foo ; RV32I-NEXT: .LBB0_7: # %if.end5 @@ -91,24 +91,24 @@ define i32 @test() nounwind { ; RV32I-NEXT: beqz a0, .LBB0_9 ; RV32I-NEXT: # %bb.8: # %if.then7 ; RV32I-NEXT: # in Loop: Header=BB0_5 Depth=1 +; RV32I-NEXT: lw a4, %lo(g)(s7) +; RV32I-NEXT: lw a3, %lo(f)(s8) +; RV32I-NEXT: lw a2, %lo(e)(s1) +; RV32I-NEXT: lw a1, %lo(d)(s0) ; RV32I-NEXT: lw a0, %lo(c)(s10) -; RV32I-NEXT: lw a1, %lo(d)(s1) -; RV32I-NEXT: lw a2, %lo(e)(s0) -; RV32I-NEXT: lw a3, %lo(f)(s7) -; RV32I-NEXT: lw a4, %lo(g)(s8) ; RV32I-NEXT: addi a5, zero, 32 ; RV32I-NEXT: call foo ; RV32I-NEXT: .LBB0_9: # %if.end9 ; RV32I-NEXT: # in Loop: Header=BB0_5 Depth=1 -; RV32I-NEXT: lw a0, %lo(i)(s5) +; RV32I-NEXT: lw a0, %lo(i)(s6) ; RV32I-NEXT: beqz a0, .LBB0_2 ; RV32I-NEXT: # %bb.10: # %if.then11 ; RV32I-NEXT: # in Loop: Header=BB0_5 Depth=1 -; RV32I-NEXT: lw a0, %lo(d)(s1) -; RV32I-NEXT: lw a1, %lo(e)(s0) -; RV32I-NEXT: lw a2, %lo(f)(s7) -; RV32I-NEXT: lw a3, %lo(g)(s8) -; RV32I-NEXT: lw a4, %lo(h)(s9) +; RV32I-NEXT: lw a4, %lo(h)(s5) +; 
RV32I-NEXT: lw a3, %lo(g)(s7) +; RV32I-NEXT: lw a2, %lo(f)(s8) +; RV32I-NEXT: lw a1, %lo(e)(s1) +; RV32I-NEXT: lw a0, %lo(d)(s0) ; RV32I-NEXT: addi a5, zero, 32 ; RV32I-NEXT: call foo ; RV32I-NEXT: j .LBB0_2 diff --git a/llvm/test/CodeGen/RISCV/rv64f-float-convert.ll b/llvm/test/CodeGen/RISCV/rv64f-float-convert.ll index 13e4724..5f89520 100644 --- a/llvm/test/CodeGen/RISCV/rv64f-float-convert.ll +++ b/llvm/test/CodeGen/RISCV/rv64f-float-convert.ll @@ -113,9 +113,9 @@ define zeroext i32 @bcvt_f32_to_zext_i32(float %a, float %b) nounwind { define float @bcvt_i64_to_f32_via_i32(i64 %a, i64 %b) nounwind { ; RV64IF-LABEL: bcvt_i64_to_f32_via_i32: ; RV64IF: # %bb.0: -; RV64IF-NEXT: fmv.w.x ft0, a0 -; RV64IF-NEXT: fmv.w.x ft1, a1 -; RV64IF-NEXT: fadd.s ft0, ft0, ft1 +; RV64IF-NEXT: fmv.w.x ft0, a1 +; RV64IF-NEXT: fmv.w.x ft1, a0 +; RV64IF-NEXT: fadd.s ft0, ft1, ft0 ; RV64IF-NEXT: fmv.x.w a0, ft0 ; RV64IF-NEXT: ret %1 = trunc i64 %a to i32 diff --git a/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll b/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll index a545a13..89e3f41 100644 --- a/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll +++ b/llvm/test/CodeGen/RISCV/rv64i-complex-float.ll @@ -13,16 +13,16 @@ define i64 @complex_float_add(i64 %a.coerce, i64 %b.coerce) nounwind { ; CHECK-NEXT: sd s0, 16(sp) ; CHECK-NEXT: sd s1, 8(sp) ; CHECK-NEXT: sd s2, 0(sp) -; CHECK-NEXT: srli s2, a0, 32 -; CHECK-NEXT: srli s1, a1, 32 +; CHECK-NEXT: mv s0, a1 +; CHECK-NEXT: mv s1, a0 ; CHECK-NEXT: call __addsf3 -; CHECK-NEXT: mv s0, a0 -; CHECK-NEXT: mv a0, s2 -; CHECK-NEXT: mv a1, s1 +; CHECK-NEXT: mv s2, a0 +; CHECK-NEXT: srli a0, s1, 32 +; CHECK-NEXT: srli a1, s0, 32 ; CHECK-NEXT: call __addsf3 -; CHECK-NEXT: slli a0, a0, 32 -; CHECK-NEXT: slli a1, s0, 32 +; CHECK-NEXT: slli a1, s2, 32 ; CHECK-NEXT: srli a1, a1, 32 +; CHECK-NEXT: slli a0, a0, 32 ; CHECK-NEXT: or a0, a0, a1 ; CHECK-NEXT: ld s2, 0(sp) ; CHECK-NEXT: ld s1, 8(sp) diff --git a/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll b/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll index c4a4de7..8b17746 100644 --- a/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll +++ b/llvm/test/CodeGen/RISCV/rv64i-w-insts-legalization.ll @@ -8,17 +8,17 @@ define signext i32 @addw(i32 signext %s, i32 signext %n, i32 signext %k) nounwin ; CHECK-NEXT: # %bb.1: # %for.body.preheader ; CHECK-NEXT: not a2, a0 ; CHECK-NEXT: add a2, a2, a1 -; CHECK-NEXT: addi a3, a0, 1 -; CHECK-NEXT: mul a3, a2, a3 -; CHECK-NEXT: slli a2, a2, 32 -; CHECK-NEXT: srli a2, a2, 32 ; CHECK-NEXT: sub a1, a1, a0 ; CHECK-NEXT: addi a1, a1, -2 ; CHECK-NEXT: slli a1, a1, 32 ; CHECK-NEXT: srli a1, a1, 32 -; CHECK-NEXT: mul a1, a2, a1 +; CHECK-NEXT: slli a3, a2, 32 +; CHECK-NEXT: srli a3, a3, 32 +; CHECK-NEXT: mul a1, a3, a1 +; CHECK-NEXT: addi a3, a0, 1 +; CHECK-NEXT: mul a2, a2, a3 +; CHECK-NEXT: add a0, a2, a0 ; CHECK-NEXT: srli a1, a1, 1 -; CHECK-NEXT: add a0, a3, a0 ; CHECK-NEXT: addw a0, a0, a1 ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB0_2: @@ -54,18 +54,18 @@ define signext i32 @subw(i32 signext %s, i32 signext %n, i32 signext %k) nounwin ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: bge a0, a1, .LBB1_2 ; CHECK-NEXT: # %bb.1: # %for.body.preheader -; CHECK-NEXT: not a2, a0 -; CHECK-NEXT: add a3, a2, a1 -; CHECK-NEXT: mul a2, a3, a2 -; CHECK-NEXT: slli a3, a3, 32 -; CHECK-NEXT: srli a3, a3, 32 -; CHECK-NEXT: sub a1, a1, a0 -; CHECK-NEXT: addi a1, a1, -2 -; CHECK-NEXT: slli a1, a1, 32 -; CHECK-NEXT: srli a1, a1, 32 -; CHECK-NEXT: mul a1, a3, a1 -; CHECK-NEXT: srli a1, a1, 1 -; CHECK-NEXT: sub a0, a2, a0 
+; CHECK-NEXT: sub a2, a1, a0 +; CHECK-NEXT: addi a2, a2, -2 +; CHECK-NEXT: slli a2, a2, 32 +; CHECK-NEXT: srli a2, a2, 32 +; CHECK-NEXT: not a3, a0 +; CHECK-NEXT: add a1, a3, a1 +; CHECK-NEXT: slli a4, a1, 32 +; CHECK-NEXT: srli a4, a4, 32 +; CHECK-NEXT: mul a2, a4, a2 +; CHECK-NEXT: mul a1, a1, a3 +; CHECK-NEXT: sub a0, a1, a0 +; CHECK-NEXT: srli a1, a2, 1 ; CHECK-NEXT: subw a0, a0, a1 ; CHECK-NEXT: ret ; CHECK-NEXT: .LBB1_2: diff --git a/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll b/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll index d38f3d5..3a18b4a 100644 --- a/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll +++ b/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll @@ -40,41 +40,27 @@ define i128 @cmovcc128(i64 signext %a, i128 %b, i128 %c) nounwind { ; RV32I-LABEL: cmovcc128: ; RV32I: # %bb.0: # %entry ; RV32I-NEXT: xori a1, a1, 123 -; RV32I-NEXT: or a2, a1, a2 -; RV32I-NEXT: mv a1, a3 -; RV32I-NEXT: beqz a2, .LBB1_2 +; RV32I-NEXT: or a1, a1, a2 +; RV32I-NEXT: beqz a1, .LBB1_2 ; RV32I-NEXT: # %bb.1: # %entry -; RV32I-NEXT: mv a1, a4 -; RV32I-NEXT: .LBB1_2: # %entry -; RV32I-NEXT: lw a6, 0(a1) -; RV32I-NEXT: beqz a2, .LBB1_6 -; RV32I-NEXT: # %bb.3: # %entry ; RV32I-NEXT: addi a1, a4, 4 -; RV32I-NEXT: lw a5, 0(a1) -; RV32I-NEXT: bnez a2, .LBB1_7 -; RV32I-NEXT: .LBB1_4: -; RV32I-NEXT: addi a1, a3, 8 -; RV32I-NEXT: lw a1, 0(a1) -; RV32I-NEXT: bnez a2, .LBB1_8 -; RV32I-NEXT: .LBB1_5: -; RV32I-NEXT: addi a2, a3, 12 -; RV32I-NEXT: j .LBB1_9 -; RV32I-NEXT: .LBB1_6: +; RV32I-NEXT: addi a2, a4, 8 +; RV32I-NEXT: addi a5, a4, 12 +; RV32I-NEXT: mv a3, a4 +; RV32I-NEXT: j .LBB1_3 +; RV32I-NEXT: .LBB1_2: ; RV32I-NEXT: addi a1, a3, 4 -; RV32I-NEXT: lw a5, 0(a1) -; RV32I-NEXT: beqz a2, .LBB1_4 -; RV32I-NEXT: .LBB1_7: # %entry -; RV32I-NEXT: addi a1, a4, 8 -; RV32I-NEXT: lw a1, 0(a1) -; RV32I-NEXT: beqz a2, .LBB1_5 -; RV32I-NEXT: .LBB1_8: # %entry -; RV32I-NEXT: addi a2, a4, 12 -; RV32I-NEXT: .LBB1_9: # %entry +; RV32I-NEXT: addi a2, a3, 8 +; RV32I-NEXT: addi a5, a3, 12 +; RV32I-NEXT: .LBB1_3: # %entry +; RV32I-NEXT: lw a4, 0(a5) +; RV32I-NEXT: sw a4, 12(a0) ; RV32I-NEXT: lw a2, 0(a2) -; RV32I-NEXT: sw a2, 12(a0) -; RV32I-NEXT: sw a1, 8(a0) -; RV32I-NEXT: sw a5, 4(a0) -; RV32I-NEXT: sw a6, 0(a0) +; RV32I-NEXT: sw a2, 8(a0) +; RV32I-NEXT: lw a1, 0(a1) +; RV32I-NEXT: sw a1, 4(a0) +; RV32I-NEXT: lw a1, 0(a3) +; RV32I-NEXT: sw a1, 0(a0) ; RV32I-NEXT: ret ; ; RV64I-LABEL: cmovcc128: @@ -97,24 +83,24 @@ entry: define i64 @cmov64(i1 %a, i64 %b, i64 %c) nounwind { ; RV32I-LABEL: cmov64: ; RV32I: # %bb.0: # %entry -; RV32I-NEXT: andi a5, a0, 1 -; RV32I-NEXT: mv a0, a1 -; RV32I-NEXT: bnez a5, .LBB2_2 +; RV32I-NEXT: andi a0, a0, 1 +; RV32I-NEXT: bnez a0, .LBB2_2 ; RV32I-NEXT: # %bb.1: # %entry -; RV32I-NEXT: mv a0, a3 +; RV32I-NEXT: mv a1, a3 ; RV32I-NEXT: mv a2, a4 ; RV32I-NEXT: .LBB2_2: # %entry +; RV32I-NEXT: mv a0, a1 ; RV32I-NEXT: mv a1, a2 ; RV32I-NEXT: ret ; ; RV64I-LABEL: cmov64: ; RV64I: # %bb.0: # %entry -; RV64I-NEXT: andi a3, a0, 1 -; RV64I-NEXT: mv a0, a1 -; RV64I-NEXT: bnez a3, .LBB2_2 +; RV64I-NEXT: andi a0, a0, 1 +; RV64I-NEXT: bnez a0, .LBB2_2 ; RV64I-NEXT: # %bb.1: # %entry -; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: mv a1, a2 ; RV64I-NEXT: .LBB2_2: # %entry +; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: ret entry: %cond = select i1 %a, i64 %b, i64 %c @@ -124,52 +110,38 @@ entry: define i128 @cmov128(i1 %a, i128 %b, i128 %c) nounwind { ; RV32I-LABEL: cmov128: ; RV32I: # %bb.0: # %entry -; RV32I-NEXT: andi a4, a1, 1 -; RV32I-NEXT: mv a1, a2 -; RV32I-NEXT: bnez a4, .LBB3_2 +; RV32I-NEXT: 
andi a1, a1, 1 +; RV32I-NEXT: bnez a1, .LBB3_2 ; RV32I-NEXT: # %bb.1: # %entry -; RV32I-NEXT: mv a1, a3 -; RV32I-NEXT: .LBB3_2: # %entry -; RV32I-NEXT: lw a6, 0(a1) -; RV32I-NEXT: bnez a4, .LBB3_6 -; RV32I-NEXT: # %bb.3: # %entry ; RV32I-NEXT: addi a1, a3, 4 -; RV32I-NEXT: lw a5, 0(a1) -; RV32I-NEXT: beqz a4, .LBB3_7 -; RV32I-NEXT: .LBB3_4: -; RV32I-NEXT: addi a1, a2, 8 -; RV32I-NEXT: lw a1, 0(a1) -; RV32I-NEXT: beqz a4, .LBB3_8 -; RV32I-NEXT: .LBB3_5: -; RV32I-NEXT: addi a2, a2, 12 -; RV32I-NEXT: j .LBB3_9 -; RV32I-NEXT: .LBB3_6: +; RV32I-NEXT: addi a4, a3, 8 +; RV32I-NEXT: addi a5, a3, 12 +; RV32I-NEXT: mv a2, a3 +; RV32I-NEXT: j .LBB3_3 +; RV32I-NEXT: .LBB3_2: ; RV32I-NEXT: addi a1, a2, 4 -; RV32I-NEXT: lw a5, 0(a1) -; RV32I-NEXT: bnez a4, .LBB3_4 -; RV32I-NEXT: .LBB3_7: # %entry -; RV32I-NEXT: addi a1, a3, 8 +; RV32I-NEXT: addi a4, a2, 8 +; RV32I-NEXT: addi a5, a2, 12 +; RV32I-NEXT: .LBB3_3: # %entry +; RV32I-NEXT: lw a3, 0(a5) +; RV32I-NEXT: sw a3, 12(a0) +; RV32I-NEXT: lw a3, 0(a4) +; RV32I-NEXT: sw a3, 8(a0) ; RV32I-NEXT: lw a1, 0(a1) -; RV32I-NEXT: bnez a4, .LBB3_5 -; RV32I-NEXT: .LBB3_8: # %entry -; RV32I-NEXT: addi a2, a3, 12 -; RV32I-NEXT: .LBB3_9: # %entry -; RV32I-NEXT: lw a2, 0(a2) -; RV32I-NEXT: sw a2, 12(a0) -; RV32I-NEXT: sw a1, 8(a0) -; RV32I-NEXT: sw a5, 4(a0) -; RV32I-NEXT: sw a6, 0(a0) +; RV32I-NEXT: sw a1, 4(a0) +; RV32I-NEXT: lw a1, 0(a2) +; RV32I-NEXT: sw a1, 0(a0) ; RV32I-NEXT: ret ; ; RV64I-LABEL: cmov128: ; RV64I: # %bb.0: # %entry -; RV64I-NEXT: andi a5, a0, 1 -; RV64I-NEXT: mv a0, a1 -; RV64I-NEXT: bnez a5, .LBB3_2 +; RV64I-NEXT: andi a0, a0, 1 +; RV64I-NEXT: bnez a0, .LBB3_2 ; RV64I-NEXT: # %bb.1: # %entry -; RV64I-NEXT: mv a0, a3 +; RV64I-NEXT: mv a1, a3 ; RV64I-NEXT: mv a2, a4 ; RV64I-NEXT: .LBB3_2: # %entry +; RV64I-NEXT: mv a0, a1 ; RV64I-NEXT: mv a1, a2 ; RV64I-NEXT: ret entry: @@ -308,37 +280,39 @@ entry: define i32 @cmovdiffcc(i1 %a, i1 %b, i32 %c, i32 %d, i32 %e, i32 %f) nounwind { ; RV32I-LABEL: cmovdiffcc: ; RV32I: # %bb.0: # %entry -; RV32I-NEXT: andi a0, a0, 1 ; RV32I-NEXT: andi a1, a1, 1 -; RV32I-NEXT: beqz a0, .LBB7_3 +; RV32I-NEXT: beqz a1, .LBB7_3 ; RV32I-NEXT: # %bb.1: # %entry -; RV32I-NEXT: beqz a1, .LBB7_4 +; RV32I-NEXT: andi a0, a0, 1 +; RV32I-NEXT: beqz a0, .LBB7_4 ; RV32I-NEXT: .LBB7_2: # %entry ; RV32I-NEXT: add a0, a2, a4 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB7_3: # %entry -; RV32I-NEXT: mv a2, a3 -; RV32I-NEXT: bnez a1, .LBB7_2 -; RV32I-NEXT: .LBB7_4: # %entry ; RV32I-NEXT: mv a4, a5 +; RV32I-NEXT: andi a0, a0, 1 +; RV32I-NEXT: bnez a0, .LBB7_2 +; RV32I-NEXT: .LBB7_4: # %entry +; RV32I-NEXT: mv a2, a3 ; RV32I-NEXT: add a0, a2, a4 ; RV32I-NEXT: ret ; ; RV64I-LABEL: cmovdiffcc: ; RV64I: # %bb.0: # %entry -; RV64I-NEXT: andi a0, a0, 1 ; RV64I-NEXT: andi a1, a1, 1 -; RV64I-NEXT: beqz a0, .LBB7_3 +; RV64I-NEXT: beqz a1, .LBB7_3 ; RV64I-NEXT: # %bb.1: # %entry -; RV64I-NEXT: beqz a1, .LBB7_4 +; RV64I-NEXT: andi a0, a0, 1 +; RV64I-NEXT: beqz a0, .LBB7_4 ; RV64I-NEXT: .LBB7_2: # %entry ; RV64I-NEXT: addw a0, a2, a4 ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB7_3: # %entry -; RV64I-NEXT: mv a2, a3 -; RV64I-NEXT: bnez a1, .LBB7_2 -; RV64I-NEXT: .LBB7_4: # %entry ; RV64I-NEXT: mv a4, a5 +; RV64I-NEXT: andi a0, a0, 1 +; RV64I-NEXT: bnez a0, .LBB7_2 +; RV64I-NEXT: .LBB7_4: # %entry +; RV64I-NEXT: mv a2, a3 ; RV64I-NEXT: addw a0, a2, a4 ; RV64I-NEXT: ret entry: diff --git a/llvm/test/CodeGen/RISCV/setcc-logic.ll b/llvm/test/CodeGen/RISCV/setcc-logic.ll index 0c29bf5..72f797d 100644 --- a/llvm/test/CodeGen/RISCV/setcc-logic.ll +++ 
b/llvm/test/CodeGen/RISCV/setcc-logic.ll @@ -7,17 +7,17 @@ define i1 @and_icmp_eq(i32 %a, i32 %b, i32 %c, i32 %d) nounwind { ; RV32I-LABEL: and_icmp_eq: ; RV32I: # %bb.0: +; RV32I-NEXT: xor a2, a2, a3 ; RV32I-NEXT: xor a0, a0, a1 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: or a0, a0, a1 +; RV32I-NEXT: or a0, a0, a2 ; RV32I-NEXT: seqz a0, a0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: and_icmp_eq: ; RV64I: # %bb.0: +; RV64I-NEXT: xor a2, a2, a3 ; RV64I-NEXT: xor a0, a0, a1 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: seqz a0, a0 @@ -31,17 +31,17 @@ define i1 @and_icmp_eq(i32 %a, i32 %b, i32 %c, i32 %d) nounwind { define i1 @or_icmp_ne(i32 %a, i32 %b, i32 %c, i32 %d) nounwind { ; RV32I-LABEL: or_icmp_ne: ; RV32I: # %bb.0: +; RV32I-NEXT: xor a2, a2, a3 ; RV32I-NEXT: xor a0, a0, a1 -; RV32I-NEXT: xor a1, a2, a3 -; RV32I-NEXT: or a0, a0, a1 +; RV32I-NEXT: or a0, a0, a2 ; RV32I-NEXT: snez a0, a0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: or_icmp_ne: ; RV64I: # %bb.0: +; RV64I-NEXT: xor a2, a2, a3 ; RV64I-NEXT: xor a0, a0, a1 -; RV64I-NEXT: xor a1, a2, a3 -; RV64I-NEXT: or a0, a0, a1 +; RV64I-NEXT: or a0, a0, a2 ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 ; RV64I-NEXT: snez a0, a0 @@ -102,22 +102,22 @@ define i1 @and_icmps_const_1bit_diff(i32 %x) nounwind { define i1 @and_icmps_const_not1bit_diff(i32 %x) nounwind { ; RV32I-LABEL: and_icmps_const_not1bit_diff: ; RV32I: # %bb.0: -; RV32I-NEXT: xori a1, a0, 44 +; RV32I-NEXT: xori a1, a0, 92 ; RV32I-NEXT: snez a1, a1 -; RV32I-NEXT: xori a0, a0, 92 +; RV32I-NEXT: xori a0, a0, 44 ; RV32I-NEXT: snez a0, a0 -; RV32I-NEXT: and a0, a1, a0 +; RV32I-NEXT: and a0, a0, a1 ; RV32I-NEXT: ret ; ; RV64I-LABEL: and_icmps_const_not1bit_diff: ; RV64I: # %bb.0: ; RV64I-NEXT: slli a0, a0, 32 ; RV64I-NEXT: srli a0, a0, 32 -; RV64I-NEXT: xori a1, a0, 44 +; RV64I-NEXT: xori a1, a0, 92 ; RV64I-NEXT: snez a1, a1 -; RV64I-NEXT: xori a0, a0, 92 +; RV64I-NEXT: xori a0, a0, 44 ; RV64I-NEXT: snez a0, a0 -; RV64I-NEXT: and a0, a1, a0 +; RV64I-NEXT: and a0, a0, a1 ; RV64I-NEXT: ret %a = icmp ne i32 %x, 44 %b = icmp ne i32 %x, 92 diff --git a/llvm/test/CodeGen/RISCV/shifts.ll b/llvm/test/CodeGen/RISCV/shifts.ll index 54c49f3..80cf136 100644 --- a/llvm/test/CodeGen/RISCV/shifts.ll +++ b/llvm/test/CodeGen/RISCV/shifts.ll @@ -17,14 +17,13 @@ define i64 @lshr64(i64 %a, i64 %b) nounwind { ; RV32I-NEXT: mv a1, zero ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB0_2: -; RV32I-NEXT: srl a0, a0, a2 ; RV32I-NEXT: addi a3, zero, 31 ; RV32I-NEXT: sub a3, a3, a2 ; RV32I-NEXT: slli a4, a1, 1 ; RV32I-NEXT: sll a3, a4, a3 +; RV32I-NEXT: srl a0, a0, a2 ; RV32I-NEXT: or a0, a0, a3 -; RV32I-NEXT: srl a2, a1, a2 -; RV32I-NEXT: mv a1, a2 +; RV32I-NEXT: srl a1, a1, a2 ; RV32I-NEXT: ret ; ; RV64I-LABEL: lshr64: @@ -63,11 +62,11 @@ define i64 @ashr64(i64 %a, i64 %b) nounwind { ; RV32I-NEXT: srai a1, a1, 31 ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB2_2: -; RV32I-NEXT: srl a0, a0, a2 ; RV32I-NEXT: addi a3, zero, 31 ; RV32I-NEXT: sub a3, a3, a2 ; RV32I-NEXT: slli a4, a1, 1 ; RV32I-NEXT: sll a3, a4, a3 +; RV32I-NEXT: srl a0, a0, a2 ; RV32I-NEXT: or a0, a0, a3 ; RV32I-NEXT: sra a1, a1, a2 ; RV32I-NEXT: ret @@ -108,14 +107,13 @@ define i64 @shl64(i64 %a, i64 %b) nounwind { ; RV32I-NEXT: mv a0, zero ; RV32I-NEXT: ret ; RV32I-NEXT: .LBB4_2: -; RV32I-NEXT: sll a1, a1, a2 ; RV32I-NEXT: addi a3, zero, 31 ; RV32I-NEXT: sub a3, a3, a2 ; RV32I-NEXT: srli a4, a0, 1 ; RV32I-NEXT: srl a3, a4, a3 +; RV32I-NEXT: sll a1, a1, 
a2 ; RV32I-NEXT: or a1, a1, a3 -; RV32I-NEXT: sll a2, a0, a2 -; RV32I-NEXT: mv a0, a2 +; RV32I-NEXT: sll a0, a0, a2 ; RV32I-NEXT: ret ; ; RV64I-LABEL: shl64: @@ -150,27 +148,27 @@ define i128 @lshr128(i128 %a, i128 %b) nounwind { ; RV32I-NEXT: addi sp, sp, -48 ; RV32I-NEXT: sw ra, 44(sp) ; RV32I-NEXT: sw s0, 40(sp) -; RV32I-NEXT: lw a2, 0(a2) -; RV32I-NEXT: lw a3, 0(a1) -; RV32I-NEXT: lw a4, 4(a1) -; RV32I-NEXT: lw a5, 8(a1) -; RV32I-NEXT: lw a1, 12(a1) ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: sw a1, 20(sp) -; RV32I-NEXT: sw a5, 16(sp) -; RV32I-NEXT: sw a4, 12(sp) +; RV32I-NEXT: lw a0, 12(a1) +; RV32I-NEXT: sw a0, 20(sp) +; RV32I-NEXT: lw a0, 8(a1) +; RV32I-NEXT: sw a0, 16(sp) +; RV32I-NEXT: lw a0, 4(a1) +; RV32I-NEXT: sw a0, 12(sp) +; RV32I-NEXT: lw a0, 0(a1) +; RV32I-NEXT: sw a0, 8(sp) +; RV32I-NEXT: lw a2, 0(a2) ; RV32I-NEXT: addi a0, sp, 24 ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: sw a3, 8(sp) ; RV32I-NEXT: call __lshrti3 ; RV32I-NEXT: lw a0, 36(sp) -; RV32I-NEXT: lw a1, 32(sp) -; RV32I-NEXT: lw a2, 28(sp) -; RV32I-NEXT: lw a3, 24(sp) ; RV32I-NEXT: sw a0, 12(s0) -; RV32I-NEXT: sw a1, 8(s0) -; RV32I-NEXT: sw a2, 4(s0) -; RV32I-NEXT: sw a3, 0(s0) +; RV32I-NEXT: lw a0, 32(sp) +; RV32I-NEXT: sw a0, 8(s0) +; RV32I-NEXT: lw a0, 28(sp) +; RV32I-NEXT: sw a0, 4(s0) +; RV32I-NEXT: lw a0, 24(sp) +; RV32I-NEXT: sw a0, 0(s0) ; RV32I-NEXT: lw s0, 40(sp) ; RV32I-NEXT: lw ra, 44(sp) ; RV32I-NEXT: addi sp, sp, 48 @@ -185,14 +183,13 @@ define i128 @lshr128(i128 %a, i128 %b) nounwind { ; RV64I-NEXT: mv a1, zero ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB6_2: -; RV64I-NEXT: srl a0, a0, a2 ; RV64I-NEXT: addi a3, zero, 63 ; RV64I-NEXT: sub a3, a3, a2 ; RV64I-NEXT: slli a4, a1, 1 ; RV64I-NEXT: sll a3, a4, a3 +; RV64I-NEXT: srl a0, a0, a2 ; RV64I-NEXT: or a0, a0, a3 -; RV64I-NEXT: srl a2, a1, a2 -; RV64I-NEXT: mv a1, a2 +; RV64I-NEXT: srl a1, a1, a2 ; RV64I-NEXT: ret %1 = lshr i128 %a, %b ret i128 %1 @@ -204,27 +201,27 @@ define i128 @ashr128(i128 %a, i128 %b) nounwind { ; RV32I-NEXT: addi sp, sp, -48 ; RV32I-NEXT: sw ra, 44(sp) ; RV32I-NEXT: sw s0, 40(sp) -; RV32I-NEXT: lw a2, 0(a2) -; RV32I-NEXT: lw a3, 0(a1) -; RV32I-NEXT: lw a4, 4(a1) -; RV32I-NEXT: lw a5, 8(a1) -; RV32I-NEXT: lw a1, 12(a1) ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: sw a1, 20(sp) -; RV32I-NEXT: sw a5, 16(sp) -; RV32I-NEXT: sw a4, 12(sp) +; RV32I-NEXT: lw a0, 12(a1) +; RV32I-NEXT: sw a0, 20(sp) +; RV32I-NEXT: lw a0, 8(a1) +; RV32I-NEXT: sw a0, 16(sp) +; RV32I-NEXT: lw a0, 4(a1) +; RV32I-NEXT: sw a0, 12(sp) +; RV32I-NEXT: lw a0, 0(a1) +; RV32I-NEXT: sw a0, 8(sp) +; RV32I-NEXT: lw a2, 0(a2) ; RV32I-NEXT: addi a0, sp, 24 ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: sw a3, 8(sp) ; RV32I-NEXT: call __ashrti3 ; RV32I-NEXT: lw a0, 36(sp) -; RV32I-NEXT: lw a1, 32(sp) -; RV32I-NEXT: lw a2, 28(sp) -; RV32I-NEXT: lw a3, 24(sp) ; RV32I-NEXT: sw a0, 12(s0) -; RV32I-NEXT: sw a1, 8(s0) -; RV32I-NEXT: sw a2, 4(s0) -; RV32I-NEXT: sw a3, 0(s0) +; RV32I-NEXT: lw a0, 32(sp) +; RV32I-NEXT: sw a0, 8(s0) +; RV32I-NEXT: lw a0, 28(sp) +; RV32I-NEXT: sw a0, 4(s0) +; RV32I-NEXT: lw a0, 24(sp) +; RV32I-NEXT: sw a0, 0(s0) ; RV32I-NEXT: lw s0, 40(sp) ; RV32I-NEXT: lw ra, 44(sp) ; RV32I-NEXT: addi sp, sp, 48 @@ -239,11 +236,11 @@ define i128 @ashr128(i128 %a, i128 %b) nounwind { ; RV64I-NEXT: srai a1, a1, 63 ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB7_2: -; RV64I-NEXT: srl a0, a0, a2 ; RV64I-NEXT: addi a3, zero, 63 ; RV64I-NEXT: sub a3, a3, a2 ; RV64I-NEXT: slli a4, a1, 1 ; RV64I-NEXT: sll a3, a4, a3 +; RV64I-NEXT: srl a0, a0, a2 ; RV64I-NEXT: or a0, a0, a3 ; RV64I-NEXT: sra a1, a1, a2 
; RV64I-NEXT: ret @@ -257,27 +254,27 @@ define i128 @shl128(i128 %a, i128 %b) nounwind { ; RV32I-NEXT: addi sp, sp, -48 ; RV32I-NEXT: sw ra, 44(sp) ; RV32I-NEXT: sw s0, 40(sp) -; RV32I-NEXT: lw a2, 0(a2) -; RV32I-NEXT: lw a3, 0(a1) -; RV32I-NEXT: lw a4, 4(a1) -; RV32I-NEXT: lw a5, 8(a1) -; RV32I-NEXT: lw a1, 12(a1) ; RV32I-NEXT: mv s0, a0 -; RV32I-NEXT: sw a1, 20(sp) -; RV32I-NEXT: sw a5, 16(sp) -; RV32I-NEXT: sw a4, 12(sp) +; RV32I-NEXT: lw a0, 12(a1) +; RV32I-NEXT: sw a0, 20(sp) +; RV32I-NEXT: lw a0, 8(a1) +; RV32I-NEXT: sw a0, 16(sp) +; RV32I-NEXT: lw a0, 4(a1) +; RV32I-NEXT: sw a0, 12(sp) +; RV32I-NEXT: lw a0, 0(a1) +; RV32I-NEXT: sw a0, 8(sp) +; RV32I-NEXT: lw a2, 0(a2) ; RV32I-NEXT: addi a0, sp, 24 ; RV32I-NEXT: addi a1, sp, 8 -; RV32I-NEXT: sw a3, 8(sp) ; RV32I-NEXT: call __ashlti3 ; RV32I-NEXT: lw a0, 36(sp) -; RV32I-NEXT: lw a1, 32(sp) -; RV32I-NEXT: lw a2, 28(sp) -; RV32I-NEXT: lw a3, 24(sp) ; RV32I-NEXT: sw a0, 12(s0) -; RV32I-NEXT: sw a1, 8(s0) -; RV32I-NEXT: sw a2, 4(s0) -; RV32I-NEXT: sw a3, 0(s0) +; RV32I-NEXT: lw a0, 32(sp) +; RV32I-NEXT: sw a0, 8(s0) +; RV32I-NEXT: lw a0, 28(sp) +; RV32I-NEXT: sw a0, 4(s0) +; RV32I-NEXT: lw a0, 24(sp) +; RV32I-NEXT: sw a0, 0(s0) ; RV32I-NEXT: lw s0, 40(sp) ; RV32I-NEXT: lw ra, 44(sp) ; RV32I-NEXT: addi sp, sp, 48 @@ -292,14 +289,13 @@ define i128 @shl128(i128 %a, i128 %b) nounwind { ; RV64I-NEXT: mv a0, zero ; RV64I-NEXT: ret ; RV64I-NEXT: .LBB8_2: -; RV64I-NEXT: sll a1, a1, a2 ; RV64I-NEXT: addi a3, zero, 63 ; RV64I-NEXT: sub a3, a3, a2 ; RV64I-NEXT: srli a4, a0, 1 ; RV64I-NEXT: srl a3, a4, a3 +; RV64I-NEXT: sll a1, a1, a2 ; RV64I-NEXT: or a1, a1, a3 -; RV64I-NEXT: sll a2, a0, a2 -; RV64I-NEXT: mv a0, a2 +; RV64I-NEXT: sll a0, a0, a2 ; RV64I-NEXT: ret %1 = shl i128 %a, %b ret i128 %1 diff --git a/llvm/test/CodeGen/RISCV/split-offsets.ll b/llvm/test/CodeGen/RISCV/split-offsets.ll index 8642425..731f6ca 100644 --- a/llvm/test/CodeGen/RISCV/split-offsets.ll +++ b/llvm/test/CodeGen/RISCV/split-offsets.ll @@ -11,33 +11,33 @@ define void @test1([65536 x i32]** %sp, [65536 x i32]* %t, i32 %n) { ; RV32I-LABEL: test1: ; RV32I: # %bb.0: # %entry -; RV32I-NEXT: lw a0, 0(a0) ; RV32I-NEXT: lui a2, 20 ; RV32I-NEXT: addi a2, a2, -1920 -; RV32I-NEXT: add a1, a1, a2 +; RV32I-NEXT: lw a0, 0(a0) ; RV32I-NEXT: add a0, a0, a2 -; RV32I-NEXT: addi a2, zero, 2 -; RV32I-NEXT: sw a2, 0(a0) ; RV32I-NEXT: addi a3, zero, 1 ; RV32I-NEXT: sw a3, 4(a0) -; RV32I-NEXT: sw a3, 0(a1) -; RV32I-NEXT: sw a2, 4(a1) +; RV32I-NEXT: addi a4, zero, 2 +; RV32I-NEXT: sw a4, 0(a0) +; RV32I-NEXT: add a0, a1, a2 +; RV32I-NEXT: sw a4, 4(a0) +; RV32I-NEXT: sw a3, 0(a0) ; RV32I-NEXT: .cfi_def_cfa_offset 0 ; RV32I-NEXT: ret ; ; RV64I-LABEL: test1: ; RV64I: # %bb.0: # %entry -; RV64I-NEXT: ld a0, 0(a0) ; RV64I-NEXT: lui a2, 20 ; RV64I-NEXT: addiw a2, a2, -1920 -; RV64I-NEXT: add a1, a1, a2 +; RV64I-NEXT: ld a0, 0(a0) ; RV64I-NEXT: add a0, a0, a2 -; RV64I-NEXT: addi a2, zero, 2 -; RV64I-NEXT: sw a2, 0(a0) ; RV64I-NEXT: addi a3, zero, 1 ; RV64I-NEXT: sw a3, 4(a0) -; RV64I-NEXT: sw a3, 0(a1) -; RV64I-NEXT: sw a2, 4(a1) +; RV64I-NEXT: addi a4, zero, 2 +; RV64I-NEXT: sw a4, 0(a0) +; RV64I-NEXT: add a0, a1, a2 +; RV64I-NEXT: sw a4, 4(a0) +; RV64I-NEXT: sw a3, 0(a0) ; RV64I-NEXT: .cfi_def_cfa_offset 0 ; RV64I-NEXT: ret entry: @@ -57,20 +57,20 @@ entry: define void @test2([65536 x i32]** %sp, [65536 x i32]* %t, i32 %n) { ; RV32I-LABEL: test2: ; RV32I: # %bb.0: # %entry +; RV32I-NEXT: lui a3, 20 +; RV32I-NEXT: addi a3, a3, -1920 +; RV32I-NEXT: lw a0, 0(a0) +; RV32I-NEXT: add a0, a0, a3 +; RV32I-NEXT: 
add a1, a1, a3 ; RV32I-NEXT: mv a3, zero -; RV32I-NEXT: lw a4, 0(a0) -; RV32I-NEXT: lui a0, 20 -; RV32I-NEXT: addi a5, a0, -1920 -; RV32I-NEXT: add a0, a1, a5 -; RV32I-NEXT: add a1, a4, a5 ; RV32I-NEXT: bge a3, a2, .LBB1_2 ; RV32I-NEXT: .LBB1_1: # %while_body ; RV32I-NEXT: # =>This Inner Loop Header: Depth=1 +; RV32I-NEXT: sw a3, 4(a0) ; RV32I-NEXT: addi a4, a3, 1 -; RV32I-NEXT: sw a4, 0(a1) -; RV32I-NEXT: sw a3, 4(a1) ; RV32I-NEXT: sw a4, 0(a0) -; RV32I-NEXT: sw a3, 4(a0) +; RV32I-NEXT: sw a3, 4(a1) +; RV32I-NEXT: sw a4, 0(a1) ; RV32I-NEXT: mv a3, a4 ; RV32I-NEXT: blt a3, a2, .LBB1_1 ; RV32I-NEXT: .LBB1_2: # %while_end @@ -79,22 +79,22 @@ define void @test2([65536 x i32]** %sp, [65536 x i32]* %t, i32 %n) { ; ; RV64I-LABEL: test2: ; RV64I: # %bb.0: # %entry -; RV64I-NEXT: mv a3, zero -; RV64I-NEXT: ld a4, 0(a0) -; RV64I-NEXT: lui a0, 20 -; RV64I-NEXT: addiw a5, a0, -1920 -; RV64I-NEXT: add a0, a1, a5 -; RV64I-NEXT: add a1, a4, a5 +; RV64I-NEXT: lui a3, 20 +; RV64I-NEXT: addiw a3, a3, -1920 +; RV64I-NEXT: ld a0, 0(a0) +; RV64I-NEXT: add a0, a0, a3 +; RV64I-NEXT: add a1, a1, a3 ; RV64I-NEXT: sext.w a2, a2 +; RV64I-NEXT: mv a3, zero ; RV64I-NEXT: sext.w a4, a3 ; RV64I-NEXT: bge a4, a2, .LBB1_2 ; RV64I-NEXT: .LBB1_1: # %while_body ; RV64I-NEXT: # =>This Inner Loop Header: Depth=1 +; RV64I-NEXT: sw a3, 4(a0) ; RV64I-NEXT: addi a4, a3, 1 -; RV64I-NEXT: sw a4, 0(a1) -; RV64I-NEXT: sw a3, 4(a1) ; RV64I-NEXT: sw a4, 0(a0) -; RV64I-NEXT: sw a3, 4(a0) +; RV64I-NEXT: sw a3, 4(a1) +; RV64I-NEXT: sw a4, 0(a1) ; RV64I-NEXT: mv a3, a4 ; RV64I-NEXT: sext.w a4, a3 ; RV64I-NEXT: blt a4, a2, .LBB1_1 diff --git a/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll b/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll index 30346a1..e0ac648 100644 --- a/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll +++ b/llvm/test/CodeGen/RISCV/umulo-128-legalisation-lowering.ll @@ -4,113 +4,111 @@ define { i128, i8 } @muloti_test(i128 %l, i128 %r) #0 { ; RISCV32-LABEL: muloti_test: ; RISCV32: # %bb.0: # %start -; RISCV32-NEXT: addi sp, sp, -96 -; RISCV32-NEXT: sw ra, 92(sp) -; RISCV32-NEXT: sw s0, 88(sp) -; RISCV32-NEXT: sw s1, 84(sp) -; RISCV32-NEXT: sw s2, 80(sp) -; RISCV32-NEXT: sw s3, 76(sp) -; RISCV32-NEXT: sw s4, 72(sp) -; RISCV32-NEXT: sw s5, 68(sp) -; RISCV32-NEXT: sw s6, 64(sp) -; RISCV32-NEXT: sw s7, 60(sp) -; RISCV32-NEXT: sw s8, 56(sp) -; RISCV32-NEXT: lw s2, 12(a1) -; RISCV32-NEXT: lw s6, 8(a1) -; RISCV32-NEXT: lw s3, 12(a2) -; RISCV32-NEXT: lw s7, 8(a2) -; RISCV32-NEXT: lw s0, 0(a1) -; RISCV32-NEXT: lw s8, 4(a1) -; RISCV32-NEXT: lw s1, 0(a2) -; RISCV32-NEXT: lw s5, 4(a2) -; RISCV32-NEXT: mv s4, a0 -; RISCV32-NEXT: sw zero, 20(sp) -; RISCV32-NEXT: sw zero, 16(sp) -; RISCV32-NEXT: sw zero, 36(sp) -; RISCV32-NEXT: sw zero, 32(sp) -; RISCV32-NEXT: sw s5, 12(sp) -; RISCV32-NEXT: sw s1, 8(sp) -; RISCV32-NEXT: sw s8, 28(sp) -; RISCV32-NEXT: addi a0, sp, 40 -; RISCV32-NEXT: addi a1, sp, 24 -; RISCV32-NEXT: addi a2, sp, 8 -; RISCV32-NEXT: sw s0, 24(sp) +; RISCV32-NEXT: addi sp, sp, -80 +; RISCV32-NEXT: sw ra, 76(sp) +; RISCV32-NEXT: sw s0, 72(sp) +; RISCV32-NEXT: sw s1, 68(sp) +; RISCV32-NEXT: sw s2, 64(sp) +; RISCV32-NEXT: sw s3, 60(sp) +; RISCV32-NEXT: sw s4, 56(sp) +; RISCV32-NEXT: sw s5, 52(sp) +; RISCV32-NEXT: sw s6, 48(sp) +; RISCV32-NEXT: mv s1, a2 +; RISCV32-NEXT: mv s0, a1 +; RISCV32-NEXT: mv s2, a0 +; RISCV32-NEXT: sw zero, 12(sp) +; RISCV32-NEXT: sw zero, 8(sp) +; RISCV32-NEXT: sw zero, 28(sp) +; RISCV32-NEXT: sw zero, 24(sp) +; RISCV32-NEXT: lw s3, 4(a2) +; RISCV32-NEXT: sw s3, 4(sp) 
+; RISCV32-NEXT: lw s5, 0(a2) +; RISCV32-NEXT: sw s5, 0(sp) +; RISCV32-NEXT: lw s4, 4(a1) +; RISCV32-NEXT: sw s4, 20(sp) +; RISCV32-NEXT: lw s6, 0(a1) +; RISCV32-NEXT: sw s6, 16(sp) +; RISCV32-NEXT: addi a0, sp, 32 +; RISCV32-NEXT: addi a1, sp, 16 +; RISCV32-NEXT: mv a2, sp ; RISCV32-NEXT: call __multi3 -; RISCV32-NEXT: mul a0, s8, s7 -; RISCV32-NEXT: mul a1, s3, s0 -; RISCV32-NEXT: add a0, a1, a0 -; RISCV32-NEXT: mulhu a5, s7, s0 -; RISCV32-NEXT: add a0, a5, a0 -; RISCV32-NEXT: mul a1, s5, s6 -; RISCV32-NEXT: mul a2, s2, s1 -; RISCV32-NEXT: add a1, a2, a1 -; RISCV32-NEXT: mulhu t0, s6, s1 -; RISCV32-NEXT: add t1, t0, a1 -; RISCV32-NEXT: add a6, t1, a0 -; RISCV32-NEXT: mul a1, s7, s0 -; RISCV32-NEXT: mul a3, s6, s1 -; RISCV32-NEXT: add a4, a3, a1 -; RISCV32-NEXT: lw a1, 52(sp) -; RISCV32-NEXT: lw a2, 48(sp) -; RISCV32-NEXT: sltu a3, a4, a3 -; RISCV32-NEXT: add a3, a6, a3 -; RISCV32-NEXT: add a3, a1, a3 -; RISCV32-NEXT: add a6, a2, a4 -; RISCV32-NEXT: sltu a2, a6, a2 -; RISCV32-NEXT: add a7, a3, a2 -; RISCV32-NEXT: beq a7, a1, .LBB0_2 +; RISCV32-NEXT: lw a0, 12(s0) +; RISCV32-NEXT: lw a1, 8(s0) +; RISCV32-NEXT: mul a2, s3, a1 +; RISCV32-NEXT: mul a3, a0, s5 +; RISCV32-NEXT: add a4, a3, a2 +; RISCV32-NEXT: lw a2, 12(s1) +; RISCV32-NEXT: lw a3, 8(s1) +; RISCV32-NEXT: mul a5, s4, a3 +; RISCV32-NEXT: mul s1, a2, s6 +; RISCV32-NEXT: add a5, s1, a5 +; RISCV32-NEXT: mul s1, a3, s6 +; RISCV32-NEXT: mul s0, a1, s5 +; RISCV32-NEXT: add s1, s0, s1 +; RISCV32-NEXT: sltu s0, s1, s0 +; RISCV32-NEXT: mulhu a6, a3, s6 +; RISCV32-NEXT: add t1, a6, a5 +; RISCV32-NEXT: mulhu t2, a1, s5 +; RISCV32-NEXT: add t3, t2, a4 +; RISCV32-NEXT: add a5, t3, t1 +; RISCV32-NEXT: add a5, a5, s0 +; RISCV32-NEXT: lw s0, 44(sp) +; RISCV32-NEXT: add a5, s0, a5 +; RISCV32-NEXT: lw a4, 40(sp) +; RISCV32-NEXT: add a7, a4, s1 +; RISCV32-NEXT: sltu t0, a7, a4 +; RISCV32-NEXT: add a5, a5, t0 +; RISCV32-NEXT: beq a5, s0, .LBB0_2 ; RISCV32-NEXT: # %bb.1: # %start -; RISCV32-NEXT: sltu a2, a7, a1 +; RISCV32-NEXT: sltu t0, a5, s0 ; RISCV32-NEXT: .LBB0_2: # %start -; RISCV32-NEXT: sltu a0, a0, a5 -; RISCV32-NEXT: snez a1, s8 -; RISCV32-NEXT: snez a3, s3 -; RISCV32-NEXT: and a1, a3, a1 -; RISCV32-NEXT: mulhu a3, s3, s0 -; RISCV32-NEXT: snez a3, a3 -; RISCV32-NEXT: or a1, a1, a3 -; RISCV32-NEXT: mulhu a3, s8, s7 -; RISCV32-NEXT: snez a3, a3 -; RISCV32-NEXT: or a1, a1, a3 -; RISCV32-NEXT: or a0, a1, a0 -; RISCV32-NEXT: sltu a1, t1, t0 -; RISCV32-NEXT: snez a3, s5 -; RISCV32-NEXT: snez a4, s2 -; RISCV32-NEXT: and a3, a4, a3 -; RISCV32-NEXT: mulhu a4, s2, s1 -; RISCV32-NEXT: snez a4, a4 -; RISCV32-NEXT: or a3, a3, a4 -; RISCV32-NEXT: mulhu a4, s5, s6 -; RISCV32-NEXT: snez a4, a4 -; RISCV32-NEXT: or a3, a3, a4 -; RISCV32-NEXT: or a1, a3, a1 -; RISCV32-NEXT: or a3, s7, s3 -; RISCV32-NEXT: snez a3, a3 -; RISCV32-NEXT: or a4, s6, s2 +; RISCV32-NEXT: snez a4, s3 +; RISCV32-NEXT: snez s1, a0 +; RISCV32-NEXT: and a4, s1, a4 +; RISCV32-NEXT: snez s1, s4 +; RISCV32-NEXT: snez s0, a2 +; RISCV32-NEXT: and s1, s0, s1 +; RISCV32-NEXT: mulhu s0, a2, s6 +; RISCV32-NEXT: snez s0, s0 +; RISCV32-NEXT: or s1, s1, s0 +; RISCV32-NEXT: mulhu s0, a0, s5 +; RISCV32-NEXT: snez s0, s0 +; RISCV32-NEXT: or a4, a4, s0 +; RISCV32-NEXT: sltu t2, t3, t2 +; RISCV32-NEXT: mulhu s0, s3, a1 +; RISCV32-NEXT: snez s0, s0 +; RISCV32-NEXT: or t3, a4, s0 +; RISCV32-NEXT: sltu s0, t1, a6 +; RISCV32-NEXT: mulhu a4, s4, a3 ; RISCV32-NEXT: snez a4, a4 -; RISCV32-NEXT: and a3, a4, a3 -; RISCV32-NEXT: or a1, a3, a1 +; RISCV32-NEXT: or a4, s1, a4 +; RISCV32-NEXT: lw s1, 36(sp) +; 
RISCV32-NEXT: sw s1, 4(s2) +; RISCV32-NEXT: lw s1, 32(sp) +; RISCV32-NEXT: sw s1, 0(s2) +; RISCV32-NEXT: sw a7, 8(s2) +; RISCV32-NEXT: sw a5, 12(s2) +; RISCV32-NEXT: or a4, a4, s0 +; RISCV32-NEXT: or a5, t3, t2 ; RISCV32-NEXT: or a0, a1, a0 -; RISCV32-NEXT: lw a1, 44(sp) -; RISCV32-NEXT: lw a3, 40(sp) -; RISCV32-NEXT: or a0, a0, a2 +; RISCV32-NEXT: or a1, a3, a2 +; RISCV32-NEXT: snez a1, a1 +; RISCV32-NEXT: snez a0, a0 +; RISCV32-NEXT: and a0, a0, a1 +; RISCV32-NEXT: or a0, a0, a5 +; RISCV32-NEXT: or a0, a0, a4 +; RISCV32-NEXT: or a0, a0, t0 ; RISCV32-NEXT: andi a0, a0, 1 -; RISCV32-NEXT: sw a1, 4(s4) -; RISCV32-NEXT: sw a3, 0(s4) -; RISCV32-NEXT: sw a6, 8(s4) -; RISCV32-NEXT: sw a7, 12(s4) -; RISCV32-NEXT: sb a0, 16(s4) -; RISCV32-NEXT: lw s8, 56(sp) -; RISCV32-NEXT: lw s7, 60(sp) -; RISCV32-NEXT: lw s6, 64(sp) -; RISCV32-NEXT: lw s5, 68(sp) -; RISCV32-NEXT: lw s4, 72(sp) -; RISCV32-NEXT: lw s3, 76(sp) -; RISCV32-NEXT: lw s2, 80(sp) -; RISCV32-NEXT: lw s1, 84(sp) -; RISCV32-NEXT: lw s0, 88(sp) -; RISCV32-NEXT: lw ra, 92(sp) -; RISCV32-NEXT: addi sp, sp, 96 +; RISCV32-NEXT: sb a0, 16(s2) +; RISCV32-NEXT: lw s6, 48(sp) +; RISCV32-NEXT: lw s5, 52(sp) +; RISCV32-NEXT: lw s4, 56(sp) +; RISCV32-NEXT: lw s3, 60(sp) +; RISCV32-NEXT: lw s2, 64(sp) +; RISCV32-NEXT: lw s1, 68(sp) +; RISCV32-NEXT: lw s0, 72(sp) +; RISCV32-NEXT: lw ra, 76(sp) +; RISCV32-NEXT: addi sp, sp, 80 ; RISCV32-NEXT: ret start: %0 = tail call { i128, i1 } @llvm.umul.with.overflow.i128(i128 %l, i128 %r) #2 diff --git a/llvm/test/CodeGen/RISCV/vararg.ll b/llvm/test/CodeGen/RISCV/vararg.ll index 9b906cb..b630a1b 100644 --- a/llvm/test/CodeGen/RISCV/vararg.ll +++ b/llvm/test/CodeGen/RISCV/vararg.ll @@ -46,9 +46,9 @@ define i32 @va1(i8* %fmt, ...) nounwind { ; ILP32-ILP32F-FPELIM-NEXT: sw a4, 32(sp) ; ILP32-ILP32F-FPELIM-NEXT: sw a3, 28(sp) ; ILP32-ILP32F-FPELIM-NEXT: sw a2, 24(sp) -; ILP32-ILP32F-FPELIM-NEXT: sw a1, 20(sp) ; ILP32-ILP32F-FPELIM-NEXT: addi a1, sp, 24 ; ILP32-ILP32F-FPELIM-NEXT: sw a1, 12(sp) +; ILP32-ILP32F-FPELIM-NEXT: sw a0, 20(sp) ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, 48 ; ILP32-ILP32F-FPELIM-NEXT: ret ; @@ -65,9 +65,9 @@ define i32 @va1(i8* %fmt, ...) nounwind { ; ILP32-ILP32F-WITHFP-NEXT: sw a4, 16(s0) ; ILP32-ILP32F-WITHFP-NEXT: sw a3, 12(s0) ; ILP32-ILP32F-WITHFP-NEXT: sw a2, 8(s0) -; ILP32-ILP32F-WITHFP-NEXT: sw a1, 4(s0) ; ILP32-ILP32F-WITHFP-NEXT: addi a1, s0, 8 ; ILP32-ILP32F-WITHFP-NEXT: sw a1, -12(s0) +; ILP32-ILP32F-WITHFP-NEXT: sw a0, 4(s0) ; ILP32-ILP32F-WITHFP-NEXT: lw s0, 8(sp) ; ILP32-ILP32F-WITHFP-NEXT: lw ra, 12(sp) ; ILP32-ILP32F-WITHFP-NEXT: addi sp, sp, 48 @@ -83,9 +83,9 @@ define i32 @va1(i8* %fmt, ...) nounwind { ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a4, 32(sp) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a3, 28(sp) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a2, 24(sp) -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a1, 20(sp) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a1, sp, 24 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a1, 12(sp) +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a0, 20(sp) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, 48 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: ret ; @@ -150,9 +150,9 @@ define i32 @va1_va_arg(i8* %fmt, ...) 
nounwind { ; ILP32-ILP32F-FPELIM-NEXT: sw a4, 32(sp) ; ILP32-ILP32F-FPELIM-NEXT: sw a3, 28(sp) ; ILP32-ILP32F-FPELIM-NEXT: sw a2, 24(sp) -; ILP32-ILP32F-FPELIM-NEXT: sw a1, 20(sp) ; ILP32-ILP32F-FPELIM-NEXT: addi a1, sp, 24 ; ILP32-ILP32F-FPELIM-NEXT: sw a1, 12(sp) +; ILP32-ILP32F-FPELIM-NEXT: sw a0, 20(sp) ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, 48 ; ILP32-ILP32F-FPELIM-NEXT: ret ; @@ -169,9 +169,9 @@ define i32 @va1_va_arg(i8* %fmt, ...) nounwind { ; ILP32-ILP32F-WITHFP-NEXT: sw a4, 16(s0) ; ILP32-ILP32F-WITHFP-NEXT: sw a3, 12(s0) ; ILP32-ILP32F-WITHFP-NEXT: sw a2, 8(s0) -; ILP32-ILP32F-WITHFP-NEXT: sw a1, 4(s0) ; ILP32-ILP32F-WITHFP-NEXT: addi a1, s0, 8 ; ILP32-ILP32F-WITHFP-NEXT: sw a1, -12(s0) +; ILP32-ILP32F-WITHFP-NEXT: sw a0, 4(s0) ; ILP32-ILP32F-WITHFP-NEXT: lw s0, 8(sp) ; ILP32-ILP32F-WITHFP-NEXT: lw ra, 12(sp) ; ILP32-ILP32F-WITHFP-NEXT: addi sp, sp, 48 @@ -187,9 +187,9 @@ define i32 @va1_va_arg(i8* %fmt, ...) nounwind { ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a4, 32(sp) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a3, 28(sp) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a2, 24(sp) -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a1, 20(sp) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a1, sp, 24 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a1, 12(sp) +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a0, 20(sp) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, 48 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: ret ; @@ -203,10 +203,10 @@ define i32 @va1_va_arg(i8* %fmt, ...) nounwind { ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 48(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 24(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, sp, 24 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, a1, 8 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 24(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80 ; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; @@ -223,10 +223,10 @@ define i32 @va1_va_arg(i8* %fmt, ...) nounwind { ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a4, 32(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, 24(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 16(s0) -; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, 8(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, s0, 8 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, a1, 8 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, -24(s0) +; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, 8(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) ; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 24(sp) ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96 @@ -256,9 +256,9 @@ define i32 @va1_va_arg_alloca(i8* %fmt, ...) nounwind { ; ILP32-ILP32F-FPELIM-NEXT: sw a4, 16(s0) ; ILP32-ILP32F-FPELIM-NEXT: sw a3, 12(s0) ; ILP32-ILP32F-FPELIM-NEXT: sw a2, 8(s0) -; ILP32-ILP32F-FPELIM-NEXT: sw a1, 4(s0) ; ILP32-ILP32F-FPELIM-NEXT: addi a0, s0, 8 ; ILP32-ILP32F-FPELIM-NEXT: sw a0, -16(s0) +; ILP32-ILP32F-FPELIM-NEXT: sw a1, 4(s0) ; ILP32-ILP32F-FPELIM-NEXT: addi a0, a1, 15 ; ILP32-ILP32F-FPELIM-NEXT: andi a0, a0, -16 ; ILP32-ILP32F-FPELIM-NEXT: sub a0, sp, a0 @@ -286,9 +286,9 @@ define i32 @va1_va_arg_alloca(i8* %fmt, ...) 
nounwind { ; ILP32-ILP32F-WITHFP-NEXT: sw a4, 16(s0) ; ILP32-ILP32F-WITHFP-NEXT: sw a3, 12(s0) ; ILP32-ILP32F-WITHFP-NEXT: sw a2, 8(s0) -; ILP32-ILP32F-WITHFP-NEXT: sw a1, 4(s0) ; ILP32-ILP32F-WITHFP-NEXT: addi a0, s0, 8 ; ILP32-ILP32F-WITHFP-NEXT: sw a0, -16(s0) +; ILP32-ILP32F-WITHFP-NEXT: sw a1, 4(s0) ; ILP32-ILP32F-WITHFP-NEXT: addi a0, a1, 15 ; ILP32-ILP32F-WITHFP-NEXT: andi a0, a0, -16 ; ILP32-ILP32F-WITHFP-NEXT: sub a0, sp, a0 @@ -316,9 +316,9 @@ define i32 @va1_va_arg_alloca(i8* %fmt, ...) nounwind { ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a4, 16(s0) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a3, 12(s0) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a2, 8(s0) -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a1, 4(s0) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, s0, 8 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a0, -16(s0) +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a1, 4(s0) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, a1, 15 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: andi a0, a0, -16 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sub a0, sp, a0 @@ -346,17 +346,17 @@ define i32 @va1_va_arg_alloca(i8* %fmt, ...) nounwind { ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 32(s0) ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 24(s0) ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 16(s0) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(s0) ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, s0, 8 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 8 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, -32(s0) -; LP64-LP64F-LP64D-FPELIM-NEXT: slli a0, a1, 32 -; LP64-LP64F-LP64D-FPELIM-NEXT: srli a0, a0, 32 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 15 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, zero, 1 -; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a1, 33 -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, a1, -16 -; LP64-LP64F-LP64D-FPELIM-NEXT: and a0, a0, a1 +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, zero, 1 +; LP64-LP64F-LP64D-FPELIM-NEXT: slli a0, a0, 33 +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, -16 +; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a1, 32 +; LP64-LP64F-LP64D-FPELIM-NEXT: srli a1, a1, 32 +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, a1, 15 +; LP64-LP64F-LP64D-FPELIM-NEXT: and a0, a1, a0 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd s1, 8(s0) ; LP64-LP64F-LP64D-FPELIM-NEXT: sub a0, sp, a0 ; LP64-LP64F-LP64D-FPELIM-NEXT: mv sp, a0 ; LP64-LP64F-LP64D-FPELIM-NEXT: call notdead @@ -382,17 +382,17 @@ define i32 @va1_va_arg_alloca(i8* %fmt, ...) 
nounwind { ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a4, 32(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, 24(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 16(s0) -; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, 8(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, s0, 8 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, a0, 8 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, -32(s0) -; LP64-LP64F-LP64D-WITHFP-NEXT: slli a0, a1, 32 -; LP64-LP64F-LP64D-WITHFP-NEXT: srli a0, a0, 32 -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, a0, 15 -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, zero, 1 -; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a1, 33 -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, a1, -16 -; LP64-LP64F-LP64D-WITHFP-NEXT: and a0, a0, a1 +; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, zero, 1 +; LP64-LP64F-LP64D-WITHFP-NEXT: slli a0, a0, 33 +; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, a0, -16 +; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a1, 32 +; LP64-LP64F-LP64D-WITHFP-NEXT: srli a1, a1, 32 +; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, a1, 15 +; LP64-LP64F-LP64D-WITHFP-NEXT: and a0, a1, a0 +; LP64-LP64F-LP64D-WITHFP-NEXT: sd s1, 8(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: sub a0, sp, a0 ; LP64-LP64F-LP64D-WITHFP-NEXT: mv sp, a0 ; LP64-LP64F-LP64D-WITHFP-NEXT: call notdead @@ -419,9 +419,9 @@ define void @va1_caller() nounwind { ; ILP32-ILP32F-FPELIM: # %bb.0: ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, -16 ; ILP32-ILP32F-FPELIM-NEXT: sw ra, 12(sp) +; ILP32-ILP32F-FPELIM-NEXT: mv a2, zero ; ILP32-ILP32F-FPELIM-NEXT: lui a3, 261888 ; ILP32-ILP32F-FPELIM-NEXT: addi a4, zero, 2 -; ILP32-ILP32F-FPELIM-NEXT: mv a2, zero ; ILP32-ILP32F-FPELIM-NEXT: call va1 ; ILP32-ILP32F-FPELIM-NEXT: lw ra, 12(sp) ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, 16 @@ -433,9 +433,9 @@ define void @va1_caller() nounwind { ; ILP32-ILP32F-WITHFP-NEXT: sw ra, 12(sp) ; ILP32-ILP32F-WITHFP-NEXT: sw s0, 8(sp) ; ILP32-ILP32F-WITHFP-NEXT: addi s0, sp, 16 +; ILP32-ILP32F-WITHFP-NEXT: mv a2, zero ; ILP32-ILP32F-WITHFP-NEXT: lui a3, 261888 ; ILP32-ILP32F-WITHFP-NEXT: addi a4, zero, 2 -; ILP32-ILP32F-WITHFP-NEXT: mv a2, zero ; ILP32-ILP32F-WITHFP-NEXT: call va1 ; ILP32-ILP32F-WITHFP-NEXT: lw s0, 8(sp) ; ILP32-ILP32F-WITHFP-NEXT: lw ra, 12(sp) @@ -446,9 +446,9 @@ define void @va1_caller() nounwind { ; RV32D-ILP32-ILP32F-ILP32D-FPELIM: # %bb.0: ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, -16 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw ra, 12(sp) +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: mv a2, zero ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lui a3, 261888 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a4, zero, 2 -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: mv a2, zero ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call va1 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw ra, 12(sp) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, 16 @@ -498,10 +498,10 @@ define i64 @va2(i8 *%fmt, ...) nounwind { ; ILP32-ILP32F-FPELIM-NEXT: sw a3, 28(sp) ; ILP32-ILP32F-FPELIM-NEXT: sw a2, 24(sp) ; ILP32-ILP32F-FPELIM-NEXT: sw a1, 20(sp) -; ILP32-ILP32F-FPELIM-NEXT: addi a0, sp, 27 -; ILP32-ILP32F-FPELIM-NEXT: andi a1, a0, -8 ; ILP32-ILP32F-FPELIM-NEXT: addi a0, sp, 35 ; ILP32-ILP32F-FPELIM-NEXT: sw a0, 12(sp) +; ILP32-ILP32F-FPELIM-NEXT: addi a0, sp, 27 +; ILP32-ILP32F-FPELIM-NEXT: andi a1, a0, -8 ; ILP32-ILP32F-FPELIM-NEXT: lw a0, 0(a1) ; ILP32-ILP32F-FPELIM-NEXT: ori a1, a1, 4 ; ILP32-ILP32F-FPELIM-NEXT: lw a1, 0(a1) @@ -521,10 +521,10 @@ define i64 @va2(i8 *%fmt, ...) 
nounwind { ; ILP32-ILP32F-WITHFP-NEXT: sw a3, 12(s0) ; ILP32-ILP32F-WITHFP-NEXT: sw a2, 8(s0) ; ILP32-ILP32F-WITHFP-NEXT: sw a1, 4(s0) -; ILP32-ILP32F-WITHFP-NEXT: addi a0, s0, 11 -; ILP32-ILP32F-WITHFP-NEXT: andi a1, a0, -8 ; ILP32-ILP32F-WITHFP-NEXT: addi a0, s0, 19 ; ILP32-ILP32F-WITHFP-NEXT: sw a0, -12(s0) +; ILP32-ILP32F-WITHFP-NEXT: addi a0, s0, 11 +; ILP32-ILP32F-WITHFP-NEXT: andi a1, a0, -8 ; ILP32-ILP32F-WITHFP-NEXT: lw a0, 0(a1) ; ILP32-ILP32F-WITHFP-NEXT: ori a1, a1, 4 ; ILP32-ILP32F-WITHFP-NEXT: lw a1, 0(a1) @@ -543,10 +543,10 @@ define i64 @va2(i8 *%fmt, ...) nounwind { ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a3, 28(sp) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a2, 24(sp) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a1, 20(sp) -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, sp, 27 -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: andi a1, a0, -8 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, sp, 35 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a0, 12(sp) +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, sp, 27 +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: andi a1, a0, -8 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw a0, 0(a1) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: ori a1, a1, 4 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw a1, 0(a1) @@ -556,6 +556,8 @@ define i64 @va2(i8 *%fmt, ...) nounwind { ; LP64-LP64F-LP64D-FPELIM-LABEL: va2: ; LP64-LP64F-LP64D-FPELIM: # %bb.0: ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -80 +; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, sp, 24 +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 8(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 72(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 64(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 56(sp) @@ -563,8 +565,6 @@ define i64 @va2(i8 *%fmt, ...) nounwind { ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 24(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, sp, 24 -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 8(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: lw a0, 8(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 7 ; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a0, 32 @@ -585,6 +585,8 @@ define i64 @va2(i8 *%fmt, ...) nounwind { ; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 24(sp) ; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 16(sp) ; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 32 +; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, s0, 8 +; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, -24(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a7, 56(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a6, 48(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a5, 40(s0) @@ -592,8 +594,6 @@ define i64 @va2(i8 *%fmt, ...) nounwind { ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, 24(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 16(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, 8(s0) -; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, s0, 8 -; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, -24(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: lw a0, -24(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, a0, 7 ; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a0, 32 @@ -705,10 +705,10 @@ define i64 @va2_va_arg(i8 *%fmt, ...) 
nounwind { ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 48(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp) -; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 24(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, sp, 24 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, a1, 8 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp) +; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 24(sp) ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80 ; LP64-LP64F-LP64D-FPELIM-NEXT: ret ; @@ -725,10 +725,10 @@ define i64 @va2_va_arg(i8 *%fmt, ...) nounwind { ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a4, 32(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, 24(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 16(s0) -; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, 8(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, s0, 8 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, a1, 8 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, -24(s0) +; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, 8(s0) ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp) ; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 24(sp) ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96 @@ -747,8 +747,8 @@ define void @va2_caller() nounwind { ; ILP32-ILP32F-FPELIM: # %bb.0: ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, -16 ; ILP32-ILP32F-FPELIM-NEXT: sw ra, 12(sp) -; ILP32-ILP32F-FPELIM-NEXT: lui a3, 261888 ; ILP32-ILP32F-FPELIM-NEXT: mv a2, zero +; ILP32-ILP32F-FPELIM-NEXT: lui a3, 261888 ; ILP32-ILP32F-FPELIM-NEXT: call va2 ; ILP32-ILP32F-FPELIM-NEXT: lw ra, 12(sp) ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, 16 @@ -760,8 +760,8 @@ define void @va2_caller() nounwind { ; ILP32-ILP32F-WITHFP-NEXT: sw ra, 12(sp) ; ILP32-ILP32F-WITHFP-NEXT: sw s0, 8(sp) ; ILP32-ILP32F-WITHFP-NEXT: addi s0, sp, 16 -; ILP32-ILP32F-WITHFP-NEXT: lui a3, 261888 ; ILP32-ILP32F-WITHFP-NEXT: mv a2, zero +; ILP32-ILP32F-WITHFP-NEXT: lui a3, 261888 ; ILP32-ILP32F-WITHFP-NEXT: call va2 ; ILP32-ILP32F-WITHFP-NEXT: lw s0, 8(sp) ; ILP32-ILP32F-WITHFP-NEXT: lw ra, 12(sp) @@ -772,8 +772,8 @@ define void @va2_caller() nounwind { ; RV32D-ILP32-ILP32F-ILP32D-FPELIM: # %bb.0: ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, -16 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw ra, 12(sp) -; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lui a3, 261888 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: mv a2, zero +; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lui a3, 261888 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call va2 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw ra, 12(sp) ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, 16 @@ -819,16 +819,16 @@ define i64 @va3(i32 %a, i64 %b, ...) 
nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT: sw a5, 20(sp)
 ; ILP32-ILP32F-FPELIM-NEXT: sw a4, 16(sp)
 ; ILP32-ILP32F-FPELIM-NEXT: sw a3, 12(sp)
+; ILP32-ILP32F-FPELIM-NEXT: addi a0, sp, 27
+; ILP32-ILP32F-FPELIM-NEXT: sw a0, 4(sp)
 ; ILP32-ILP32F-FPELIM-NEXT: addi a0, sp, 19
 ; ILP32-ILP32F-FPELIM-NEXT: andi a0, a0, -8
-; ILP32-ILP32F-FPELIM-NEXT: addi a3, sp, 27
-; ILP32-ILP32F-FPELIM-NEXT: sw a3, 4(sp)
-; ILP32-ILP32F-FPELIM-NEXT: lw a3, 0(a0)
-; ILP32-ILP32F-FPELIM-NEXT: ori a0, a0, 4
-; ILP32-ILP32F-FPELIM-NEXT: lw a4, 0(a0)
-; ILP32-ILP32F-FPELIM-NEXT: add a0, a1, a3
+; ILP32-ILP32F-FPELIM-NEXT: ori a3, a0, 4
+; ILP32-ILP32F-FPELIM-NEXT: lw a3, 0(a3)
+; ILP32-ILP32F-FPELIM-NEXT: add a2, a2, a3
+; ILP32-ILP32F-FPELIM-NEXT: lw a0, 0(a0)
+; ILP32-ILP32F-FPELIM-NEXT: add a0, a1, a0
 ; ILP32-ILP32F-FPELIM-NEXT: sltu a1, a0, a1
-; ILP32-ILP32F-FPELIM-NEXT: add a2, a2, a4
 ; ILP32-ILP32F-FPELIM-NEXT: add a1, a2, a1
 ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, 32
 ; ILP32-ILP32F-FPELIM-NEXT: ret
@@ -844,16 +844,16 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT: sw a5, 12(s0)
 ; ILP32-ILP32F-WITHFP-NEXT: sw a4, 8(s0)
 ; ILP32-ILP32F-WITHFP-NEXT: sw a3, 4(s0)
+; ILP32-ILP32F-WITHFP-NEXT: addi a0, s0, 19
+; ILP32-ILP32F-WITHFP-NEXT: sw a0, -12(s0)
 ; ILP32-ILP32F-WITHFP-NEXT: addi a0, s0, 11
 ; ILP32-ILP32F-WITHFP-NEXT: andi a0, a0, -8
-; ILP32-ILP32F-WITHFP-NEXT: addi a3, s0, 19
-; ILP32-ILP32F-WITHFP-NEXT: sw a3, -12(s0)
-; ILP32-ILP32F-WITHFP-NEXT: lw a3, 0(a0)
-; ILP32-ILP32F-WITHFP-NEXT: ori a0, a0, 4
-; ILP32-ILP32F-WITHFP-NEXT: lw a4, 0(a0)
-; ILP32-ILP32F-WITHFP-NEXT: add a0, a1, a3
+; ILP32-ILP32F-WITHFP-NEXT: ori a3, a0, 4
+; ILP32-ILP32F-WITHFP-NEXT: lw a3, 0(a3)
+; ILP32-ILP32F-WITHFP-NEXT: add a2, a2, a3
+; ILP32-ILP32F-WITHFP-NEXT: lw a0, 0(a0)
+; ILP32-ILP32F-WITHFP-NEXT: add a0, a1, a0
 ; ILP32-ILP32F-WITHFP-NEXT: sltu a1, a0, a1
-; ILP32-ILP32F-WITHFP-NEXT: add a2, a2, a4
 ; ILP32-ILP32F-WITHFP-NEXT: add a1, a2, a1
 ; ILP32-ILP32F-WITHFP-NEXT: lw s0, 16(sp)
 ; ILP32-ILP32F-WITHFP-NEXT: lw ra, 20(sp)
@@ -868,16 +868,16 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a5, 20(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a4, 16(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a3, 12(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, sp, 27
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a0, 4(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, sp, 19
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: andi a0, a0, -8
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a3, sp, 27
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a3, 4(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw a3, 0(a0)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: ori a0, a0, 4
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw a4, 0(a0)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: add a0, a1, a3
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: ori a3, a0, 4
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw a3, 0(a3)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: add a2, a2, a3
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw a0, 0(a0)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: add a0, a1, a0
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sltu a1, a0, a1
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: add a2, a2, a4
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: add a1, a2, a1
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, 32
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: ret
@@ -885,15 +885,15 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
 ; LP64-LP64F-LP64D-FPELIM-LABEL: va3:
 ; LP64-LP64F-LP64D-FPELIM: # %bb.0:
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, -64
+; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, sp, 16
+; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 8(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a7, 56(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a6, 48(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 40(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 32(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 24(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, sp, 16
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: lw a0, 8(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 16(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT: lw a0, 8(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 7
 ; LP64-LP64F-LP64D-FPELIM-NEXT: slli a2, a0, 32
 ; LP64-LP64F-LP64D-FPELIM-NEXT: srli a2, a2, 32
@@ -914,15 +914,15 @@ define i64 @va3(i32 %a, i64 %b, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd ra, 24(sp)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd s0, 16(sp)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi s0, sp, 32
+; LP64-LP64F-LP64D-WITHFP-NEXT: mv a0, s0
+; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, -24(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a7, 40(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a6, 32(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a5, 24(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a4, 16(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, 8(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: mv a0, s0
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, -24(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: lw a0, -24(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 0(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT: lw a0, -24(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, a0, 7
 ; LP64-LP64F-LP64D-WITHFP-NEXT: slli a2, a0, 32
 ; LP64-LP64F-LP64D-WITHFP-NEXT: srli a2, a2, 32
@@ -973,9 +973,9 @@ define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT: addi a4, a3, 4
 ; ILP32-ILP32F-FPELIM-NEXT: sw a4, 4(sp)
 ; ILP32-ILP32F-FPELIM-NEXT: lw a3, 0(a3)
+; ILP32-ILP32F-FPELIM-NEXT: add a2, a2, a3
 ; ILP32-ILP32F-FPELIM-NEXT: add a0, a1, a0
 ; ILP32-ILP32F-FPELIM-NEXT: sltu a1, a0, a1
-; ILP32-ILP32F-FPELIM-NEXT: add a2, a2, a3
 ; ILP32-ILP32F-FPELIM-NEXT: add a1, a2, a1
 ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, 32
 ; ILP32-ILP32F-FPELIM-NEXT: ret
@@ -999,9 +999,9 @@ define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT: addi a4, a3, 4
 ; ILP32-ILP32F-WITHFP-NEXT: sw a4, -12(s0)
 ; ILP32-ILP32F-WITHFP-NEXT: lw a3, 0(a3)
+; ILP32-ILP32F-WITHFP-NEXT: add a2, a2, a3
 ; ILP32-ILP32F-WITHFP-NEXT: add a0, a1, a0
 ; ILP32-ILP32F-WITHFP-NEXT: sltu a1, a0, a1
-; ILP32-ILP32F-WITHFP-NEXT: add a2, a2, a3
 ; ILP32-ILP32F-WITHFP-NEXT: add a1, a2, a1
 ; ILP32-ILP32F-WITHFP-NEXT: lw s0, 16(sp)
 ; ILP32-ILP32F-WITHFP-NEXT: lw ra, 20(sp)
@@ -1023,9 +1023,9 @@ define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: fld ft0, 0(a0)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: fsd ft0, 8(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw a0, 12(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw a3, 8(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: add a2, a2, a0
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: add a0, a1, a3
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw a0, 8(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: add a0, a1, a0
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sltu a1, a0, a1
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: add a1, a2, a1
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, 48
@@ -1039,11 +1039,11 @@ define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a5, 40(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a4, 32(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 24(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 16(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, sp, 16
-; LP64-LP64F-LP64D-FPELIM-NEXT: ori a3, a0, 8
+; LP64-LP64F-LP64D-FPELIM-NEXT: ori a0, a0, 8
+; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 16(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: add a0, a1, a2
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 8(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 64
 ; LP64-LP64F-LP64D-FPELIM-NEXT: ret
 ;
@@ -1058,11 +1058,11 @@ define i64 @va3_va_arg(i32 %a, i64 %b, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a5, 24(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a4, 16(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, 8(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 0(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: mv a0, s0
-; LP64-LP64F-LP64D-WITHFP-NEXT: ori a3, a0, 8
+; LP64-LP64F-LP64D-WITHFP-NEXT: ori a0, a0, 8
+; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, -24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 0(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: add a0, a1, a2
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, -24(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 24(sp)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 80
@@ -1084,9 +1084,9 @@ define void @va3_caller() nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT: sw ra, 12(sp)
 ; ILP32-ILP32F-FPELIM-NEXT: addi a0, zero, 2
 ; ILP32-ILP32F-FPELIM-NEXT: addi a1, zero, 1111
-; ILP32-ILP32F-FPELIM-NEXT: lui a5, 262144
 ; ILP32-ILP32F-FPELIM-NEXT: mv a2, zero
 ; ILP32-ILP32F-FPELIM-NEXT: mv a4, zero
+; ILP32-ILP32F-FPELIM-NEXT: lui a5, 262144
 ; ILP32-ILP32F-FPELIM-NEXT: call va3
 ; ILP32-ILP32F-FPELIM-NEXT: lw ra, 12(sp)
 ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, 16
@@ -1100,9 +1100,9 @@ define void @va3_caller() nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT: addi s0, sp, 16
 ; ILP32-ILP32F-WITHFP-NEXT: addi a0, zero, 2
 ; ILP32-ILP32F-WITHFP-NEXT: addi a1, zero, 1111
-; ILP32-ILP32F-WITHFP-NEXT: lui a5, 262144
 ; ILP32-ILP32F-WITHFP-NEXT: mv a2, zero
 ; ILP32-ILP32F-WITHFP-NEXT: mv a4, zero
+; ILP32-ILP32F-WITHFP-NEXT: lui a5, 262144
 ; ILP32-ILP32F-WITHFP-NEXT: call va3
 ; ILP32-ILP32F-WITHFP-NEXT: lw s0, 8(sp)
 ; ILP32-ILP32F-WITHFP-NEXT: lw ra, 12(sp)
@@ -1115,9 +1115,9 @@ define void @va3_caller() nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw ra, 12(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, zero, 2
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a1, zero, 1111
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lui a5, 262144
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: mv a2, zero
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: mv a4, zero
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lui a5, 262144
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call va3
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw ra, 12(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, 16
@@ -1190,9 +1190,9 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT: andi a0, a0, -4
 ; ILP32-ILP32F-FPELIM-NEXT: addi a3, a0, 4
 ; ILP32-ILP32F-FPELIM-NEXT: sw a3, 4(sp)
-; ILP32-ILP32F-FPELIM-NEXT: lw a0, 0(a0)
 ; ILP32-ILP32F-FPELIM-NEXT: add a1, a1, s0
 ; ILP32-ILP32F-FPELIM-NEXT: add a1, a1, a2
+; ILP32-ILP32F-FPELIM-NEXT: lw a0, 0(a0)
 ; ILP32-ILP32F-FPELIM-NEXT: add a0, a1, a0
 ; ILP32-ILP32F-FPELIM-NEXT: lw s0, 8(sp)
 ; ILP32-ILP32F-FPELIM-NEXT: lw ra, 12(sp)
@@ -1233,9 +1233,9 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT: andi a0, a0, -4
 ; ILP32-ILP32F-WITHFP-NEXT: addi a3, a0, 4
 ; ILP32-ILP32F-WITHFP-NEXT: sw a3, -16(s0)
-; ILP32-ILP32F-WITHFP-NEXT: lw a0, 0(a0)
 ; ILP32-ILP32F-WITHFP-NEXT: add a1, a1, s1
 ; ILP32-ILP32F-WITHFP-NEXT: add a1, a1, a2
+; ILP32-ILP32F-WITHFP-NEXT: lw a0, 0(a0)
 ; ILP32-ILP32F-WITHFP-NEXT: add a0, a1, a0
 ; ILP32-ILP32F-WITHFP-NEXT: lw s1, 20(sp)
 ; ILP32-ILP32F-WITHFP-NEXT: lw s0, 24(sp)
@@ -1275,9 +1275,9 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: andi a0, a0, -4
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a3, a0, 4
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a3, 4(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw a0, 0(a0)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: add a1, a1, s0
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: add a1, a1, a2
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw a0, 0(a0)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: add a0, a1, a0
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw s0, 8(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw ra, 12(sp)
@@ -1317,9 +1317,9 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT: andi a0, a0, -4
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a3, a0, 8
 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 8(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: ld a0, 0(a0)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, a1, s0
 ; LP64-LP64F-LP64D-FPELIM-NEXT: add a1, a1, a2
+; LP64-LP64F-LP64D-FPELIM-NEXT: ld a0, 0(a0)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addw a0, a1, a0
 ; LP64-LP64F-LP64D-FPELIM-NEXT: ld s0, 16(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 24(sp)
@@ -1361,9 +1361,9 @@ define i32 @va4_va_copy(i32 %argno, ...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT: andi a0, a0, -4
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a3, a0, 8
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, -32(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: ld a0, 0(a0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: add a1, a1, s1
 ; LP64-LP64F-LP64D-WITHFP-NEXT: add a1, a1, a2
+; LP64-LP64F-LP64D-WITHFP-NEXT: ld a0, 0(a0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addw a0, a1, a0
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s1, 24(sp)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 32(sp)
@@ -1425,7 +1425,8 @@ define void @va5_aligned_stack_caller() nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT: addi a0, a0, -328
 ; ILP32-ILP32F-FPELIM-NEXT: sw a0, 36(sp)
 ; ILP32-ILP32F-FPELIM-NEXT: lui a0, 335544
-; ILP32-ILP32F-FPELIM-NEXT: addi a5, a0, 1311
+; ILP32-ILP32F-FPELIM-NEXT: addi a0, a0, 1311
+; ILP32-ILP32F-FPELIM-NEXT: sw a0, 32(sp)
 ; ILP32-ILP32F-FPELIM-NEXT: lui a0, 688509
 ; ILP32-ILP32F-FPELIM-NEXT: addi a6, a0, -2048
 ; ILP32-ILP32F-FPELIM-NEXT: addi a2, sp, 32
@@ -1434,7 +1435,6 @@ define void @va5_aligned_stack_caller() nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT: addi a3, zero, 12
 ; ILP32-ILP32F-FPELIM-NEXT: addi a4, zero, 13
 ; ILP32-ILP32F-FPELIM-NEXT: addi a7, zero, 4
-; ILP32-ILP32F-FPELIM-NEXT: sw a5, 32(sp)
 ; ILP32-ILP32F-FPELIM-NEXT: call va5_aligned_stack_callee
 ; ILP32-ILP32F-FPELIM-NEXT: lw ra, 60(sp)
 ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, 64
@@ -1470,7 +1470,8 @@ define void @va5_aligned_stack_caller() nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT: addi a0, a0, -328
 ; ILP32-ILP32F-WITHFP-NEXT: sw a0, -28(s0)
 ; ILP32-ILP32F-WITHFP-NEXT: lui a0, 335544
-; ILP32-ILP32F-WITHFP-NEXT: addi a5, a0, 1311
+; ILP32-ILP32F-WITHFP-NEXT: addi a0, a0, 1311
+; ILP32-ILP32F-WITHFP-NEXT: sw a0, -32(s0)
 ; ILP32-ILP32F-WITHFP-NEXT: lui a0, 688509
 ; ILP32-ILP32F-WITHFP-NEXT: addi a6, a0, -2048
 ; ILP32-ILP32F-WITHFP-NEXT: addi a2, s0, -32
@@ -1479,7 +1480,6 @@ define void @va5_aligned_stack_caller() nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT: addi a3, zero, 12
 ; ILP32-ILP32F-WITHFP-NEXT: addi a4, zero, 13
 ; ILP32-ILP32F-WITHFP-NEXT: addi a7, zero, 4
-; ILP32-ILP32F-WITHFP-NEXT: sw a5, -32(s0)
 ; ILP32-ILP32F-WITHFP-NEXT: call va5_aligned_stack_callee
 ; ILP32-ILP32F-WITHFP-NEXT: lw s0, 56(sp)
 ; ILP32-ILP32F-WITHFP-NEXT: lw ra, 60(sp)
@@ -1514,7 +1514,8 @@ define void @va5_aligned_stack_caller() nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, a0, -328
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a0, 36(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lui a0, 335544
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a5, a0, 1311
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a0, a0, 1311
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a0, 32(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lui a0, 688509
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a6, a0, -2048
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a2, sp, 32
@@ -1523,7 +1524,6 @@ define void @va5_aligned_stack_caller() nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a3, zero, 12
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a4, zero, 13
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a7, zero, 4
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a5, 32(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: call va5_aligned_stack_callee
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: lw ra, 60(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, 64
@@ -1546,33 +1546,33 @@ define void @va5_aligned_stack_caller() nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT: slli a0, a0, 14
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 655
 ; LP64-LP64F-LP64D-FPELIM-NEXT: slli a0, a0, 12
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi t0, a0, 1475
-; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, 1192
-; LP64-LP64F-LP64D-FPELIM-NEXT: addiw a0, a0, 381
-; LP64-LP64F-LP64D-FPELIM-NEXT: slli a0, a0, 12
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a6, a0, -2048
+; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 1475
+; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 0(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, 1048248
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addiw a0, a0, 1311
 ; LP64-LP64F-LP64D-FPELIM-NEXT: slli a0, a0, 12
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, -1147
 ; LP64-LP64F-LP64D-FPELIM-NEXT: slli a0, a0, 13
+; LP64-LP64F-LP64D-FPELIM-NEXT: lui a1, 512
+; LP64-LP64F-LP64D-FPELIM-NEXT: addiw a1, a1, 73
+; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a1, 15
+; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, a1, -1311
+; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a1, 12
+; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, a1, 1147
+; LP64-LP64F-LP64D-FPELIM-NEXT: slli a1, a1, 14
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 983
 ; LP64-LP64F-LP64D-FPELIM-NEXT: slli a0, a0, 15
+; LP64-LP64F-LP64D-FPELIM-NEXT: lui a2, 1192
+; LP64-LP64F-LP64D-FPELIM-NEXT: addiw a2, a2, 381
+; LP64-LP64F-LP64D-FPELIM-NEXT: slli a2, a2, 12
+; LP64-LP64F-LP64D-FPELIM-NEXT: addi a6, a2, -2048
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a2, a0, 1311
-; LP64-LP64F-LP64D-FPELIM-NEXT: lui a0, 512
-; LP64-LP64F-LP64D-FPELIM-NEXT: addiw a0, a0, 73
-; LP64-LP64F-LP64D-FPELIM-NEXT: slli a0, a0, 15
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, -1311
-; LP64-LP64F-LP64D-FPELIM-NEXT: slli a0, a0, 12
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, a0, 1147
-; LP64-LP64F-LP64D-FPELIM-NEXT: slli a0, a0, 14
-; LP64-LP64F-LP64D-FPELIM-NEXT: addi a3, a0, -1967
+; LP64-LP64F-LP64D-FPELIM-NEXT: addi a3, a1, -1967
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a0, zero, 1
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, zero, 11
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a4, zero, 12
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a5, zero, 13
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a7, zero, 14
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd t0, 0(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: call va5_aligned_stack_callee
 ; LP64-LP64F-LP64D-FPELIM-NEXT: ld ra, 40(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 48
@@ -1597,33 +1597,33 @@ define void @va5_aligned_stack_caller() nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT: slli a0, a0, 14
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, a0, 655
 ; LP64-LP64F-LP64D-WITHFP-NEXT: slli a0, a0, 12
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi t0, a0, 1475
-; LP64-LP64F-LP64D-WITHFP-NEXT: lui a0, 1192
-; LP64-LP64F-LP64D-WITHFP-NEXT: addiw a0, a0, 381
-; LP64-LP64F-LP64D-WITHFP-NEXT: slli a0, a0, 12
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a6, a0, -2048
+; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, a0, 1475
+; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, 0(sp)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: lui a0, 1048248
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addiw a0, a0, 1311
 ; LP64-LP64F-LP64D-WITHFP-NEXT: slli a0, a0, 12
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, a0, -1147
 ; LP64-LP64F-LP64D-WITHFP-NEXT: slli a0, a0, 13
+; LP64-LP64F-LP64D-WITHFP-NEXT: lui a1, 512
+; LP64-LP64F-LP64D-WITHFP-NEXT: addiw a1, a1, 73
+; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a1, 15
+; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, a1, -1311
+; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a1, 12
+; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, a1, 1147
+; LP64-LP64F-LP64D-WITHFP-NEXT: slli a1, a1, 14
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, a0, 983
 ; LP64-LP64F-LP64D-WITHFP-NEXT: slli a0, a0, 15
+; LP64-LP64F-LP64D-WITHFP-NEXT: lui a2, 1192
+; LP64-LP64F-LP64D-WITHFP-NEXT: addiw a2, a2, 381
+; LP64-LP64F-LP64D-WITHFP-NEXT: slli a2, a2, 12
+; LP64-LP64F-LP64D-WITHFP-NEXT: addi a6, a2, -2048
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a2, a0, 1311
-; LP64-LP64F-LP64D-WITHFP-NEXT: lui a0, 512
-; LP64-LP64F-LP64D-WITHFP-NEXT: addiw a0, a0, 73
-; LP64-LP64F-LP64D-WITHFP-NEXT: slli a0, a0, 15
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, a0, -1311
-; LP64-LP64F-LP64D-WITHFP-NEXT: slli a0, a0, 12
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, a0, 1147
-; LP64-LP64F-LP64D-WITHFP-NEXT: slli a0, a0, 14
-; LP64-LP64F-LP64D-WITHFP-NEXT: addi a3, a0, -1967
+; LP64-LP64F-LP64D-WITHFP-NEXT: addi a3, a1, -1967
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a0, zero, 1
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a1, zero, 11
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a4, zero, 12
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a5, zero, 13
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi a7, zero, 14
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd t0, 0(sp)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: call va5_aligned_stack_callee
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 32(sp)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 40(sp)
@@ -1650,9 +1650,9 @@ define i32 @va6_no_fixed_args(...) nounwind {
 ; ILP32-ILP32F-FPELIM-NEXT: sw a3, 28(sp)
 ; ILP32-ILP32F-FPELIM-NEXT: sw a2, 24(sp)
 ; ILP32-ILP32F-FPELIM-NEXT: sw a1, 20(sp)
-; ILP32-ILP32F-FPELIM-NEXT: sw a0, 16(sp)
 ; ILP32-ILP32F-FPELIM-NEXT: addi a1, sp, 20
 ; ILP32-ILP32F-FPELIM-NEXT: sw a1, 12(sp)
+; ILP32-ILP32F-FPELIM-NEXT: sw a0, 16(sp)
 ; ILP32-ILP32F-FPELIM-NEXT: addi sp, sp, 48
 ; ILP32-ILP32F-FPELIM-NEXT: ret
 ;
@@ -1669,9 +1669,9 @@ define i32 @va6_no_fixed_args(...) nounwind {
 ; ILP32-ILP32F-WITHFP-NEXT: sw a3, 12(s0)
 ; ILP32-ILP32F-WITHFP-NEXT: sw a2, 8(s0)
 ; ILP32-ILP32F-WITHFP-NEXT: sw a1, 4(s0)
-; ILP32-ILP32F-WITHFP-NEXT: sw a0, 0(s0)
 ; ILP32-ILP32F-WITHFP-NEXT: addi a1, s0, 4
 ; ILP32-ILP32F-WITHFP-NEXT: sw a1, -12(s0)
+; ILP32-ILP32F-WITHFP-NEXT: sw a0, 0(s0)
 ; ILP32-ILP32F-WITHFP-NEXT: lw s0, 8(sp)
 ; ILP32-ILP32F-WITHFP-NEXT: lw ra, 12(sp)
 ; ILP32-ILP32F-WITHFP-NEXT: addi sp, sp, 48
@@ -1687,9 +1687,9 @@ define i32 @va6_no_fixed_args(...) nounwind {
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a3, 28(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a2, 24(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a1, 20(sp)
-; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a0, 16(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi a1, sp, 20
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a1, 12(sp)
+; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: sw a0, 16(sp)
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: addi sp, sp, 48
 ; RV32D-ILP32-ILP32F-ILP32D-FPELIM-NEXT: ret
 ;
@@ -1703,10 +1703,10 @@ define i32 @va6_no_fixed_args(...) nounwind {
 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a3, 40(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a2, 32(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 24(sp)
-; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 16(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi a1, sp, 16
 ; LP64-LP64F-LP64D-FPELIM-NEXT: ori a1, a1, 8
 ; LP64-LP64F-LP64D-FPELIM-NEXT: sd a1, 8(sp)
+; LP64-LP64F-LP64D-FPELIM-NEXT: sd a0, 16(sp)
 ; LP64-LP64F-LP64D-FPELIM-NEXT: addi sp, sp, 80
 ; LP64-LP64F-LP64D-FPELIM-NEXT: ret
 ;
@@ -1723,10 +1723,10 @@ define i32 @va6_no_fixed_args(...) nounwind {
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a3, 24(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a2, 16(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, 8(s0)
-; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, 0(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: mv a1, s0
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ori a1, a1, 8
 ; LP64-LP64F-LP64D-WITHFP-NEXT: sd a1, -24(s0)
+; LP64-LP64F-LP64D-WITHFP-NEXT: sd a0, 0(s0)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld s0, 16(sp)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: ld ra, 24(sp)
 ; LP64-LP64F-LP64D-WITHFP-NEXT: addi sp, sp, 96
diff --git a/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll b/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll
index 7c4bbcd..12c7796 100644
--- a/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll
+++ b/llvm/test/CodeGen/RISCV/zext-with-load-is-free.ll
@@ -45,10 +45,10 @@ define i32 @test_zext_i16() nounwind {
 ; RV32I-LABEL: test_zext_i16:
 ; RV32I: # %bb.0: # %entry
 ; RV32I-NEXT: lui a0, %hi(shorts)
-; RV32I-NEXT: lhu a1, %lo(shorts)(a0)
-; RV32I-NEXT: lui a2, 16
-; RV32I-NEXT: addi a2, a2, -120
-; RV32I-NEXT: bne a1, a2, .LBB1_3
+; RV32I-NEXT: lui a1, 16
+; RV32I-NEXT: addi a1, a1, -120
+; RV32I-NEXT: lhu a2, %lo(shorts)(a0)
+; RV32I-NEXT: bne a2, a1, .LBB1_3
 ; RV32I-NEXT: # %bb.1: # %entry
 ; RV32I-NEXT: addi a0, a0, %lo(shorts)
 ; RV32I-NEXT: lhu a0, 2(a0)