From: Philip Reames
Date: Thu, 5 May 2022 14:35:09 +0000 (-0700)
Subject: [riscv] Use X0 for destination of VSETVLI instruction if result unused
X-Git-Tag: upstream/15.0.7~8562
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=042a7a5f0da8beb89cbbfe2f1c3a155e654d9375;p=platform%2Fupstream%2Fllvm.git

[riscv] Use X0 for destination of VSETVLI instruction if result unused

If the GPR destination register of a VSETVLI instruction is unused, we can
replace it with X0. This discards the result and thus reduces register
pressure.

After the core insertion/lowering algorithm has run, many user-written
VSETVLIs have an unused GPR result (VTYPE/VLEN is now read explicitly
instead), so this kicks in for most tests that use a vsetvli intrinsic for
fixed-length vectorization. (Vscale vectorization generally uses the GPR
result to know how far to e.g. advance pointers in a loop, and those uses
are not removed.) When inserting VSETVLIs to lower pseudos, we already
prefer the X0 form.

Differential Revision: https://reviews.llvm.org/D124961
---
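Note for readers of the patch (not part of the commit itself): the core transform below is small, and the sketch here restates it as a standalone helper against the usual LLVM MachineIR APIs (MachineRegisterInfo, MachineOperand). The function name dropDeadVSETVLIDefs and the abbreviated includes are illustrative assumptions, not code from this change.

// Sketch only: mirrors the cleanup loop added by this patch.
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
// RISCV::PseudoVSETVLI, RISCV::PseudoVSETIVLI and RISCV::X0 come from the
// RISC-V backend headers (omitted here for brevity).

using namespace llvm;

// Rewrite vsetvli/vsetivli pseudos whose scalar result is never read so that
// they write the new VL to X0, which simply discards it. Destinations that
// are already X0 are skipped: there is nothing to rewrite, and the
// X0-destination/X0-source form has its own "keep the current VL" meaning.
static bool dropDeadVSETVLIDefs(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  bool Changed = false;
  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      if (MI.getOpcode() != RISCV::PseudoVSETVLI &&
          MI.getOpcode() != RISCV::PseudoVSETIVLI)
        continue;
      // Operand 0 is the GPR destination that receives the new VL.
      Register DestReg = MI.getOperand(0).getReg();
      // use_nodbg_empty ignores debug-only uses, so a value referenced
      // solely by DBG_VALUE instructions still counts as dead.
      if (DestReg != RISCV::X0 && MRI.use_nodbg_empty(DestReg)) {
        MI.getOperand(0).setReg(RISCV::X0);
        Changed = true;
      }
    }
  }
  return Changed;
}

The effect on generated code is visible throughout the test updates below: a "vsetvli a0, a0, ..." whose a0 result is never read becomes "vsetvli zero, a0, ...".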
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 304d1c3..f58e087 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -1218,6 +1218,22 @@ bool RISCVInsertVSETVLI::runOnMachineFunction(MachineFunction &MF) {
     // predecessors.
     for (MachineBasicBlock &MBB : MF)
       emitVSETVLIs(MBB);
+
+    // Once we're fully done rewriting all the instructions, do a final pass
+    // through to check for VSETVLIs which write to an unused destination.
+    // For the non X0, X0 variant, we can replace the destination register
+    // with X0 to reduce register pressure.  This is really a generic
+    // optimization which can be applied to any dead def (TODO: generalize).
+    for (MachineBasicBlock &MBB : MF) {
+      for (MachineInstr &MI : MBB) {
+        if (MI.getOpcode() == RISCV::PseudoVSETVLI ||
+            MI.getOpcode() == RISCV::PseudoVSETIVLI) {
+          Register VRegDef = MI.getOperand(0).getReg();
+          if (VRegDef != RISCV::X0 && MRI->use_nodbg_empty(VRegDef))
+            MI.getOperand(0).setReg(RISCV::X0);
+        }
+      }
+    }
   }
 
   BlockInfo.clear();
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-vsetvli-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-vsetvli-intrinsics.ll
index 80425f4..cc9a55d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-vsetvli-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-vsetvli-intrinsics.ll
@@ -9,7 +9,7 @@ declare i32 @llvm.riscv.vsetvlimax.opt.i32(i32, i32)
 define void @test_vsetvli_e64mf8(i32 %avl) nounwind {
 ; CHECK-LABEL: test_vsetvli_e64mf8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e64, mf8, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, mf8, ta, mu
 ; CHECK-NEXT:    ret
   call i32 @llvm.riscv.vsetvli.i32(i32 %avl, i32 3, i32 5)
   ret void
@@ -18,7 +18,7 @@ define void @test_vsetvli_e64mf8(i32 %avl) nounwind {
 define void @test_vsetvli_e8mf2_zero_avl() nounwind {
 ; CHECK-LABEL: test_vsetvli_e8mf2_zero_avl:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a0, 0, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetivli zero, 0, e8, mf2, ta, mu
 ; CHECK-NEXT:    ret
   call i32 @llvm.riscv.vsetvli.i32(i32 0, i32 0, i32 7)
   ret void
@@ -101,7 +101,7 @@ declare @llvm.riscv.vle.nxv4i32.i32(,
 @redundant_vsetvli(i32 %avl, * %ptr) nounwind {
 ; CHECK-LABEL: redundant_vsetvli:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    ret
   %vl = call i32 @llvm.riscv.vsetvli.i32(i32 %avl, i32 2, i32 1)
@@ -117,7 +117,7 @@ define @repeated_vsetvli(i32 %avl, * %ptr)
 ; CHECK-LABEL: repeated_vsetvli:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vsetvli a0, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    ret
   %vl0 = call i32 @llvm.riscv.vsetvli.i32(i32 %avl, i32 2, i32 1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-vsetvli-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-vsetvli-intrinsics.ll
index 0dedab5..0aea057 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-vsetvli-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-vsetvli-intrinsics.ll
@@ -9,7 +9,7 @@ declare i64 @llvm.riscv.vsetvlimax.opt.i64(i64, i64)
 define void @test_vsetvli_e8m1(i64 %avl) nounwind {
 ; CHECK-LABEL: test_vsetvli_e8m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    ret
   call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 0, i64 0)
   ret void
@@ -18,7 +18,7 @@ define void @test_vsetvli_e8m1(i64 %avl) nounwind {
 define void @test_vsetvli_e16mf4(i64 %avl) nounwind {
 ; CHECK-LABEL: test_vsetvli_e16mf4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    ret
   call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 1, i64 6)
   ret void
@@ -27,7 +27,7 @@ define void @test_vsetvli_e16mf4(i64 %avl) nounwind {
 define void @test_vsetvli_e32mf8_zero_avl() nounwind {
 ; CHECK-LABEL: test_vsetvli_e32mf8_zero_avl:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a0, 0, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetivli zero, 0, e16, mf4, ta, mu
 ; CHECK-NEXT:    ret
   call i64 @llvm.riscv.vsetvli.i64(i64 0, i64 1, i64 6)
   ret void
@@ -119,7 +119,7 @@ declare @llvm.riscv.vle.nxv4i32.i64(,
 @redundant_vsetvli(i64 %avl, * %ptr) nounwind {
 ; CHECK-LABEL: redundant_vsetvli:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    ret
   %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 1)
@@ -135,7 +135,7 @@ define @repeated_vsetvli(i64 %avl, * %ptr)
 ; CHECK-LABEL: repeated_vsetvli:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vsetvli a0, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a1)
 ; CHECK-NEXT:    ret
   %vl0 = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 2, i64 1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index 5490a21..340f92f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -23,7 +23,7 @@ declare void @llvm.riscv.vse.nxv2f32(, *
 define @test1(i64 %avl, i8 zeroext %cond, %a, %b) nounwind {
 ; CHECK-LABEL: test1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    beqz a1, .LBB0_2
 ; CHECK-NEXT:  # %bb.1: # %if.then
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
@@ -54,7 +54,7 @@ if.end: ; preds = %if.else, %if.then
 define @test2(i64 %avl, i8 zeroext %cond, %a, %b) nounwind {
 ; CHECK-LABEL: test2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    beqz a1, .LBB1_2
 ; CHECK-NEXT:  # %bb.1: # %if.then
 ; CHECK-NEXT:    vfadd.vv v9, v8, v9
@@ -180,7 +180,7 @@ define @test5(i64 %avl, i8 zeroext %cond, @test6(i64 %avl, i8 zeroext %cond, @test6(i64 %avl, i8 zeroext %cond, @llvm.riscv.vle.mask.nxv1i64(
 define @test1(i64 %avl, %a, %b) nounwind {
 ; CHECK-LABEL: test1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -34,7 +34,7 @@ entry:
 define @test2(i64 %avl, %a, %b) nounwind {
 ; CHECK-LABEL: test2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vfadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:
@@ -50,7 +50,7 @@ entry:
 define @test3(i64 %avl, %a, * %b, %c) nounwind {
 ; CHECK-LABEL: test3:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a1), v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -67,7 +67,7 @@ entry:
 define @test4(i64 %avl, %a, * %b, %c) nounwind {
 ; CHECK-LABEL: test4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a1), v0.t
 ; CHECK-NEXT:    ret
 entry:
@@ -85,7 +85,7 @@ entry:
 define @test5( %0, %1, %2, i64 %avl) nounwind {
 ; CHECK-LABEL: test5:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vmseq.vv v8, v8, v9
 ; CHECK-NEXT:    vmand.mm v0, v8, v0
 ; CHECK-NEXT:    ret
@@ -165,7 +165,7 @@ entry:
 define @test8( %a, i64 %b, %mask) nounwind {
 ; CHECK-LABEL: test8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli a1, 6, e64, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 6, e64, m1, tu, mu
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -209,7 +209,7 @@ entry:
 define @test11( %a, double %b) nounwind {
 ; CHECK-LABEL: test11:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli a0, 6, e64, m1, tu, mu
+; CHECK-NEXT:    vsetivli zero, 6, e64, m1, tu, mu
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
index 1c58f823..6556a25 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
@@ -349,7 +349,7 @@ body: |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
     ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
-    ; CHECK-NEXT: [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
+    ; CHECK-NEXT: $x0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
     ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype
     ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
     ; CHECK-NEXT: PseudoRET implicit $v8