From d7ae2438b9bd062159fa9bfa8e4db2b8a0d66e38 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Thu, 27 May 2021 11:28:19 -0700
Subject: [PATCH] [RISCV] Add a test showing missed opportunity to avoid a
 vsetvli in a loop.

This is another case where we need to look through a phi to prove the
vsetvli is redundant.
---
 .../CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll | 57 ++++++++++++++++++++++
 1 file changed, 57 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index 52b01ee..3300de6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -445,3 +445,60 @@ if.end:                                ; preds = %if.else, %if.then
   ret <vscale x 1 x double> %3
 }
 
+; FIXME: The vsetvli in for.body can be removed; it is made redundant by its
+; predecessors, but we need to look through a PHI to prove it.
+define void @saxpy_vec(i64 %n, float %a, float* nocapture readonly %x, float* nocapture %y) {
+; CHECK-LABEL: saxpy_vec:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a4, a0, e32,m8,ta,mu
+; CHECK-NEXT:    beqz a4, .LBB8_3
+; CHECK-NEXT:  # %bb.1: # %for.body.preheader
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:  .LBB8_2: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vsetvli zero, a4, e32,m8,ta,mu
+; CHECK-NEXT:    vle32.v v8, (a2)
+; CHECK-NEXT:    vle32.v v16, (a3)
+; CHECK-NEXT:    slli a1, a4, 2
+; CHECK-NEXT:    add a2, a2, a1
+; CHECK-NEXT:    vsetvli zero, zero, e32,m8,tu,mu
+; CHECK-NEXT:    vfmacc.vf v16, ft0, v8
+; CHECK-NEXT:    vsetvli zero, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vse32.v v16, (a3)
+; CHECK-NEXT:    sub a0, a0, a4
+; CHECK-NEXT:    vsetvli a4, a0, e32,m8,ta,mu
+; CHECK-NEXT:    add a3, a3, a1
+; CHECK-NEXT:    bnez a4, .LBB8_2
+; CHECK-NEXT:  .LBB8_3: # %for.end
+; CHECK-NEXT:    ret
+entry:
+  %0 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %n, i64 2, i64 3)
+  %cmp.not13 = icmp eq i64 %0, 0
+  br i1 %cmp.not13, label %for.end, label %for.body
+
+for.body:                                         ; preds = %for.body, %entry
+  %1 = phi i64 [ %7, %for.body ], [ %0, %entry ]
+  %n.addr.016 = phi i64 [ %sub, %for.body ], [ %n, %entry ]
+  %x.addr.015 = phi float* [ %add.ptr, %for.body ], [ %x, %entry ]
+  %y.addr.014 = phi float* [ %add.ptr1, %for.body ], [ %y, %entry ]
+  %2 = bitcast float* %x.addr.015 to <vscale x 16 x float>*
+  %3 = tail call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float>* %2, i64 %1)
+  %add.ptr = getelementptr inbounds float, float* %x.addr.015, i64 %1
+  %4 = bitcast float* %y.addr.014 to <vscale x 16 x float>*
+  %5 = tail call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float>* %4, i64 %1)
+  %6 = tail call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(<vscale x 16 x float> %5, float %a, <vscale x 16 x float> %3, i64 %1)
+  tail call void @llvm.riscv.vse.nxv16f32.i64(<vscale x 16 x float> %6, <vscale x 16 x float>* %4, i64 %1)
+  %add.ptr1 = getelementptr inbounds float, float* %y.addr.014, i64 %1
+  %sub = sub i64 %n.addr.016, %1
+  %7 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %sub, i64 2, i64 3)
+  %cmp.not = icmp eq i64 %7, 0
+  br i1 %cmp.not, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body, %entry
+  ret void
+}
+
+declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg)
+declare <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float>* nocapture, i64)
+declare <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(<vscale x 16 x float>, float, <vscale x 16 x float>, i64)
+declare void @llvm.riscv.vse.nxv16f32.i64(<vscale x 16 x float>, <vscale x 16 x float>* nocapture, i64)
-- 
2.7.4
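
P.S. For context, the IR in this test is the usual vector-length-agnostic SAXPY
strip-mining loop: the vsetvli at the bottom of the loop computes VL for the
next iteration and feeds it back through the phi (%1). Below is a minimal C
sketch of that source shape, not taken from the patch, assuming the early
rvv-intrinsic-doc intrinsic spellings (newer toolchains prefix these names
with __riscv_):

  #include <riscv_vector.h>

  /* Illustrative reconstruction only; names and signatures follow the
     early RVV intrinsics draft. */
  void saxpy_vec(size_t n, float a, const float *x, float *y) {
    size_t vl;
    /* vsetvl_e32m8 returns the number of e32 elements to process this
       iteration (VL, capped at VLMAX for SEW=32/LMUL=8); 0 ends the loop. */
    for (; (vl = vsetvl_e32m8(n)) != 0; n -= vl, x += vl, y += vl) {
      vfloat32m8_t vx = vle32_v_f32m8(x, vl);  /* load a strip of x */
      vfloat32m8_t vy = vle32_v_f32m8(y, vl);  /* load a strip of y */
      vy = vfmacc_vf_f32m8(vy, a, vx, vl);     /* vy += a * vx */
      vse32_v_f32m8(y, vy, vl);                /* store the strip back to y */
    }
  }

The vsetvli the FIXME points at is the one at the top of for.body: both paths
into the block (the preheader and the backedge) arrive with VL and vtype
already set by an identical vsetvli, so re-setting them is redundant once the
insertion pass can reason through the phi.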