From: Philip Reames <preames@rivosinc.com>
Date: Mon, 9 May 2022 15:49:40 +0000 (-0700)
Subject: [riscv] Add a few more vsetvli insertion tests
X-Git-Tag: upstream/15.0.7~8235
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=11728db9e6eadd2831e055e378a4d1d3a83d04df;p=platform%2Fupstream%2Fllvm.git

[riscv] Add a few more vsetvli insertion tests

These are aimed at a possible miscompile spotted in the vmv.s.x/f mutation
case, but it appears this is a latent bug. Or at least, I haven't been able
to construct a case with compatible policy flags via intrinsics.
---

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index 74e8a6b..aa826ec 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -350,6 +350,53 @@ entry:
   ret double %c3
 }
 
+define <vscale x 1 x double> @test18(<vscale x 1 x double> %a, double %b) nounwind {
+; CHECK-LABEL: test18:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli a0, 6, e64, m1, tu, mu
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vfmv.s.f v9, fa0
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v8, v8
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, tu, mu
+; CHECK-NEXT:    vfmv.s.f v8, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v9, v8
+; CHECK-NEXT:    ret
+entry:
+  %x = tail call i64 @llvm.riscv.vsetvli(i64 6, i64 3, i64 0)
+  %y = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
+    <vscale x 1 x double> %a, double %b, i64 2)
+  %f2 = tail call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
+    <vscale x 1 x double> undef,
+    <vscale x 1 x double> %a,
+    <vscale x 1 x double> %a,
+    i64 %x)
+  %y2 = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
+    <vscale x 1 x double> %f2, double %b, i64 1)
+  %res = fadd <vscale x 1 x double> %y, %y2
+  ret <vscale x 1 x double> %res
+}
+
+define <vscale x 1 x double> @test19(<vscale x 1 x double> %a, double %b) nounwind {
+; CHECK-LABEL: test19:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 6, e64, m1, tu, mu
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vfmv.s.f v9, fa0
+; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vfadd.vv v8, v9, v8
+; CHECK-NEXT:    ret
+entry:
+  %x = tail call i64 @llvm.riscv.vsetvli(i64 6, i64 3, i64 0)
+  %y = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(
+    <vscale x 1 x double> %a, double %b, i64 2)
+  %y2 = fadd <vscale x 1 x double> %y, %a
+  ret <vscale x 1 x double> %y2
+}
+
+
 declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,