From: Zakk Chen
Date: Thu, 17 Feb 2022 04:49:25 +0000 (-0800)
Subject: [RISCV][NFC] Add some tail agnostic tests for nomask operations.
X-Git-Tag: upstream/15.0.7~15985
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=c6a3225bb03b6afc2b63fbf13db3c100406b32ce;p=platform%2Fupstream%2Fllvm.git

[RISCV][NFC] Add some tail agnostic tests for nomask operations.

Improve test coverage for tail agnostic nomask vslidedown/up, vmv.s.x,
vfmv.s.f and vcompress.

Reviewed By: rogfer01

Differential Revision: https://reviews.llvm.org/D120008
---

diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll
index d220ba8..65b225e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll
@@ -1054,3 +1054,137 @@ entry:
 
   ret %a
 }
+
+declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; RV32-NEXT:    vslidedown.vx v8, v8, a0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; RV64-NEXT:    vslidedown.vx v8, v8, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vslideup_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; RV32-NEXT:    vslideup.vx v9, v8, a0
+; RV32-NEXT:    vmv1r.v v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vslideup_vx_nxv1i8_nxv1i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; RV64-NEXT:    vslideup.vx v9, v8, a0
+; RV64-NEXT:    vmv1r.v v8, v9
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    iXLen %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64>, i64, iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vmv.s.x_x_nxv1i64(i64 %0, iXLen %1) nounwind {
+; RV32-LABEL: intrinsic_vmv.s.x_x_nxv1i64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v8, (a0), zero
+; RV32-NEXT:    vid.v v9
+; RV32-NEXT:    vmseq.vi v0, v9, 0
+; RV32-NEXT:    vmerge.vvm v8, v8, v8, v0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vmv.s.x_x_nxv1i64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v8, a0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64> undef, i64 %0, iXLen %1)
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half>, half, iXLen)
+
+define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(half %0, iXLen %1) nounwind {
+; RV32-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT:    vfmv.s.f v8, fa0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT:    vfmv.s.f v8, fa0
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half> undef, half %0, iXLen %1)
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vcompress_um_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vcompress_um_nxv1i8_nxv1i8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT:    vcompress.vm v9, v8, v0
+; RV32-NEXT:    vmv1r.v v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: intrinsic_vcompress_um_nxv1i8_nxv1i8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT:    vcompress.vm v9, v8, v0
+; RV64-NEXT:    vmv1r.v v8, v9
+; RV64-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
+    <vscale x 1 x i8> undef,
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i1> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
index 5a55268..aa42e49 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
@@ -814,21 +814,3 @@ entry:
 
   ret %a
 }
-
-; Test with undef for the dest operand. This should use tail agnostic policy.
-define <vscale x 1 x i8> @intrinsic_vcompress_um_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_um_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vcompress.vm v9, v8, v0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i1> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i8> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll
index d77f0da..51924e1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll
@@ -814,21 +814,3 @@ entry:
 
   ret %a
 }
-
-; Test with undef for the dest operand. This should use tail agnostic policy.
-define <vscale x 1 x i8> @intrinsic_vcompress_um_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_um_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vcompress.vm v9, v8, v0
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i1> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}