From e0110a47402348f63b7e10dcf210bdf342a3bf9c Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Wed, 23 Dec 2020 10:01:43 -0800
Subject: [PATCH] [RISCV] Add intrinsics for vfmv.v.f

Also include a special case pattern to use vmv.v.x vd, zero when the
argument is 0.0.

Reviewed By: khchen

Differential Revision: https://reviews.llvm.org/D93672
---
 llvm/include/llvm/IR/IntrinsicsRISCV.td         |   4 +
 llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td |  31 +-
 llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll    | 421 ++++++++++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll    | 421 ++++++++++++++++++++++++
 4 files changed, 876 insertions(+), 1 deletion(-)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll

diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 13f8837..37e463d 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -528,6 +528,10 @@ let TargetPrefix = "riscv" in {
                     [IntrNoMem]>, RISCVVIntrinsic {
     let ExtendOperand = 1;
   }
+  def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty],
+                                     [LLVMVectorElementType<0>, llvm_anyint_ty],
+                                     [IntrNoMem]>, RISCVVIntrinsic;
+
   def int_riscv_vmv_x_s : Intrinsic<[LLVMVectorElementType<0>],
                                     [llvm_anyint_ty],
                                     [IntrNoMem]>, RISCVVIntrinsic;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 50142b0..c3b720a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -779,6 +779,14 @@ multiclass VPseudoUnaryV_V_X_I_NoDummyMask {
   }
 }

+multiclass VPseudoUnaryV_F_NoDummyMask {
+  foreach m = MxList.m in {
+    let VLMul = m.value in {
+      def "_F_" # m.MX : VPseudoUnaryNoDummyMask<m.vrclass, FPR32>;
+    }
+  }
+}
+
 // The destination EEW is 1.
 // The source EEW is 8, 16, 32, or 64.
 // When the destination EEW is different from source EEW, we need to use
@@ -1996,11 +2004,15 @@ defm PseudoVMFGT : VPseudoBinaryM_VX;
 defm PseudoVMFGE : VPseudoBinaryM_VX;

 //===----------------------------------------------------------------------===//
+// 14.14. Vector Floating-Point Move Instruction
+//===----------------------------------------------------------------------===//
+defm PseudoVFMV_V : VPseudoUnaryV_F_NoDummyMask;
+
+//===----------------------------------------------------------------------===//
 // 14.15. Vector Floating-Point Merge Instruction
 //===----------------------------------------------------------------------===//
 defm PseudoVFMERGE : VPseudoBinaryV_XM;
-
 } // Predicates = [HasStdExtV, HasStdExtF]

 //===----------------------------------------------------------------------===//
@@ -2415,6 +2427,23 @@ defm "" : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT", AllFloatVectors>;
 defm "" : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE", AllFloatVectors>;

 //===----------------------------------------------------------------------===//
+// 14.14. Vector Floating-Point Move Instruction
+//===----------------------------------------------------------------------===//
+foreach fvti = AllFloatVectors in {
+  // If we're splatting fpimm0, use vmv.v.x vd, x0.
+  def : Pat<(fvti.Vector (int_riscv_vfmv_v_f
+                          (fvti.Scalar (fpimm0)), GPR:$vl)),
+            (!cast<Instruction>("PseudoVMV_V_X_"#fvti.LMul.MX)
+             X0, (NoX0 GPR:$vl), fvti.SEW)>;
+
+  def : Pat<(fvti.Vector (int_riscv_vfmv_v_f
+                          (fvti.Scalar fvti.ScalarRegClass:$rs2), GPR:$vl)),
+            (!cast<Instruction>("PseudoVFMV_V_F_"#fvti.LMul.MX)
+             ToFPR32<fvti.Scalar, fvti.ScalarRegClass, "rs2">.ret,
+             (NoX0 GPR:$vl), fvti.SEW)>;
+}
+
+//===----------------------------------------------------------------------===//
 // 14.15. Vector Floating-Point Merge Instruction
 //===----------------------------------------------------------------------===//
 // We can use vmerge.vvm to support vector-vector vfmerge.
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
new file mode 100644
index 0000000..521c6b4
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv32.ll
@@ -0,0 +1,421 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -target-abi ilp32d -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
+  half,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfmv.v.f_f_nxv1f16_f16(half %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, a0, e16,mf4,ta,mu
+; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
+  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.v.f.nxv1f16.f16(
+    half %0,
+    i32 %1)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
+  half,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfmv.v.f_f_nxv2f16_f16(half %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, a0, e16,mf2,ta,mu
+; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
+  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.v.f.nxv2f16.f16(
+    half %0,
+    i32 %1)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.f16(
+  half,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vfmv.v.f_f_nxv4f16_f16(half %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, a0, e16,m1,ta,mu
+; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
+  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.v.f.nxv4f16.f16(
+    half %0,
+    i32 %1)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.f16(
+  half,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vfmv.v.f_f_nxv8f16_f16(half %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, a0, e16,m2,ta,mu
+; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
+  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.v.f.nxv8f16.f16(
+    half %0,
+    i32 %1)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.f16(
+  half,
+  i32);
+
+define <vscale x 16 x half> @intrinsic_vfmv.v.f_f_nxv16f16_f16(half %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, a0, e16,m4,ta,mu
+; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
+  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.v.f.nxv16f16.f16(
+    half %0,
+    i32 %1)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.f16(
+  half,
+  i32);
+
+define <vscale x 32 x half> @intrinsic_vfmv.v.f_f_nxv32f16_f16(half %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16_f16
+; CHECK: vsetvli {{.*}}, a0, e16,m8,ta,mu
+; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
+  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.v.f.nxv32f16.f16(
+    half %0,
+    i32 %1)
+
+  ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.f32(
+  float,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vfmv.v.f_f_nxv1f32_f32(float %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, a0, e32,mf2,ta,mu
+; CHECK: vfmv.v.f {{v[0-9]+}}, fa0
+  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.v.f.nxv1f32.f32(
+    float %0,
+    i32 %1)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(
+  float,
+  i32);
+
+define <vscale x 2 x float>
@intrinsic_vfmv.v.f_f_nxv2f32_f32(float %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,m1,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv2f32.f32( + float %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv4f32.f32( + float, + i32); + +define @intrinsic_vfmv.v.f_f_nxv4f32_f32(float %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,m2,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv4f32.f32( + float %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv8f32.f32( + float, + i32); + +define @intrinsic_vfmv.v.f_f_nxv8f32_f32(float %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,m4,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv8f32.f32( + float %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv16f32.f32( + float, + i32); + +define @intrinsic_vfmv.v.f_f_nxv16f32_f32(float %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,m8,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv16f32.f32( + float %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv1f64.f64( + double, + i32); + +define @intrinsic_vfmv.v.f_f_nxv1f64_f64(double %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, a0, e64,m1,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv1f64.f64( + double %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv2f64.f64( + double, + i32); + +define @intrinsic_vfmv.v.f_f_nxv2f64_f64(double %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, a0, e64,m2,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv2f64.f64( + double %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv4f64.f64( + double, + i32); + +define @intrinsic_vfmv.v.f_f_nxv4f64_f64(double %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, a0, e64,m4,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv4f64.f64( + double %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv8f64.f64( + double, + i32); + +define @intrinsic_vfmv.v.f_f_nxv8f64_f64(double %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, a0, e64,m8,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv8f64.f64( + double %0, + i32 %1) + + ret %a +} + +define @intrinsic_vfmv.v.f_zero_nxv1f16_f16(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv1f16.f16( + half 0.0, + i32 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv2f16_f16(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv2f16.f16( + half 0.0, + i32 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv4f16_f16(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, a0, e16,m1,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = 
call @llvm.riscv.vfmv.v.f.nxv4f16.f16( + half 0.0, + i32 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv8f16_f16(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, a0, e16,m2,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv8f16.f16( + half 0.0, + i32 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv16f16_f16(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, a0, e16,m4,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv16f16.f16( + half 0.0, + i32 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv32f16_f16(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, a0, e16,m8,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv32f16.f16( + half 0.0, + i32 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv1f32_f32(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv1f32.f32( + float 0.0, + i32 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv2f32_f32(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,m1,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv2f32.f32( + float 0.0, + i32 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv4f32_f32(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,m2,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv4f32.f32( + float 0.0, + i32 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv8f32_f32(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,m4,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv8f32.f32( + float 0.0, + i32 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv16f32_f32(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,m8,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv16f32.f32( + float 0.0, + i32 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv1f64_f64(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, a0, e64,m1,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv1f64.f64( + double 0.0, + i32 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv2f64_f64(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, a0, e64,m2,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv2f64.f64( + double 0.0, + i32 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv4f64_f64(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, a0, e64,m4,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv4f64.f64( + double 0.0, + i32 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv8f64_f64(i32 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, a0, e64,m8,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv8f64.f64( + double 0.0, + i32 %0) + + ret %a +} diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll new file mode 100644 index 0000000..525fa09 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.v.f-rv64.ll @@ -0,0 +1,421 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -target-abi lp64d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfmv.v.f.nxv1f16.f16( + half, + i64); + +define @intrinsic_vfmv.v.f_f_nxv1f16_f16(half %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv1f16.f16( + half %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv2f16.f16( + half, + i64); + +define @intrinsic_vfmv.v.f_f_nxv2f16_f16(half %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv2f16.f16( + half %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv4f16.f16( + half, + i64); + +define @intrinsic_vfmv.v.f_f_nxv4f16_f16(half %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, a0, e16,m1,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv4f16.f16( + half %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv8f16.f16( + half, + i64); + +define @intrinsic_vfmv.v.f_f_nxv8f16_f16(half %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, a0, e16,m2,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv8f16.f16( + half %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv16f16.f16( + half, + i64); + +define @intrinsic_vfmv.v.f_f_nxv16f16_f16(half %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, a0, e16,m4,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv16f16.f16( + half %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv32f16.f16( + half, + i64); + +define @intrinsic_vfmv.v.f_f_nxv32f16_f16(half %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, a0, e16,m8,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv32f16.f16( + half %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv1f32.f32( + float, + i64); + +define @intrinsic_vfmv.v.f_f_nxv1f32_f32(float %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv1f32.f32( + float %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv2f32.f32( + float, + i64); + +define @intrinsic_vfmv.v.f_f_nxv2f32_f32(float %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,m1,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv2f32.f32( + float %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv4f32.f32( + float, + i64); + +define @intrinsic_vfmv.v.f_f_nxv4f32_f32(float %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,m2,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv4f32.f32( + float %0, + i64 %1) + + ret %a +} + 
+declare @llvm.riscv.vfmv.v.f.nxv8f32.f32( + float, + i64); + +define @intrinsic_vfmv.v.f_f_nxv8f32_f32(float %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,m4,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv8f32.f32( + float %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv16f32.f32( + float, + i64); + +define @intrinsic_vfmv.v.f_f_nxv16f32_f32(float %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,m8,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv16f32.f32( + float %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv1f64.f64( + double, + i64); + +define @intrinsic_vfmv.v.f_f_nxv1f64_f64(double %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, a0, e64,m1,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv1f64.f64( + double %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv2f64.f64( + double, + i64); + +define @intrinsic_vfmv.v.f_f_nxv2f64_f64(double %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, a0, e64,m2,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv2f64.f64( + double %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv4f64.f64( + double, + i64); + +define @intrinsic_vfmv.v.f_f_nxv4f64_f64(double %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, a0, e64,m4,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv4f64.f64( + double %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv8f64.f64( + double, + i64); + +define @intrinsic_vfmv.v.f_f_nxv8f64_f64(double %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, a0, e64,m8,ta,mu +; CHECK: vfmv.v.f {{v[0-9]+}}, fa0 + %a = call @llvm.riscv.vfmv.v.f.nxv8f64.f64( + double %0, + i64 %1) + + ret %a +} + +define @intrinsic_vfmv.v.f_zero_nxv1f16_f16(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, a0, e16,mf4,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv1f16.f16( + half 0.0, + i64 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv2f16_f16(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, a0, e16,mf2,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv2f16.f16( + half 0.0, + i64 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv4f16_f16(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, a0, e16,m1,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv4f16.f16( + half 0.0, + i64 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv8f16_f16(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, a0, e16,m2,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv8f16.f16( + half 0.0, + i64 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv16f16_f16(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, a0, e16,m4,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv16f16.f16( + half 0.0, + i64 %0) + + 
ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv32f16_f16(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, a0, e16,m8,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv32f16.f16( + half 0.0, + i64 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv1f32_f32(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,mf2,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv1f32.f32( + float 0.0, + i64 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv2f32_f32(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,m1,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv2f32.f32( + float 0.0, + i64 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv4f32_f32(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,m2,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv4f32.f32( + float 0.0, + i64 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv8f32_f32(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,m4,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv8f32.f32( + float 0.0, + i64 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv16f32_f32(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, a0, e32,m8,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv16f32.f32( + float 0.0, + i64 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv1f64_f64(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, a0, e64,m1,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv1f64.f64( + double 0.0, + i64 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv2f64_f64(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, a0, e64,m2,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv2f64.f64( + double 0.0, + i64 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv4f64_f64(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, a0, e64,m4,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv4f64.f64( + double 0.0, + i64 %0) + + ret %a +} + +define @intrinsic_vmv.v.x_zero_nxv8f64_f64(i64 %0) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmv.v.x_zero_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, a0, e64,m8,ta,mu +; CHECK: vmv.v.x {{v[0-9]+}}, zero + %a = call @llvm.riscv.vfmv.v.f.nxv8f64.f64( + double 0.0, + i64 %0) + + ret %a +} -- 2.7.4
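For reference, a minimal IR sketch (not part of the patch) of how the new intrinsic is used and what it is expected to lower to, mirroring the tests above; the function names @splat_f32m1 and @splat_zero_f32m1 are illustrative only, and the riscv64 flags from the rv64 RUN line are assumed.

  declare <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(float, i64)

  ; Splat %x into the first %vl elements; expected to select vsetvli + "vfmv.v.f vd, fa0".
  define <vscale x 2 x float> @splat_f32m1(float %x, i64 %vl) {
    %v = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(float %x, i64 %vl)
    ret <vscale x 2 x float> %v
  }

  ; Splatting +0.0 hits the special-case pattern and is expected to select
  ; "vmv.v.x vd, zero" instead of first materializing 0.0 in an FPR.
  define <vscale x 2 x float> @splat_zero_f32m1(i64 %vl) {
    %v = call <vscale x 2 x float> @llvm.riscv.vfmv.v.f.nxv2f32.f32(float 0.0, i64 %vl)
    ret <vscale x 2 x float> %v
  }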