The goal is to support tail and mask policies in RVV builtins.
We focus on the IR part first.
If the passthru operand is undef, we use tail agnostic; otherwise we
use tail undisturbed.
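
For illustration, a minimal sketch of the resulting IR contract, using the
vfclass builtins from the tests below (example_ta/example_tu are illustrative
wrappers, and the trailing i64 0 policy operand on the masked intrinsic is
assumed to encode tail undisturbed):

  #include <riscv_vector.h>

  // Unmasked: there is no passthru at the C level, so the intrinsic call
  // carries an undef passthru operand and uses tail agnostic:
  //   call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(
  //       <vscale x 2 x i32> undef, <vscale x 2 x float> %op1, i64 %vl)
  vuint32m1_t example_ta(vfloat32m1_t op1, size_t vl) {
    return vfclass_v_u32m1(op1, vl);
  }

  // Masked: maskedoff is a non-undef passthru, so the masked intrinsic keeps
  // it and takes a trailing policy operand (i64 0, assumed tail undisturbed):
  //   call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2f32.i64(
  //       <vscale x 2 x i32> %maskedoff, <vscale x 2 x float> %op1,
  //       <vscale x 2 x i1> %mask, i64 %vl, i64 0)
  vuint32m1_t example_tu(vbool32_t mask, vuint32m1_t maskedoff,
                         vfloat32m1_t op1, size_t vl) {
    return vfclass_v_u32m1_m(mask, maskedoff, op1, vl);
  }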
My plan is to handle more complex operations in follow-up patches.
Reviewers: frasercrmck
Differential Revision: https://reviews.llvm.org/D118253
[["w", "wv"]]>;
// 12.3. Vector Integer Extension
+let HasNoMaskPassThru = true in {
let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
def vsext_vf2 : RVVIntExt<"vsext", "w", "wv", "csi">;
def vzext_vf2 : RVVIntExt<"vzext", "Uw", "UwUv", "csi">;
def vsext_vf8 : RVVIntExt<"vsext", "o", "ov", "c">;
def vzext_vf8 : RVVIntExt<"vzext", "Uo", "UoUv", "c">;
}
+}
// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
let HasMask = false, HasPolicy = false in {
defm vfwnmsac : RVVFloatingWidenTerBuiltinSet;
// 14.8. Vector Floating-Point Square-Root Instruction
+let HasNoMaskPassThru = true in {
def vfsqrt : RVVFloatingUnaryVVBuiltin;
// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
def vfrec7 : RVVFloatingUnaryVVBuiltin;
// 14.11. Vector Floating-Point MIN/MAX Instructions
-let HasNoMaskPassThru = true in {
defm vfmin : RVVFloatingBinBuiltinSet;
defm vfmax : RVVFloatingBinBuiltinSet;
}
// 14.14. Vector Floating-Point Classify Instruction
-let Name = "vfclass_v", HasPolicy = false in
+let Name = "vfclass_v", HasNoMaskPassThru = true in
def vfclass : RVVOp0Builtin<"Uv", "Uvv", "xfd">;
// 14.15. Vector Floating-Point Merge Instruction
[["f", "v", "ve"]]>;
// 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
+let HasNoMaskPassThru = true in {
def vfcvt_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_xu">;
def vfcvt_x_f_v : RVVConvToSignedBuiltin<"vfcvt_x">;
def vfcvt_rtz_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_rtz_xu">;
def vfncvt_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_f">;
def vfncvt_rod_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_rod_f">;
}
+}
// 15. Vector Reduction Operations
// 15.1. Vector Single-Width Integer Reduction Instructions
// 16.6. vmsof.m set-only-first mask bit
def vmsof : RVVMaskUnaryBuiltin;
-let HasNoMaskedOverloaded = false in {
+let HasNoMaskPassThru = true, HasNoMaskedOverloaded = false in {
// 16.8. Vector Iota Instruction
defm viota : RVVOutBuiltinSet<"viota", "csil", [["m", "Uv", "Uvm"]]>;
// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
// CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
// CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
// CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
// CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
// CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
// CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64(<vscale x 2 x float> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64(<vscale x 4 x float> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64(<vscale x 8 x float> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64(<vscale x 16 x float> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64(<vscale x 2 x float> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64(<vscale x 4 x float> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64(<vscale x 8 x float> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64(<vscale x 16 x float> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64(<vscale x 1 x double> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64(<vscale x 2 x double> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64(<vscale x 4 x double> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64(<vscale x 8 x double> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64(<vscale x 1 x double> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64(<vscale x 2 x double> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64(<vscale x 4 x double> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64(<vscale x 8 x double> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64(<vscale x 1 x float> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64(<vscale x 2 x float> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64(<vscale x 4 x float> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64(<vscale x 8 x float> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64(<vscale x 1 x float> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64(<vscale x 2 x float> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64(<vscale x 4 x float> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64(<vscale x 8 x float> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x float> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x float> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x float> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x float> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x float> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x float> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x float> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x float> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
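//
// Every vfncvt hunk above changes the same way: the unmasked intrinsic gains a
// leading passthru operand of the result type, passed as undef, while the
// C-level builtin keeps its (src, vl) signature. A minimal standalone sketch of
// such a call, assuming the test bodies invoke the like-named riscv_vector.h
// builtins; the wrapper name is hypothetical:
#include <riscv_vector.h>
#include <stddef.h>

vint32m1_t narrow_f64_to_i32(vfloat64m2_t src, size_t vl) {
  // No passthru argument at the C level; the frontend supplies the
  // <vscale x 2 x i32> undef operand seen in the updated CHECK lines.
  return vfncvt_x_f_w_i32m1(src, vl); // @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64
}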
// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) {
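//
// The three unary families above (vfrec7, vfrsqrt7, vfsqrt) share one shape:
// source and result types match, and the updated intrinsic signature is
// (passthru, op1, vl), with an undef passthru for the unmasked form. A hedged
// standalone sketch, again assuming the like-named riscv_vector.h builtins:
#include <riscv_vector.h>
#include <stddef.h>

vfloat32m1_t approx_rsqrt(vfloat32m1_t v, size_t vl) {
  v = vfsqrt_v_f32m1(v, vl);    // @llvm.riscv.vfsqrt.nxv2f32.i64, undef passthru
  return vfrec7_v_f32m1(v, vl); // @llvm.riscv.vfrec7.nxv2f32.i64, undef passthru
}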
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64(<vscale x 2 x float> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64(<vscale x 4 x float> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64(<vscale x 8 x float> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64(<vscale x 16 x float> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64(<vscale x 2 x float> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64(<vscale x 4 x float> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64(<vscale x 8 x float> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64(<vscale x 16 x float> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64(<vscale x 2 x double> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64(<vscale x 4 x double> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64(<vscale x 8 x double> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64(<vscale x 2 x double> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64(<vscale x 4 x double> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64(<vscale x 8 x double> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) {
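//
// In the widening conversions above the destination SEW is twice the source
// SEW, so the source operand occupies half the register group of the result
// (e.g. vint16mf2_t widens to vfloat32m1_t), and the undef passthru takes the
// wider destination type. A standalone sketch under the same assumption that
// the builtins come from riscv_vector.h:
#include <riscv_vector.h>
#include <stddef.h>

vfloat32m1_t widen_i16_to_f32(vint16mf2_t src, size_t vl) {
  return vfwcvt_f_x_v_f32m1(src, vl); // @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64
}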
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
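//
// vsext_vf2/vf4/vf8 sign-extend by the factor in the name, and the new leading
// undef operand carries the destination element type (i16/i32/i64 in the checks
// above and below). One more hedged sketch, with a hypothetical wrapper name
// and the builtin assumed from riscv_vector.h:
#include <riscv_vector.h>
#include <stddef.h>

vint32m1_t extend_i8_by_4(vint8mf4_t op1, size_t vl) {
  return vsext_vf4_i32m1(op1, vl); // @llvm.riscv.vsext.nxv2i32.nxv2i8.i64
}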
// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) {
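The vsext/vzext hunks above all follow one pattern: the two-operand C signature of the unmasked builtin is unchanged, while the emitted intrinsic gains a leading undef passthru of the result type, which selects tail agnostic. A minimal sketch of such a caller follows; the test bodies are elided from this diff, so the exact return statement is assumed from the test name.

#include <riscv_vector.h>

// Hypothetical caller mirroring the elided test bodies above: the
// unmasked extension builtin takes only the source and vl, and the
// lowering supplies an undef passthru (tail agnostic).
vint64m1_t widen_i8_to_i64(vint8mf8_t op1, size_t vl) {
  return vsext_vf8_i64m1(op1, vl);
}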
// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) {
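The masked hunks below differ: maskedoff is a genuine passthru, so rather than an undef operand the intrinsic gains a trailing policy operand i64 0 (tail undisturbed), consistent with the rule that a non-undef passthru selects tail undisturbed. A sketch of a masked caller, again with the intrinsic call assumed from the test name:

#include <riscv_vector.h>

// Assumed body for the masked tests: inactive and tail elements are
// taken from maskedoff, so codegen appends policy operand 0
// (tail undisturbed) instead of passing an undef passthru.
vuint32mf2_t classify_masked(vbool64_t mask, vuint32mf2_t maskedoff,
                             vfloat32mf2_t op1, size_t vl) {
  return vfclass_v_u32mf2_m(mask, maskedoff, op1, vl);
}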
// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
// CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
// CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
// CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
// CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
// CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
// CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
// CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vfclass_v_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1f16.i64(<vscale x 1 x i16> undef, <vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfclass_v_u16mf4 (vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2f16.i64(<vscale x 2 x i16> undef, <vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfclass_v_u16mf2 (vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4f16.i64(<vscale x 4 x i16> undef, <vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfclass_v_u16m1 (vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8f16.i64(<vscale x 8 x i16> undef, <vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfclass_v_u16m2 (vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16f16.i64(<vscale x 16 x i16> undef, <vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfclass_v_u16m4 (vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32f16.i64(<vscale x 32 x i16> undef, <vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfclass_v_u16m8 (vfloat16m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u16mf4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1f16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1f16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfclass_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u16mf2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2f16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2f16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfclass_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u16m1_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4f16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4f16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfclass_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u16m2_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8f16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8f16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfclass_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u16m4_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16f16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16f16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfclass_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u16m8_m(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32f16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32f16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfclass_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
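The conversion hunks that follow apply the same unmasked rule; note that the undef passthru always carries the result element type (integer for f-to-x conversions, float for x-to-f), not the source type. A sketch under the same assumption that the elided body calls the intrinsic named in the test:

#include <riscv_vector.h>

// Assumed body for the unmasked conversion tests: the result-typed
// undef passthru (<vscale x 1 x i32> here) marks the tail agnostic.
vint32mf2_t to_int(vfloat32mf2_t src, size_t vl) {
  return vfcvt_x_f_v_i32mf2(src, vl);
}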
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64(<vscale x 2 x float> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64(<vscale x 4 x float> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64(<vscale x 8 x float> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64(<vscale x 16 x float> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64(<vscale x 2 x float> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64(<vscale x 4 x float> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64(<vscale x 8 x float> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64(<vscale x 16 x float> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64(<vscale x 1 x double> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64(<vscale x 2 x double> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64(<vscale x 4 x double> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64(<vscale x 8 x double> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64(<vscale x 1 x double> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64(<vscale x 2 x double> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64(<vscale x 4 x double> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64(<vscale x 8 x double> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfcvt_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfcvt_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfcvt_x_f_v_i16m1 (vfloat16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfcvt_rtz_x_f_v_i16m1 (vfloat16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfcvt_x_f_v_i16m2 (vfloat16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfcvt_rtz_x_f_v_i16m2 (vfloat16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfcvt_x_f_v_i16m4 (vfloat16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfcvt_rtz_x_f_v_i16m4 (vfloat16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vfcvt_x_f_v_i16m8 (vfloat16m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vfcvt_rtz_x_f_v_i16m8 (vfloat16m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfcvt_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfcvt_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfcvt_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfcvt_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfcvt_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfcvt_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfcvt_f_x_v_f16mf4 (vint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfcvt_f_x_v_f16mf2 (vint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfcvt_f_x_v_f16m1 (vint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16.i64(<vscale x 8 x half> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfcvt_f_x_v_f16m2 (vint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16.i64(<vscale x 16 x half> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfcvt_f_x_v_f16m4 (vint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16.i64(<vscale x 32 x half> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfcvt_f_x_v_f16m8 (vint16m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4 (vuint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2 (vuint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfcvt_f_xu_v_f16m1 (vuint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16.i64(<vscale x 8 x half> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfcvt_f_xu_v_f16m2 (vuint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16.i64(<vscale x 16 x half> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfcvt_f_xu_v_f16m4 (vuint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16.i64(<vscale x 32 x half> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfcvt_f_xu_v_f16m8 (vuint16m8_t src, size_t vl) {
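The vfncvt hunks that follow apply the identical change to the narrowing conversions: the result element width is half the source's, so the undef passthru takes the narrowed type (e.g. <vscale x 1 x i16> undef ahead of an nxv1f32 source). An illustrative sketch (demo_ncvt is a hypothetical helper, not from the test file):

#include <riscv_vector.h>
// Hypothetical example: narrow f32 elements to i16 with the unmasked
// builtin; the lowered intrinsic's first operand is an undef passthru
// of the narrowed i16 result type.
vint16mf4_t demo_ncvt(vfloat32mf2_t src, size_t vl) {
  return vfncvt_x_f_w_i16mf4(src, vl);
}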
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64(<vscale x 1 x float> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64(<vscale x 2 x float> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64(<vscale x 4 x float> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64(<vscale x 8 x float> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64(<vscale x 1 x float> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64(<vscale x 2 x float> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64(<vscale x 4 x float> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64(<vscale x 8 x float> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x float> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x float> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x float> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x float> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x float> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x float> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x float> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x float> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
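The final battery narrows f16 sources to i8 results under the same scheme; since VLMAX is unchanged while SEW halves, the fractional LMUL halves as well (f16mf4 pairs with i8mf8). An illustrative sketch (demo_ncvt8 is a hypothetical helper; assumes half-precision vector types are available):

#include <riscv_vector.h>
// Hypothetical example: narrow f16 elements to i8; the lowered intrinsic
// again carries a leading <vscale x 1 x i8> undef passthru, as the hunks
// below check.
vint8mf8_t demo_ncvt8(vfloat16mf4_t src, size_t vl) {
  return vfncvt_x_f_w_i8mf8(src, vl);
}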
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vfncvt_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vfncvt_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vfncvt_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vfncvt_x_f_w_i8m1 (vfloat16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vfncvt_rtz_x_f_w_i8m1 (vfloat16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vfncvt_x_f_w_i8m2 (vfloat16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vfncvt_rtz_x_f_w_i8m2 (vfloat16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vfncvt_x_f_w_i8m4 (vfloat16m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vfncvt_rtz_x_f_w_i8m4 (vfloat16m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_x_w_f16mf4 (vint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_x_w_f16mf2 (vint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_x_w_f16m1 (vint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32.i64(<vscale x 8 x half> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_x_w_f16m2 (vint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32.i64(<vscale x 16 x half> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_x_w_f16m4 (vint32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4 (vuint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2 (vuint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_xu_w_f16m1 (vuint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32.i64(<vscale x 8 x half> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_xu_w_f16m2 (vuint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32.i64(<vscale x 16 x half> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_xu_w_f16m4 (vuint32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x half> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_f_f_w_f16mf4 (vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x half> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4 (vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32.i64(<vscale x 2 x half> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_f_f_w_f16mf2 (vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64(<vscale x 2 x half> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2 (vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64(<vscale x 4 x half> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_f_f_w_f16m1 (vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64(<vscale x 4 x half> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1 (vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32.i64(<vscale x 8 x half> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_f_f_w_f16m2 (vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64(<vscale x 8 x half> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2 (vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32.i64(<vscale x 16 x half> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_f_f_w_f16m4 (vfloat32m8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64(<vscale x 16 x half> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4 (vfloat32m8_t src, size_t vl) {
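Note: the vfncvt hunks above are mechanical. Each unmasked narrowing conversion
gains a leading passthru operand of the result type, filled with undef. A
minimal C-level sketch of the kind of builtin call these checks exercise; the
wrapper name `narrow` is illustrative, the intrinsic name is taken from the
tests above:

#include <riscv_vector.h>

// Unmasked narrowing convert: the C builtin exposes no passthru, so the
// generated IR passes undef and the operation is lowered tail agnostic.
vfloat16m1_t narrow(vfloat32m2_t src, size_t vl) {
  return vfncvt_f_f_w_f16m1(src, vl);
}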
// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrec7_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrec7_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrec7_v_f16m1 (vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrec7_v_f16m2 (vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrec7_v_f16m4 (vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrec7_v_f16m8 (vfloat16m8_t op1, size_t vl) {
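The policy selection can be shown directly at the IR level. A hand-written
fragment (an illustration, not generated output) using the vfrec7 intrinsic
checked above:

; Passthru is undef -> tail agnostic; this is what the unmasked builtin emits.
%ta = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> %op1, i64 %vl)
; Passthru is a live value -> tail undisturbed; elements past vl keep %merge.
%tu = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> %merge, <vscale x 1 x float> %op1, i64 %vl)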
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfrsqrt7_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfrsqrt7_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfrsqrt7_v_f16m1 (vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfrsqrt7_v_f16m2 (vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfrsqrt7_v_f16m4 (vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfrsqrt7_v_f16m8 (vfloat16m8_t op1, size_t vl) {
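For contrast, the masked builtins already carry a real passthru (the maskedoff
operand), so by the rule above they lower tail undisturbed. A sketch, assuming
the (mask, maskedoff, op, vl) signature the masked variants take; the wrapper
name `rsqrt_masked` is illustrative:

#include <riscv_vector.h>

// Masked vfrsqrt7: maskedoff is a concrete passthru, so the lowering
// uses tail undisturbed rather than tail agnostic.
vfloat32m1_t rsqrt_masked(vbool32_t m, vfloat32m1_t maskedoff,
                          vfloat32m1_t v, size_t vl) {
  return vfrsqrt7_v_f32m1_m(m, maskedoff, v, vl);
}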
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1 (vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2 (vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4 (vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8 (vfloat16m8_t op1, size_t vl) {
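The widening conversions that follow behave the same way, except that the new
undef passthru takes the widened result type rather than the source type, as
the checks below show. Sketch (the wrapper name `widen` is illustrative, the
intrinsic name is taken from the tests below):

#include <riscv_vector.h>

// Unmasked widening convert: the undef passthru in the IR is of the
// wider float result type, not the narrower i16 source type.
vfloat32m1_t widen(vint16mf2_t src, size_t vl) {
  return vfwcvt_f_x_v_f32m1(src, vl);
}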
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64(<vscale x 2 x float> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64(<vscale x 4 x float> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64(<vscale x 8 x float> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64(<vscale x 16 x float> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64(<vscale x 2 x float> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64(<vscale x 4 x float> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64(<vscale x 8 x float> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64(<vscale x 16 x float> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64(<vscale x 2 x double> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64(<vscale x 4 x double> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64(<vscale x 8 x double> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64(<vscale x 2 x double> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64(<vscale x 4 x double> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64(<vscale x 8 x double> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4 (vint8mf8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2 (vint8mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_x_v_f16m1 (vint8mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8.i64(<vscale x 8 x half> undef, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_x_v_f16m2 (vint8m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8.i64(<vscale x 16 x half> undef, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_x_v_f16m4 (vint8m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8.i64(<vscale x 32 x half> undef, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_x_v_f16m8 (vint8m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4 (vuint8mf8_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2 (vuint8mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_xu_v_f16m1 (vuint8mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8.i64(<vscale x 8 x half> undef, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_xu_v_f16m2 (vuint8m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8.i64(<vscale x 16 x half> undef, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_xu_v_f16m4 (vuint8m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8.i64(<vscale x 32 x half> undef, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_xu_v_f16m8 (vuint8m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfwcvt_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfwcvt_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfwcvt_x_f_v_i32m2 (vfloat16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2 (vfloat16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfwcvt_x_f_v_i32m4 (vfloat16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4 (vfloat16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfwcvt_x_f_v_i32m8 (vfloat16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8 (vfloat16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfwcvt_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfwcvt_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfwcvt_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfwcvt_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16.i64(<vscale x 1 x float> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2 (vfloat16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16.i64(<vscale x 2 x float> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_f_v_f32m1 (vfloat16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16.i64(<vscale x 4 x float> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_f_v_f32m2 (vfloat16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16.i64(<vscale x 8 x float> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_f_v_f32m4 (vfloat16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16.i64(<vscale x 16 x float> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_f_v_f32m8 (vfloat16m4_t src, size_t vl) {
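// Illustrative sketch, not part of the generated checks above. Assuming the
// same <riscv_vector.h> setup as the rest of this file, the unmasked widening
// conversion keeps its C signature; what changes is that CodeGen now prepends
// an undef passthru as the first operand of the IR intrinsic, keeping the
// tail-agnostic lowering. The function name here is hypothetical.
vfloat32mf2_t sketch_widen_f16_to_f32(vfloat16mf4_t src, size_t vl) {
  // Now emits: call <vscale x 1 x float>
  //   @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16.i64(
  //     <vscale x 1 x float> undef, <vscale x 1 x half> %src, i64 %vl)
  return vfwcvt_f_f_v_f32mf2(src, vl);
}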
// CHECK-RV64-LABEL: @test_vid_v_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8.i64(<vscale x 1 x i8> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vid_v_u8mf8(size_t vl) { return vid_v_u8mf8(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8.i64(<vscale x 2 x i8> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vid_v_u8mf4(size_t vl) { return vid_v_u8mf4(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8.i64(<vscale x 4 x i8> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vid_v_u8mf2(size_t vl) { return vid_v_u8mf2(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8.i64(<vscale x 8 x i8> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vid_v_u8m1(size_t vl) { return vid_v_u8m1(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8.i64(<vscale x 16 x i8> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vid_v_u8m2(size_t vl) { return vid_v_u8m2(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8.i64(<vscale x 32 x i8> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vid_v_u8m4(size_t vl) { return vid_v_u8m4(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.nxv64i8.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.nxv64i8.i64(<vscale x 64 x i8> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vid_v_u8m8(size_t vl) { return vid_v_u8m8(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16.i64(<vscale x 1 x i16> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vid_v_u16mf4(size_t vl) { return vid_v_u16mf4(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16.i64(<vscale x 2 x i16> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vid_v_u16mf2(size_t vl) { return vid_v_u16mf2(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16.i64(<vscale x 4 x i16> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vid_v_u16m1(size_t vl) { return vid_v_u16m1(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16.i64(<vscale x 8 x i16> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vid_v_u16m2(size_t vl) { return vid_v_u16m2(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16.i64(<vscale x 16 x i16> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vid_v_u16m4(size_t vl) { return vid_v_u16m4(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16.i64(<vscale x 32 x i16> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vid_v_u16m8(size_t vl) { return vid_v_u16m8(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(<vscale x 1 x i32> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vid_v_u32mf2(size_t vl) { return vid_v_u32mf2(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32.i64(<vscale x 2 x i32> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vid_v_u32m1(size_t vl) { return vid_v_u32m1(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32.i64(<vscale x 4 x i32> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vid_v_u32m2(size_t vl) { return vid_v_u32m2(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32.i64(<vscale x 8 x i32> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vid_v_u32m4(size_t vl) { return vid_v_u32m4(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32.i64(<vscale x 16 x i32> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vid_v_u32m8(size_t vl) { return vid_v_u32m8(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64.i64(<vscale x 1 x i64> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vid_v_u64m1(size_t vl) { return vid_v_u64m1(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64.i64(<vscale x 2 x i64> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vid_v_u64m2(size_t vl) { return vid_v_u64m2(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64.i64(<vscale x 4 x i64> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vid_v_u64m4(size_t vl) { return vid_v_u64m4(vl); }
// CHECK-RV64-LABEL: @test_vid_v_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64.i64(i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64.i64(<vscale x 8 x i64> undef, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vid_v_u64m8(size_t vl) { return vid_v_u64m8(vl); }
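// Illustrative sketch, not part of the generated checks above: vid has no
// source operands at the C level, yet the intrinsic still gains a passthru
// slot, so the tail policy can be derived uniformly with the other
// instructions; the unmasked builtin passes undef there. Hypothetical name.
vuint32m1_t sketch_index_vector(size_t vl) {
  // Now emits: call <vscale x 2 x i32>
  //   @llvm.riscv.vid.nxv2i32.i64(<vscale x 2 x i32> undef, i64 %vl)
  return vid_v_u32m1(vl);
}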
// CHECK-RV64-LABEL: @test_viota_m_u8mf8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_viota_m_u8mf8(vbool64_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u8mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_viota_m_u8mf4(vbool32_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u8mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_viota_m_u8mf2(vbool16_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u8m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_viota_m_u8m1(vbool8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u8m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_viota_m_u8m2(vbool4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u8m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_viota_m_u8m4(vbool2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u8m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_viota_m_u8m8(vbool1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_viota_m_u16mf4(vbool64_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_viota_m_u16mf2(vbool32_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_viota_m_u16m1(vbool16_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_viota_m_u16m2(vbool8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_viota_m_u16m4(vbool4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_viota_m_u16m8(vbool2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_viota_m_u32mf2(vbool64_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_viota_m_u32m1(vbool32_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_viota_m_u32m2(vbool16_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_viota_m_u32m4(vbool8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_viota_m_u32m8(vbool4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_viota_m_u64m1(vbool64_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_viota_m_u64m2(vbool32_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_viota_m_u64m4(vbool16_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_viota_m_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_viota_m_u64m8(vbool8_t op1, size_t vl) {
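// Illustrative sketch, not part of the generated checks above: for viota the
// mask is a data operand rather than a control mask, and the new undef
// passthru is inserted ahead of it as the first intrinsic operand.
// Hypothetical name.
vuint8m1_t sketch_prefix_counts(vbool8_t m, size_t vl) {
  // Now emits: call <vscale x 8 x i8>
  //   @llvm.riscv.viota.nxv8i8.i64(<vscale x 8 x i8> undef,
  //                                <vscale x 8 x i1> %m, i64 %vl)
  return viota_m_u8m1(m, vl);
}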
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) {
let VLOperand = 4;
}
// For destination vector type is the same as source vector.
- // Input: (vector_in, vl)
+ // Input: (passthru, vector_in, vl)
class RISCVUnaryAANoMask
: Intrinsic<[llvm_anyvector_ty],
- [LLVMMatchType<0>, llvm_anyint_ty],
+ [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 1;
+ let VLOperand = 2;
}
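Prepending the passthru shifts every later operand index by one, which is why VLOperand moves from 1 to 2 here and in the sibling classes below. A minimal IR sketch of the two resulting call shapes, using vfsqrt as a representative user of this class (the value names are illustrative, not taken from the tests):

  ; undef passthru => tail-agnostic lowering
  %ta = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
          <vscale x 1 x half> undef, <vscale x 1 x half> %v, iXLen %vl)
  ; live passthru => tail-undisturbed lowering
  %tu = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
          <vscale x 1 x half> %merge, <vscale x 1 x half> %v, iXLen %vl)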
// For destination vector type is the same as first source vector (with mask).
// Input: (vector_in, mask, vl, ta)
[ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 3;
}
- class RISCVUnaryAAMaskNoTA
+ class RISCVUnaryAAMaskTU
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
}
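The rename from NoTA to TU makes the existing behavior explicit: this class (used by vcompress below) already carries a merge operand and no tail-agnostic policy argument, so its lowering treats the tail as undisturbed, and the new name says so directly.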
// For FP classify operations.
// Output: (bit mask type output)
- // Input: (vector_in, vl)
+ // Input: (passthru, vector_in, vl)
class RISCVClassifyNoMask
: Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
- [llvm_anyvector_ty, llvm_anyint_ty],
+ [LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
+ llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 1;
+ let VLOperand = 2;
}
// For FP classify operations with mask.
// Output: (bit mask type output)
- // Input: (maskedoff, vector_in, mask, vl)
+ // Input: (maskedoff, vector_in, mask, vl, policy)
class RISCVClassifyMask
: Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
[LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
- LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
- [IntrNoMem]>, RISCVVIntrinsic {
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_anyint_ty, LLVMMatchType<1>],
+ [IntrNoMem, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
let VLOperand = 3;
}
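Here the new policy argument is appended rather than prepended, so VL stays at operand 3 and the policy becomes a compile-time immediate (ImmArg on operand 4). A hedged sketch of a masked call after this change, matching the vfclass test updates further down (masked RVV intrinsics use the ".mask" suffix; the value names are illustrative):

  %r = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
         <vscale x 1 x i16> %maskedoff, <vscale x 1 x half> %v,
         <vscale x 1 x i1> %mask, iXLen %vl, iXLen 0)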
// For Saturating binary operations.
let VLOperand = 2;
}
// For destination vector type is NOT the same as source vector.
- // Input: (vector_in, vl)
+ // Input: (passthru, vector_in, vl)
class RISCVUnaryABNoMask
: Intrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty, llvm_anyint_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 1;
+ let VLOperand = 2;
}
// For destination vector type is NOT the same as source vector (with mask).
// Input: (maskedoff, vector_in, mask, vl, ta)
// Input: (vl)
class RISCVNullaryIntrinsic
: Intrinsic<[llvm_anyvector_ty],
[llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 0;
}
+ // Output: (vector)
+ // Input: (passthru, vl)
+ class RISCVNullaryIntrinsicTU
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_anyint_ty],
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let VLOperand = 1;
+ }
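int_riscv_vid is retargeted to this TU class further down, so after this patch there is a single intrinsic signature and the tail policy is carried entirely by the passthru value. A sketch of both call shapes (value names illustrative):

  %ta = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
          <vscale x 1 x i8> undef, iXLen %vl)        ; tail agnostic
  %tu = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
          <vscale x 1 x i8> %merge, iXLen %vl)       ; tail undisturbed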
// For Conversion unary operations.
- // Input: (vector_in, vl)
+ // Input: (passthru, vector_in, vl)
class RISCVConversionNoMask
: Intrinsic<[llvm_anyvector_ty],
- [llvm_anyvector_ty, llvm_anyint_ty],
+ [LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 1;
+ let VLOperand = 2;
}
// For Conversion unary operations with mask.
// Input: (maskedoff, vector_in, mask, vl, ta)
let VLOperand = 1;
}
def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty],
[LLVMVectorElementType<0>, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 1;
}
defm vrgather_vx : RISCVRGatherVX;
defm vrgatherei16_vv : RISCVRGatherEI16VV;
- def "int_riscv_vcompress" : RISCVUnaryAAMaskNoTA;
+ def "int_riscv_vcompress" : RISCVUnaryAAMaskTU;
defm vaaddu : RISCVSaturatingBinaryAAX;
defm vaadd : RISCVSaturatingBinaryAAX;
defm vfncvt_rod_f_f_w : RISCVConversion;
// Output: (vector)
- // Input: (mask type input, vl)
+ // Input: (passthru, mask type input, vl)
def int_riscv_viota : Intrinsic<[llvm_anyvector_ty],
- [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ [LLVMMatchType<0>,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
- let VLOperand = 1;
+ let VLOperand = 2;
}
// Output: (vector)
// Input: (maskedoff, mask type vector_in, mask, vl)
let VLOperand = 3;
}
// Output: (vector)
- // Input: (vl)
- def int_riscv_vid : RISCVNullaryIntrinsic;
+ // Input: (passthru, vl)
+ def int_riscv_vid : RISCVNullaryIntrinsicTU;
// Output: (vector)
// Input: (maskedoff, mask, vl)
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
+class VPseudoNullaryNoMaskTU<VReg RegClass>:
+ Pseudo<(outs RegClass:$rd),
+ (ins RegClass:$merge, AVL:$vl, ixlenimm:$sew),
+ []>, RISCVVPseudo {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let Constraints = "$rd = $merge";
+ let HasVLOp = 1;
+ let HasSEWOp = 1;
+ let HasDummyMask = 1;
+ let HasMergeOp = 1;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
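Note the "$rd = $merge" constraint: forcing the merge value into the destination register is what lets the tail elements survive, in line with the "tu" vsetvli configuration the new tests below expect.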
class VPseudoNullaryMask<VReg RegClass>:
Pseudo<(outs GetVRegNoV0<RegClass>.R:$rd),
(ins GetVRegNoV0<RegClass>.R:$merge, VMaskOp:$vm, AVL:$vl,
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
+// RetClass could be GPR or VReg.
+class VPseudoUnaryNoMaskTU<DAGOperand RetClass, VReg OpClass, string Constraint = ""> :
+ Pseudo<(outs RetClass:$rd),
+ (ins RetClass:$merge, OpClass:$rs2, AVL:$vl, ixlenimm:$sew), []>,
+ RISCVVPseudo {
+ let mayLoad = 0;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
+ let HasVLOp = 1;
+ let HasSEWOp = 1;
+ let HasDummyMask = 1;
+ let HasMergeOp = 1;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
class VPseudoUnaryMask<VReg RetClass, VReg OpClass, string Constraint = ""> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
let VLMul = m.value in {
def "_V_" # m.MX : VPseudoNullaryNoMask<m.vrclass>,
Sched<[WriteVMIdxV, ReadVMask]>;
+ def "_V_" # m.MX # "_TU": VPseudoNullaryNoMaskTU<m.vrclass>,
+ Sched<[WriteVMIdxV, ReadVMask]>;
def "_V_" # m.MX # "_MASK" : VPseudoNullaryMask<m.vrclass>,
Sched<[WriteVMIdxV, ReadVMask]>;
}
let VLMul = m.value in {
def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, VR, constraint>,
Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
+ def "_" # m.MX # "_TU" : VPseudoUnaryNoMaskTU<m.vrclass, VR, constraint>,
+ Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, VR, constraint>,
Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
}
let VLMul = m.value in {
def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>;
- def "_V_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>,
+ def "_V_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.vrclass>,
+ Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>;
+ def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>,
Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>;
}
}
let VLMul = m.value in {
def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>;
+ def "_V_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.vrclass>,
+ Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>;
def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>,
Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>;
}
let VLMul = m.value in {
def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>;
+ def "_V_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.vrclass>,
+ Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>;
def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>,
Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>;
}
let VLMul = m.value in {
def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f2vrclass, constraints>,
Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
+ def "_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.f2vrclass, constraints>,
+ Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
def "_" # m.MX # "_MASK" :
VPseudoUnaryMaskTA<m.vrclass, m.f2vrclass, constraints>,
Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
let VLMul = m.value in {
def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f4vrclass, constraints>,
Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
+ def "_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.f4vrclass, constraints>,
+ Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
def "_" # m.MX # "_MASK" :
VPseudoUnaryMaskTA<m.vrclass, m.f4vrclass, constraints>,
Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
let VLMul = m.value in {
def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f8vrclass, constraints>,
Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
+ def "_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.f8vrclass, constraints>,
+ Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
def "_" # m.MX # "_MASK" :
VPseudoUnaryMaskTA<m.vrclass, m.f8vrclass, constraints>,
Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
string Constraint = ""> {
let VLMul = MInfo.value in {
def "_" # MInfo.MX : VPseudoUnaryNoMask<RetClass, Op1Class, Constraint>;
+ def "_" # MInfo.MX # "_TU": VPseudoUnaryNoMaskTU<RetClass, Op1Class, Constraint>;
def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMaskTA<RetClass, Op1Class,
Constraint>;
}
LMULInfo vlmul,
VReg op2_reg_class> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+ (result_type undef),
(op2_type op2_reg_class:$rs2),
VLOpFrag)),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
(op2_type op2_reg_class:$rs2),
GPR:$vl, sew)>;
+class VPatUnaryNoMaskTU<string intrinsic_name,
+ string inst,
+ string kind,
+ ValueType result_type,
+ ValueType op2_type,
+ int sew,
+ LMULInfo vlmul,
+ VReg result_reg_class,
+ VReg op2_reg_class> :
+ Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
+ (result_type result_reg_class:$merge),
+ (op2_type op2_reg_class:$rs2),
+ VLOpFrag)),
+ (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_TU")
+ (result_type result_reg_class:$merge),
+ (op2_type op2_reg_class:$rs2),
+ GPR:$vl, sew)>;
+
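Together with the explicit undef operand added to VPatUnaryNoMask above, instruction selection now keys off the passthru: an undef passthru matches the unsuffixed tail-agnostic pseudo, while a live $merge value matches the new _TU pseudo.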
class VPatUnaryMask<string intrinsic_name,
string inst,
string kind,
foreach vti = AllIntegerVectors in {
def : VPatUnaryNoMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
vti.Log2SEW, vti.LMul, VR>;
+ def : VPatUnaryNoMaskTU<intrinsic, instruction, "M", vti.Vector, vti.Mask,
+ vti.Log2SEW, vti.LMul, vti.RegClass, VR>;
def : VPatUnaryMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
vti.Mask, vti.Log2SEW, vti.LMul, vti.RegClass, VR>;
}
def : VPatUnaryNoMask<intrinsic, instruction, suffix,
vti.Vector, fti.Vector,
vti.Log2SEW, vti.LMul, fti.RegClass>;
+ def : VPatUnaryNoMaskTU<intrinsic, instruction, suffix,
+ vti.Vector, fti.Vector,
+ vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>;
def : VPatUnaryMaskTA<intrinsic, instruction, suffix,
vti.Vector, fti.Vector, vti.Mask,
vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>;
def : VPatUnaryNoMask<intrinsic, instruction, "V",
vti.Vector, vti.Vector,
vti.Log2SEW, vti.LMul, vti.RegClass>;
+ def : VPatUnaryNoMaskTU<intrinsic, instruction, "V",
+ vti.Vector, vti.Vector,
+ vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>;
def : VPatUnaryMaskTA<intrinsic, instruction, "V",
vti.Vector, vti.Vector, vti.Mask,
vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>;
{
foreach vti = AllIntegerVectors in {
def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
+ (vti.Vector undef),
VLOpFrag)),
(!cast<Instruction>(instruction#"_V_" # vti.LMul.MX)
GPR:$vl, vti.Log2SEW)>;
+ def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
+ (vti.Vector vti.RegClass:$merge),
+ VLOpFrag)),
+ (!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_TU")
+ vti.RegClass:$merge, GPR:$vl, vti.Log2SEW)>;
def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
(vti.Vector vti.RegClass:$merge),
(vti.Mask V0), VLOpFrag)),
{
def : VPatUnaryNoMask<intrinsic, inst, kind, result_type, op1_type,
sew, vlmul, op1_reg_class>;
+ def : VPatUnaryNoMaskTU<intrinsic, inst, kind, result_type, op1_type,
+ sew, vlmul, result_reg_class, op1_reg_class>;
def : VPatUnaryMaskTA<intrinsic, inst, kind, result_type, op1_type,
mask_type, sew, vlmul, result_reg_class, op1_reg_class>;
}
}
}
-multiclass VPatClassifyVI_VF<string intrinsic,
- string instruction>
-{
- foreach fvti = AllFloatVectors in
- {
- defvar ivti = GetIntVTypeInfo<fvti>.Vti;
-
- defm : VPatConversion<intrinsic, instruction, "V",
- ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
- fvti.LMul, ivti.RegClass, fvti.RegClass>;
- }
-}
-
multiclass VPatConversionVI_VF<string intrinsic,
string instruction>
{
//===----------------------------------------------------------------------===//
// 14.14. Vector Floating-Point Classify Instruction
//===----------------------------------------------------------------------===//
-defm : VPatClassifyVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">;
+defm : VPatConversionVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">;
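With the passthru and policy operands in place, the vfclass patterns have exactly the shape of the single-width conversion patterns, so the dedicated VPatClassifyVI_VF multiclass removed above is redundant and vfclass can reuse VPatConversionVI_VF.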
//===----------------------------------------------------------------------===//
// 14.15. Vector Floating-Point Merge Instruction
ret <vscale x 1 x i8> %a
}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vsext_vf8_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vsext_vf8_nxv1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e64, m1, tu, mu
+; RV32-NEXT: vsext.vf8 v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsext_vf8_nxv1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e64, m1, tu, mu
+; RV64-NEXT: vsext.vf8 v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i64> @intrinsic_vzext_vf8_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vzext_vf8_nxv1i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e64, m1, tu, mu
+; RV32-NEXT: vzext.vf8 v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vzext_vf8_nxv1i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e64, m1, tu, mu
+; RV64-NEXT: vzext.vf8 v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x float>,
+ iXLen);
+
+define <vscale x 2 x i16> @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32(<vscale x 2 x i16> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
+; RV32-NEXT: vfncvt.x.f.w v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
+; RV64-NEXT: vfncvt.x.f.w v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x float> %1,
+ iXLen %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
+; RV32-LABEL: intrinsic_vid_v_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vid.v v8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vid_v_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vid.v v8
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
+ <vscale x 1 x i8> %0,
+ iXLen %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
+; RV32-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfclass.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfclass.v v8, v9
+; RV64-NEXT: ret
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x half> %1,
+ iXLen %2) nounwind {
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x half> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfcvt.f.x.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfcvt.f.x.v v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x i16>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfcvt.f.xu.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfcvt.f.xu.v v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i16> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfcvt.rtz.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfcvt.rtz.x.f.v v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x half> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfcvt.rtz.xu.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfcvt.rtz.xu.f.v v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x half> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfcvt.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfcvt.x.f.v v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x half> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
+ <vscale x 1 x half>,
+ <vscale x 1 x float>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfncvt.f.f.w v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfncvt.f.f.w v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x float> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x i16> @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfcvt.xu.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfcvt.xu.f.v v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x half> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfncvt.f.x.w v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfncvt.f.x.w v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x i32>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfncvt.f.xu.w v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfncvt.f.xu.w v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i32> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
+ <vscale x 1 x half>,
+ <vscale x 1 x float>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfncvt.rod.f.f.w v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfncvt.rod.f.f.w v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x float> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vfncvt.rtz.x.f.w v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vfncvt.rtz.x.f.w v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x half> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vfncvt.rtz.xu.f.w v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vfncvt.rtz.xu.f.w v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x half> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vfncvt.x.f.w v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vfncvt.x.f.w v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x half> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vfncvt.xu.f.w v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vfncvt.xu.f.w v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x half> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfrec7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfrec7.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfrec7.v v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfrsqrt7.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfrsqrt7.v v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfsqrt.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfsqrt.v v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
+ <vscale x 1 x float>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfwcvt.f.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfwcvt.f.f.v v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x half> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vfwcvt.f.x.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vfwcvt.f.x.v v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x i8>,
+ iXLen);
+
+define <vscale x 1 x half> @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: vfwcvt.f.xu.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: vfwcvt.f.xu.v v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i8> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfwcvt.rtz.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfwcvt.rtz.x.f.v v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x half> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfwcvt.rtz.xu.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfwcvt.rtz.xu.f.v v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x half> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfwcvt.x.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfwcvt.x.f.v v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x half> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x half>,
+ iXLen);
+
+define <vscale x 1 x i32> @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV32-NEXT: vfwcvt.xu.f.v v8, v9
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
+; RV64-NEXT: vfwcvt.xu.f.v v8, v9
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x half> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ iXLen);
+
+define <vscale x 1 x i8> @intrinsic_viota_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV32-NEXT: viota.m v8, v0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
+; RV64-NEXT: viota.m v8, v0
+; RV64-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i1> %1,
+ iXLen %2)
+
+ ret <vscale x 1 x i8> %a
+}
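
The hunks above all follow one pattern: each unmasked conversion intrinsic gains a passthru operand as its first argument, and the test bodies thread the function's first vector argument (or undef) through it. Below is a minimal sketch of the updated shape, not part of the diff: it reuses the vfwcvt.x.f.v intrinsic from the tests above and keeps their iXLen/sed convention; %pt, %v, and @tail_undisturbed_sketch are illustrative names only.

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
  <vscale x 1 x i32>,   ; new leading passthru operand
  <vscale x 1 x half>,
  iXLen);

define <vscale x 1 x i32> @tail_undisturbed_sketch(<vscale x 1 x i32> %pt, <vscale x 1 x half> %v, iXLen %vl) nounwind {
entry:
  ; A live (non-undef) passthru asks the backend to preserve the tail
  ; elements of %pt, which is why the checks above select a "tu" vsetvli;
  ; with an undef passthru it is free to pick a tail-agnostic one instead.
  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
    <vscale x 1 x i32> %pt,
    <vscale x 1 x half> %v,
    iXLen %vl)
  ret <vscale x 1 x i32> %a
}
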
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x half>,
iXLen);
iXLen %1) nounwind {
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x half> %0,
iXLen %1)
<vscale x 1 x i16>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
<vscale x 1 x i16> %0,
<vscale x 1 x half> %1,
<vscale x 1 x i1> %2,
- iXLen %3)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x half>,
iXLen);
iXLen %1) nounwind {
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x half> %0,
iXLen %1)
<vscale x 2 x i16>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 2 x i16> @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2f16:
<vscale x 2 x i16> %0,
<vscale x 2 x half> %1,
<vscale x 2 x i1> %2,
- iXLen %3)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x half>,
iXLen);
iXLen %1) nounwind {
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x half> %0,
iXLen %1)
<vscale x 4 x i16>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 4 x i16> @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4f16:
<vscale x 4 x i16> %0,
<vscale x 4 x half> %1,
<vscale x 4 x i1> %2,
- iXLen %3)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x half>,
iXLen);
iXLen %1) nounwind {
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x half> %0,
iXLen %1)
<vscale x 8 x i16>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 8 x i16> @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8f16:
<vscale x 8 x i16> %0,
<vscale x 8 x half> %1,
<vscale x 8 x i1> %2,
- iXLen %3)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x half>,
iXLen);
iXLen %1) nounwind {
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x half> %0,
iXLen %1)
<vscale x 16 x i16>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 16 x i16> @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16f16:
<vscale x 16 x i16> %0,
<vscale x 16 x half> %1,
<vscale x 16 x i1> %2,
- iXLen %3)
+ iXLen %3, iXLen 0)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x half>,
iXLen);
iXLen %1) nounwind {
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x half> %0,
iXLen %1)
<vscale x 32 x i16>,
<vscale x 32 x half>,
<vscale x 32 x i1>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 32 x i16> @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32f16:
<vscale x 32 x i16> %0,
<vscale x 32 x half> %1,
<vscale x 32 x i1> %2,
- iXLen %3)
+ iXLen %3, iXLen 0)
ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x float>,
iXLen);
iXLen %1) nounwind {
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x float> %0,
iXLen %1)
<vscale x 1 x i32>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 1 x i32> @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i32_nxv1f32:
<vscale x 1 x i32> %0,
<vscale x 1 x float> %1,
<vscale x 1 x i1> %2,
- iXLen %3)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x float>,
iXLen);
iXLen %1) nounwind {
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x float> %0,
iXLen %1)
<vscale x 2 x i32>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 2 x i32> @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i32_nxv2f32:
<vscale x 2 x i32> %0,
<vscale x 2 x float> %1,
<vscale x 2 x i1> %2,
- iXLen %3)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x float>,
iXLen);
iXLen %1) nounwind {
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x float> %0,
iXLen %1)
<vscale x 4 x i32>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 4 x i32> @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i32_nxv4f32:
<vscale x 4 x i32> %0,
<vscale x 4 x float> %1,
<vscale x 4 x i1> %2,
- iXLen %3)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x float>,
iXLen);
iXLen %1) nounwind {
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x float> %0,
iXLen %1)
<vscale x 8 x i32>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 8 x i32> @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i32_nxv8f32:
<vscale x 8 x i32> %0,
<vscale x 8 x float> %1,
<vscale x 8 x i1> %2,
- iXLen %3)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x float>,
iXLen);
iXLen %1) nounwind {
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x float> %0,
iXLen %1)
<vscale x 16 x i32>,
<vscale x 16 x float>,
<vscale x 16 x i1>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 16 x i32> @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i32_nxv16f32:
<vscale x 16 x i32> %0,
<vscale x 16 x float> %1,
<vscale x 16 x i1> %2,
- iXLen %3)
+ iXLen %3, iXLen 0)
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x double>,
iXLen);
iXLen %1) nounwind {
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x double> %0,
iXLen %1)
<vscale x 1 x i64>,
<vscale x 1 x double>,
<vscale x 1 x i1>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 1 x i64> @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i64_nxv1f64:
<vscale x 1 x i64> %0,
<vscale x 1 x double> %1,
<vscale x 1 x i1> %2,
- iXLen %3)
+ iXLen %3, iXLen 0)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x double>,
iXLen);
iXLen %1) nounwind {
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x double> %0,
iXLen %1)
<vscale x 2 x i64>,
<vscale x 2 x double>,
<vscale x 2 x i1>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 2 x i64> @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i64_nxv2f64:
<vscale x 2 x i64> %0,
<vscale x 2 x double> %1,
<vscale x 2 x i1> %2,
- iXLen %3)
+ iXLen %3, iXLen 0)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x double>,
iXLen);
iXLen %1) nounwind {
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x double> %0,
iXLen %1)
<vscale x 4 x i64>,
<vscale x 4 x double>,
<vscale x 4 x i1>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 4 x i64> @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i64_nxv4f64:
<vscale x 4 x i64> %0,
<vscale x 4 x double> %1,
<vscale x 4 x i1> %2,
- iXLen %3)
+ iXLen %3, iXLen 0)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x double>,
iXLen);
iXLen %1) nounwind {
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x double> %0,
iXLen %1)
<vscale x 8 x i64>,
<vscale x 8 x double>,
<vscale x 8 x i1>,
- iXLen);
+ iXLen, iXLen);
define <vscale x 8 x i64> @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i64_nxv8f64:
<vscale x 8 x i64> %0,
<vscale x 8 x double> %1,
<vscale x 8 x i1> %2,
- iXLen %3)
+ iXLen %3, iXLen 0)
ret <vscale x 8 x i64> %a
}
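
For the masked variants, the change is a trailing policy operand rather than a new passthru: each masked vfclass declare above gains a second iXLen, and every call site passes iXLen 0. A hedged sketch of one masked call under the same conventions follows; the llvm.riscv.vfclass.mask.nxv1i16 name and operand order are taken from the upstream tests, while @vfclass_masked_sketch and the value names are illustrative.

declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen);        ; the trailing iXLen is the new policy operand

define <vscale x 1 x i16> @vfclass_masked_sketch(<vscale x 1 x i16> %maskedoff, <vscale x 1 x half> %v, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
entry:
  ; Policy 0 requests tail-undisturbed, mask-undisturbed behaviour, so
  ; both inactive and tail elements of %maskedoff survive the operation.
  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
    <vscale x 1 x i16> %maskedoff,
    <vscale x 1 x half> %v,
    <vscale x 1 x i1> %mask,
    iXLen %vl, iXLen 0)
  ret <vscale x 1 x i16> %a
}
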
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
<vscale x 1 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
+ <vscale x 1 x half> undef,
<vscale x 1 x i16> %0,
iXLen %1)
}
declare <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
<vscale x 2 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16(
+ <vscale x 2 x half> undef,
<vscale x 2 x i16> %0,
iXLen %1)
}
declare <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
<vscale x 4 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16(
+ <vscale x 4 x half> undef,
<vscale x 4 x i16> %0,
iXLen %1)
}
declare <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
<vscale x 8 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16(
+ <vscale x 8 x half> undef,
<vscale x 8 x i16> %0,
iXLen %1)
}
declare <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
<vscale x 16 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16(
+ <vscale x 16 x half> undef,
<vscale x 16 x i16> %0,
iXLen %1)
}
declare <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
<vscale x 32 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16(
+ <vscale x 32 x half> undef,
<vscale x 32 x i16> %0,
iXLen %1)
}
declare <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
<vscale x 1 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32(
+ <vscale x 1 x float> undef,
<vscale x 1 x i32> %0,
iXLen %1)
}
declare <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
<vscale x 2 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32(
+ <vscale x 2 x float> undef,
<vscale x 2 x i32> %0,
iXLen %1)
}
declare <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
<vscale x 4 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32(
+ <vscale x 4 x float> undef,
<vscale x 4 x i32> %0,
iXLen %1)
}
declare <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
<vscale x 8 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32(
+ <vscale x 8 x float> undef,
<vscale x 8 x i32> %0,
iXLen %1)
}
declare <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
<vscale x 16 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32(
+ <vscale x 16 x float> undef,
<vscale x 16 x i32> %0,
iXLen %1)
}
declare <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
<vscale x 1 x i64>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64(
+ <vscale x 1 x double> undef,
<vscale x 1 x i64> %0,
iXLen %1)
}
declare <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
<vscale x 2 x i64>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64(
+ <vscale x 2 x double> undef,
<vscale x 2 x i64> %0,
iXLen %1)
}
declare <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
<vscale x 4 x i64>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64(
+ <vscale x 4 x double> undef,
<vscale x 4 x i64> %0,
iXLen %1)
}
declare <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
<vscale x 8 x i64>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64(
+ <vscale x 8 x double> undef,
<vscale x 8 x i64> %0,
iXLen %1)
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
<vscale x 1 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
+ <vscale x 1 x half> undef,
<vscale x 1 x i16> %0,
iXLen %1)
}
declare <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
<vscale x 2 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16(
+ <vscale x 2 x half> undef,
<vscale x 2 x i16> %0,
iXLen %1)
}
declare <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
<vscale x 4 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16(
+ <vscale x 4 x half> undef,
<vscale x 4 x i16> %0,
iXLen %1)
}
declare <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
<vscale x 8 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16(
+ <vscale x 8 x half> undef,
<vscale x 8 x i16> %0,
iXLen %1)
}
declare <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
<vscale x 16 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16(
+ <vscale x 16 x half> undef,
<vscale x 16 x i16> %0,
iXLen %1)
}
declare <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
<vscale x 32 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16(
+ <vscale x 32 x half> undef,
<vscale x 32 x i16> %0,
iXLen %1)
}
declare <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
<vscale x 1 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32(
+ <vscale x 1 x float> undef,
<vscale x 1 x i32> %0,
iXLen %1)
}
declare <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
<vscale x 2 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32(
+ <vscale x 2 x float> undef,
<vscale x 2 x i32> %0,
iXLen %1)
}
declare <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
<vscale x 4 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32(
+ <vscale x 4 x float> undef,
<vscale x 4 x i32> %0,
iXLen %1)
}
declare <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
<vscale x 8 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32(
+ <vscale x 8 x float> undef,
<vscale x 8 x i32> %0,
iXLen %1)
}
declare <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
<vscale x 16 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32(
+ <vscale x 16 x float> undef,
<vscale x 16 x i32> %0,
iXLen %1)
}
declare <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
<vscale x 1 x i64>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64(
+ <vscale x 1 x double> undef,
<vscale x 1 x i64> %0,
iXLen %1)
}
declare <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
<vscale x 2 x i64>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64(
+ <vscale x 2 x double> undef,
<vscale x 2 x i64> %0,
iXLen %1)
}
declare <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
<vscale x 4 x i64>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64(
+ <vscale x 4 x double> undef,
<vscale x 4 x i64> %0,
iXLen %1)
}
declare <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
<vscale x 8 x i64>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64(
+ <vscale x 8 x double> undef,
<vscale x 8 x i64> %0,
iXLen %1)
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
+ <vscale x 1 x i16>,
<vscale x 1 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x half> %0,
iXLen %1)
}
declare <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16(
+ <vscale x 2 x i16>,
<vscale x 2 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x half> %0,
iXLen %1)
}
declare <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16(
+ <vscale x 4 x i16>,
<vscale x 4 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x half> %0,
iXLen %1)
}
declare <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16(
+ <vscale x 8 x i16>,
<vscale x 8 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x half> %0,
iXLen %1)
}
declare <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16(
+ <vscale x 16 x i16>,
<vscale x 16 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x half> %0,
iXLen %1)
}
declare <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16(
+ <vscale x 32 x i16>,
<vscale x 32 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x half> %0,
iXLen %1)
}
declare <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32(
+ <vscale x 1 x i32>,
<vscale x 1 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x float> %0,
iXLen %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32(
+ <vscale x 2 x i32>,
<vscale x 2 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x float> %0,
iXLen %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32(
+ <vscale x 4 x i32>,
<vscale x 4 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x float> %0,
iXLen %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32(
+ <vscale x 8 x i32>,
<vscale x 8 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x float> %0,
iXLen %1)
}
declare <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32(
+ <vscale x 16 x i32>,
<vscale x 16 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x float> %0,
iXLen %1)
}
declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64(
+ <vscale x 1 x i64>,
<vscale x 1 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x double> %0,
iXLen %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64(
+ <vscale x 2 x i64>,
<vscale x 2 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x double> %0,
iXLen %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64(
+ <vscale x 4 x i64>,
<vscale x 4 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x double> %0,
iXLen %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64(
+ <vscale x 8 x i64>,
<vscale x 8 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x double> %0,
iXLen %1)
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
+ <vscale x 1 x i16>,
<vscale x 1 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x half> %0,
iXLen %1)
}
declare <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16(
+ <vscale x 2 x i16>,
<vscale x 2 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x half> %0,
iXLen %1)
}
declare <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16(
+ <vscale x 4 x i16>,
<vscale x 4 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x half> %0,
iXLen %1)
}
declare <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16(
+ <vscale x 8 x i16>,
<vscale x 8 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x half> %0,
iXLen %1)
}
declare <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16(
+ <vscale x 16 x i16>,
<vscale x 16 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x half> %0,
iXLen %1)
}
declare <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16(
+ <vscale x 32 x i16>,
<vscale x 32 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x half> %0,
iXLen %1)
}
declare <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32(
+ <vscale x 1 x i32>,
<vscale x 1 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x float> %0,
iXLen %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32(
+ <vscale x 2 x i32>,
<vscale x 2 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x float> %0,
iXLen %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32(
+ <vscale x 4 x i32>,
<vscale x 4 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x float> %0,
iXLen %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32(
+ <vscale x 8 x i32>,
<vscale x 8 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x float> %0,
iXLen %1)
}
declare <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32(
+ <vscale x 16 x i32>,
<vscale x 16 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x float> %0,
iXLen %1)
}
declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64(
+ <vscale x 1 x i64>,
<vscale x 1 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x double> %0,
iXLen %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64(
+ <vscale x 2 x i64>,
<vscale x 2 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x double> %0,
iXLen %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64(
+ <vscale x 4 x i64>,
<vscale x 4 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x double> %0,
iXLen %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64(
+ <vscale x 8 x i64>,
<vscale x 8 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x double> %0,
iXLen %1)
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
+ <vscale x 1 x i16>,
<vscale x 1 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x half> %0,
iXLen %1)
}
declare <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16(
+ <vscale x 2 x i16>,
<vscale x 2 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x half> %0,
iXLen %1)
}
declare <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16(
+ <vscale x 4 x i16>,
<vscale x 4 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x half> %0,
iXLen %1)
}
declare <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16(
+ <vscale x 8 x i16>,
<vscale x 8 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x half> %0,
iXLen %1)
}
declare <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16(
+ <vscale x 16 x i16>,
<vscale x 16 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x half> %0,
iXLen %1)
}
declare <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16(
+ <vscale x 32 x i16>,
<vscale x 32 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x half> %0,
iXLen %1)
}
declare <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32(
+ <vscale x 1 x i32>,
<vscale x 1 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x float> %0,
iXLen %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32(
+ <vscale x 2 x i32>,
<vscale x 2 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x float> %0,
iXLen %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32(
+ <vscale x 4 x i32>,
<vscale x 4 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x float> %0,
iXLen %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32(
+ <vscale x 8 x i32>,
<vscale x 8 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x float> %0,
iXLen %1)
}
declare <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32(
+ <vscale x 16 x i32>,
<vscale x 16 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x float> %0,
iXLen %1)
}
declare <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64(
+ <vscale x 1 x i64>,
<vscale x 1 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x double> %0,
iXLen %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64(
+ <vscale x 2 x i64>,
<vscale x 2 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x double> %0,
iXLen %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64(
+ <vscale x 4 x i64>,
<vscale x 4 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x double> %0,
iXLen %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64(
+ <vscale x 8 x i64>,
<vscale x 8 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x double> %0,
iXLen %1)
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
+ <vscale x 1 x i16>,
<vscale x 1 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x half> %0,
iXLen %1)
}
declare <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16(
+ <vscale x 2 x i16>,
<vscale x 2 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x half> %0,
iXLen %1)
}
declare <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16(
+ <vscale x 4 x i16>,
<vscale x 4 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x half> %0,
iXLen %1)
}
declare <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16(
+ <vscale x 8 x i16>,
<vscale x 8 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x half> %0,
iXLen %1)
}
declare <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16(
+ <vscale x 16 x i16>,
<vscale x 16 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x half> %0,
iXLen %1)
}
declare <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16(
+ <vscale x 32 x i16>,
<vscale x 32 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x half> %0,
iXLen %1)
}
declare <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32(
+ <vscale x 1 x i32>,
<vscale x 1 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x float> %0,
iXLen %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32(
+ <vscale x 2 x i32>,
<vscale x 2 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x float> %0,
iXLen %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32(
+ <vscale x 4 x i32>,
<vscale x 4 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x float> %0,
iXLen %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32(
+ <vscale x 8 x i32>,
<vscale x 8 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x float> %0,
iXLen %1)
}
declare <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32(
+ <vscale x 16 x i32>,
<vscale x 16 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x float> %0,
iXLen %1)
}
declare <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64(
+ <vscale x 1 x i64>,
<vscale x 1 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x double> %0,
iXLen %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64(
+ <vscale x 2 x i64>,
<vscale x 2 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x double> %0,
iXLen %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64(
+ <vscale x 4 x i64>,
<vscale x 4 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x double> %0,
iXLen %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64(
+ <vscale x 8 x i64>,
<vscale x 8 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x double> %0,
iXLen %1)
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
+ <vscale x 1 x half>,
<vscale x 1 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
+ <vscale x 1 x half> undef,
<vscale x 1 x float> %0,
iXLen %1)
}
declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32(
+ <vscale x 2 x half>,
<vscale x 2 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32(
+ <vscale x 2 x half> undef,
<vscale x 2 x float> %0,
iXLen %1)
}
declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32(
+ <vscale x 4 x half>,
<vscale x 4 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32(
+ <vscale x 4 x half> undef,
<vscale x 4 x float> %0,
iXLen %1)
}
declare <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32(
+ <vscale x 8 x half>,
<vscale x 8 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32(
+ <vscale x 8 x half> undef,
<vscale x 8 x float> %0,
iXLen %1)
}
declare <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32(
+ <vscale x 16 x half>,
<vscale x 16 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32(
+ <vscale x 16 x half> undef,
<vscale x 16 x float> %0,
iXLen %1)
}
declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64(
+ <vscale x 1 x float>,
<vscale x 1 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64(
+ <vscale x 1 x float> undef,
<vscale x 1 x double> %0,
iXLen %1)
}
declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64(
+ <vscale x 2 x float>,
<vscale x 2 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64(
+ <vscale x 2 x float> undef,
<vscale x 2 x double> %0,
iXLen %1)
}
declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64(
+ <vscale x 4 x float>,
<vscale x 4 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64(
+ <vscale x 4 x float> undef,
<vscale x 4 x double> %0,
iXLen %1)
}
declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64(
+ <vscale x 8 x float>,
<vscale x 8 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64(
+ <vscale x 8 x float> undef,
<vscale x 8 x double> %0,
iXLen %1)
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
<vscale x 1 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
+ <vscale x 1 x half> undef,
<vscale x 1 x i32> %0,
iXLen %1)
}
declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
<vscale x 2 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32(
+ <vscale x 2 x half> undef,
<vscale x 2 x i32> %0,
iXLen %1)
}
declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
<vscale x 4 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32(
+ <vscale x 4 x half> undef,
<vscale x 4 x i32> %0,
iXLen %1)
}
declare <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
<vscale x 8 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32(
+ <vscale x 8 x half> undef,
<vscale x 8 x i32> %0,
iXLen %1)
}
declare <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
<vscale x 16 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32(
+ <vscale x 16 x half> undef,
<vscale x 16 x i32> %0,
iXLen %1)
}
declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
<vscale x 1 x i64>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64(
+ <vscale x 1 x float> undef,
<vscale x 1 x i64> %0,
iXLen %1)
}
declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
<vscale x 2 x i64>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64(
+ <vscale x 2 x float> undef,
<vscale x 2 x i64> %0,
iXLen %1)
}
declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
<vscale x 4 x i64>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64(
+ <vscale x 4 x float> undef,
<vscale x 4 x i64> %0,
iXLen %1)
}
declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
<vscale x 8 x i64>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64(
+ <vscale x 8 x float> undef,
<vscale x 8 x i64> %0,
iXLen %1)
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
<vscale x 1 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
+ <vscale x 1 x half> undef,
<vscale x 1 x i32> %0,
iXLen %1)
}
declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
<vscale x 2 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32(
+ <vscale x 2 x half> undef,
<vscale x 2 x i32> %0,
iXLen %1)
}
declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
<vscale x 4 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32(
+ <vscale x 4 x half> undef,
<vscale x 4 x i32> %0,
iXLen %1)
}
declare <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
<vscale x 8 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32(
+ <vscale x 8 x half> undef,
<vscale x 8 x i32> %0,
iXLen %1)
}
declare <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
<vscale x 16 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32(
+ <vscale x 16 x half> undef,
<vscale x 16 x i32> %0,
iXLen %1)
}
declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
<vscale x 1 x i64>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64(
+ <vscale x 1 x float> undef,
<vscale x 1 x i64> %0,
iXLen %1)
}
declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
<vscale x 2 x i64>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64(
+ <vscale x 2 x float> undef,
<vscale x 2 x i64> %0,
iXLen %1)
}
declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
<vscale x 4 x i64>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64(
+ <vscale x 4 x float> undef,
<vscale x 4 x i64> %0,
iXLen %1)
}
declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
<vscale x 8 x i64>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64(
+ <vscale x 8 x float> undef,
<vscale x 8 x i64> %0,
iXLen %1)
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
+ <vscale x 1 x half>,
<vscale x 1 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
+ <vscale x 1 x half> undef,
<vscale x 1 x float> %0,
iXLen %1)
}
declare <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32(
+ <vscale x 2 x half>,
<vscale x 2 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32(
+ <vscale x 2 x half> undef,
<vscale x 2 x float> %0,
iXLen %1)
}
declare <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32(
+ <vscale x 4 x half>,
<vscale x 4 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32(
+ <vscale x 4 x half> undef,
<vscale x 4 x float> %0,
iXLen %1)
}
declare <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32(
+ <vscale x 8 x half>,
<vscale x 8 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32(
+ <vscale x 8 x half> undef,
<vscale x 8 x float> %0,
iXLen %1)
}
declare <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32(
+ <vscale x 16 x half>,
<vscale x 16 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32(
+ <vscale x 16 x half> undef,
<vscale x 16 x float> %0,
iXLen %1)
}
declare <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64(
+ <vscale x 1 x float>,
<vscale x 1 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64(
+ <vscale x 1 x float> undef,
<vscale x 1 x double> %0,
iXLen %1)
}
declare <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64(
+ <vscale x 2 x float>,
<vscale x 2 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64(
+ <vscale x 2 x float> undef,
<vscale x 2 x double> %0,
iXLen %1)
}
declare <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64(
+ <vscale x 4 x float>,
<vscale x 4 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64(
+ <vscale x 4 x float> undef,
<vscale x 4 x double> %0,
iXLen %1)
}
declare <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64(
+ <vscale x 8 x float>,
<vscale x 8 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64(
+ <vscale x 8 x float> undef,
<vscale x 8 x double> %0,
iXLen %1)
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
+ <vscale x 1 x i8>,
<vscale x 1 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x half> %0,
iXLen %1)
}
declare <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16(
+ <vscale x 2 x i8>,
<vscale x 2 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x half> %0,
iXLen %1)
}
declare <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16(
+ <vscale x 4 x i8>,
<vscale x 4 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x half> %0,
iXLen %1)
}
declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16(
+ <vscale x 8 x i8>,
<vscale x 8 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x half> %0,
iXLen %1)
}
declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16(
+ <vscale x 16 x i8>,
<vscale x 16 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x half> %0,
iXLen %1)
}
declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16(
+ <vscale x 32 x i8>,
<vscale x 32 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x half> %0,
iXLen %1)
}
declare <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32(
+ <vscale x 1 x i16>,
<vscale x 1 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x float> %0,
iXLen %1)
}
declare <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32(
+ <vscale x 2 x i16>,
<vscale x 2 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x float> %0,
iXLen %1)
}
declare <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32(
+ <vscale x 4 x i16>,
<vscale x 4 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x float> %0,
iXLen %1)
}
declare <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32(
+ <vscale x 8 x i16>,
<vscale x 8 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x float> %0,
iXLen %1)
}
declare <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32(
+ <vscale x 16 x i16>,
<vscale x 16 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x float> %0,
iXLen %1)
}
declare <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64(
+ <vscale x 1 x i32>,
<vscale x 1 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x double> %0,
iXLen %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64(
+ <vscale x 2 x i32>,
<vscale x 2 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x double> %0,
iXLen %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64(
+ <vscale x 4 x i32>,
<vscale x 4 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x double> %0,
iXLen %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64(
+ <vscale x 8 x i32>,
<vscale x 8 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x double> %0,
iXLen %1)
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
+ <vscale x 1 x i8>,
<vscale x 1 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x half> %0,
iXLen %1)
}
declare <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16(
+ <vscale x 2 x i8>,
<vscale x 2 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x half> %0,
iXLen %1)
}
declare <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16(
+ <vscale x 4 x i8>,
<vscale x 4 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x half> %0,
iXLen %1)
}
declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16(
+ <vscale x 8 x i8>,
<vscale x 8 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x half> %0,
iXLen %1)
}
declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16(
+ <vscale x 16 x i8>,
<vscale x 16 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x half> %0,
iXLen %1)
}
declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16(
+ <vscale x 32 x i8>,
<vscale x 32 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x half> %0,
iXLen %1)
}
declare <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32(
+ <vscale x 1 x i16>,
<vscale x 1 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x float> %0,
iXLen %1)
}
declare <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32(
+ <vscale x 2 x i16>,
<vscale x 2 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x float> %0,
iXLen %1)
}
declare <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32(
+ <vscale x 4 x i16>,
<vscale x 4 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x float> %0,
iXLen %1)
}
declare <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32(
+ <vscale x 8 x i16>,
<vscale x 8 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x float> %0,
iXLen %1)
}
declare <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32(
+ <vscale x 16 x i16>,
<vscale x 16 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x float> %0,
iXLen %1)
}
declare <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64(
+ <vscale x 1 x i32>,
<vscale x 1 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x double> %0,
iXLen %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64(
+ <vscale x 2 x i32>,
<vscale x 2 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x double> %0,
iXLen %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64(
+ <vscale x 4 x i32>,
<vscale x 4 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x double> %0,
iXLen %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64(
+ <vscale x 8 x i32>,
<vscale x 8 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x double> %0,
iXLen %1)
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
+ <vscale x 1 x i8>,
<vscale x 1 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x half> %0,
iXLen %1)
}
declare <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16(
+ <vscale x 2 x i8>,
<vscale x 2 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x half> %0,
iXLen %1)
}
declare <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16(
+ <vscale x 4 x i8>,
<vscale x 4 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x half> %0,
iXLen %1)
}
declare <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16(
+ <vscale x 8 x i8>,
<vscale x 8 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x half> %0,
iXLen %1)
}
declare <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16(
+ <vscale x 16 x i8>,
<vscale x 16 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x half> %0,
iXLen %1)
}
declare <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16(
+ <vscale x 32 x i8>,
<vscale x 32 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x half> %0,
iXLen %1)
}
declare <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32(
+ <vscale x 1 x i16>,
<vscale x 1 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x float> %0,
iXLen %1)
}
declare <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
+ <vscale x 2 x i16>,
<vscale x 2 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x float> %0,
iXLen %1)
}
declare <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32(
+ <vscale x 4 x i16>,
<vscale x 4 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x float> %0,
iXLen %1)
}
declare <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32(
+ <vscale x 8 x i16>,
<vscale x 8 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x float> %0,
iXLen %1)
}
declare <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32(
+ <vscale x 16 x i16>,
<vscale x 16 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x float> %0,
iXLen %1)
}
declare <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64(
+ <vscale x 1 x i32>,
<vscale x 1 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x double> %0,
iXLen %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64(
+ <vscale x 2 x i32>,
<vscale x 2 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x double> %0,
iXLen %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64(
+ <vscale x 4 x i32>,
<vscale x 4 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x double> %0,
iXLen %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64(
+ <vscale x 8 x i32>,
<vscale x 8 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x double> %0,
iXLen %1)
}
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
+ <vscale x 1 x i8>,
<vscale x 1 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
+ <vscale x 1 x i8> undef,
<vscale x 1 x half> %0,
iXLen %1)
}
declare <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16(
+ <vscale x 2 x i8>,
<vscale x 2 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16(
+ <vscale x 2 x i8> undef,
<vscale x 2 x half> %0,
iXLen %1)
}
declare <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16(
+ <vscale x 4 x i8>,
<vscale x 4 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16(
+ <vscale x 4 x i8> undef,
<vscale x 4 x half> %0,
iXLen %1)
}
declare <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16(
+ <vscale x 8 x i8>,
<vscale x 8 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16(
+ <vscale x 8 x i8> undef,
<vscale x 8 x half> %0,
iXLen %1)
}
declare <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16(
+ <vscale x 16 x i8>,
<vscale x 16 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16(
+ <vscale x 16 x i8> undef,
<vscale x 16 x half> %0,
iXLen %1)
}
declare <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16(
+ <vscale x 32 x i8>,
<vscale x 32 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16(
+ <vscale x 32 x i8> undef,
<vscale x 32 x half> %0,
iXLen %1)
}
declare <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32(
+ <vscale x 1 x i16>,
<vscale x 1 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32(
+ <vscale x 1 x i16> undef,
<vscale x 1 x float> %0,
iXLen %1)
}
declare <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32(
+ <vscale x 2 x i16>,
<vscale x 2 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32(
+ <vscale x 2 x i16> undef,
<vscale x 2 x float> %0,
iXLen %1)
}
declare <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32(
+ <vscale x 4 x i16>,
<vscale x 4 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32(
+ <vscale x 4 x i16> undef,
<vscale x 4 x float> %0,
iXLen %1)
}
declare <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32(
+ <vscale x 8 x i16>,
<vscale x 8 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32(
+ <vscale x 8 x i16> undef,
<vscale x 8 x float> %0,
iXLen %1)
}
declare <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32(
+ <vscale x 16 x i16>,
<vscale x 16 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32(
+ <vscale x 16 x i16> undef,
<vscale x 16 x float> %0,
iXLen %1)
}
declare <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64(
+ <vscale x 1 x i32>,
<vscale x 1 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64(
+ <vscale x 1 x i32> undef,
<vscale x 1 x double> %0,
iXLen %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64(
+ <vscale x 2 x i32>,
<vscale x 2 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64(
+ <vscale x 2 x i32> undef,
<vscale x 2 x double> %0,
iXLen %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64(
+ <vscale x 4 x i32>,
<vscale x 4 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64(
+ <vscale x 4 x i32> undef,
<vscale x 4 x double> %0,
iXLen %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64(
+ <vscale x 8 x i32>,
<vscale x 8 x double>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64(
+ <vscale x 8 x i32> undef,
<vscale x 8 x double> %0,
iXLen %1)
}
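; The single-source unary tests that follow (vfrec7, vfrsqrt7, vfsqrt) gain
; the same leading passthru operand as the conversions above. A minimal
; sketch of the updated call shape, using this file's iXLen placeholder and
; a hypothetical function name not taken from the diff: passing a live value
; instead of undef as the first operand ties the destination to it, so tail
; elements are left undisturbed rather than agnostic.
declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);
define <vscale x 1 x half> @sketch_vfsqrt_tu(<vscale x 1 x half> %pt, <vscale x 1 x half> %src, iXLen %vl) nounwind {
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
    <vscale x 1 x half> %pt,    ; non-undef passthru: tail undisturbed
    <vscale x 1 x half> %src,
    iXLen %vl)
  ret <vscale x 1 x half> %a
}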
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
<vscale x 1 x half>,
+ <vscale x 1 x half>,
iXLen);
define <vscale x 1 x half> @intrinsic_vfrec7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
iXLen %1)
declare <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16(
<vscale x 2 x half>,
+ <vscale x 2 x half>,
iXLen);
define <vscale x 2 x half> @intrinsic_vfrec7_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
iXLen %1)
declare <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16(
<vscale x 4 x half>,
+ <vscale x 4 x half>,
iXLen);
define <vscale x 4 x half> @intrinsic_vfrec7_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
iXLen %1)
declare <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16(
<vscale x 8 x half>,
+ <vscale x 8 x half>,
iXLen);
define <vscale x 8 x half> @intrinsic_vfrec7_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
iXLen %1)
declare <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16(
<vscale x 16 x half>,
+ <vscale x 16 x half>,
iXLen);
define <vscale x 16 x half> @intrinsic_vfrec7_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
iXLen %1)
declare <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16(
<vscale x 32 x half>,
+ <vscale x 32 x half>,
iXLen);
define <vscale x 32 x half> @intrinsic_vfrec7_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
iXLen %1)
declare <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32(
<vscale x 1 x float>,
+ <vscale x 1 x float>,
iXLen);
define <vscale x 1 x float> @intrinsic_vfrec7_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
iXLen %1)
declare <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(
<vscale x 2 x float>,
+ <vscale x 2 x float>,
iXLen);
define <vscale x 2 x float> @intrinsic_vfrec7_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
iXLen %1)
declare <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32(
<vscale x 4 x float>,
+ <vscale x 4 x float>,
iXLen);
define <vscale x 4 x float> @intrinsic_vfrec7_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
iXLen %1)
declare <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32(
<vscale x 8 x float>,
+ <vscale x 8 x float>,
iXLen);
define <vscale x 8 x float> @intrinsic_vfrec7_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
iXLen %1)
declare <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32(
<vscale x 16 x float>,
+ <vscale x 16 x float>,
iXLen);
define <vscale x 16 x float> @intrinsic_vfrec7_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
iXLen %1)
declare <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64(
<vscale x 1 x double>,
+ <vscale x 1 x double>,
iXLen);
define <vscale x 1 x double> @intrinsic_vfrec7_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
iXLen %1)
declare <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64(
<vscale x 2 x double>,
+ <vscale x 2 x double>,
iXLen);
define <vscale x 2 x double> @intrinsic_vfrec7_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
iXLen %1)
declare <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64(
<vscale x 4 x double>,
+ <vscale x 4 x double>,
iXLen);
define <vscale x 4 x double> @intrinsic_vfrec7_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
iXLen %1)
declare <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64(
<vscale x 8 x double>,
+ <vscale x 8 x double>,
iXLen);
define <vscale x 8 x double> @intrinsic_vfrec7_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
iXLen %1)
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
<vscale x 1 x half>,
+ <vscale x 1 x half>,
iXLen);
define <vscale x 1 x half> @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
iXLen %1)
declare <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16(
<vscale x 2 x half>,
+ <vscale x 2 x half>,
iXLen);
define <vscale x 2 x half> @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
iXLen %1)
declare <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16(
<vscale x 4 x half>,
+ <vscale x 4 x half>,
iXLen);
define <vscale x 4 x half> @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
iXLen %1)
declare <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16(
<vscale x 8 x half>,
+ <vscale x 8 x half>,
iXLen);
define <vscale x 8 x half> @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
iXLen %1)
declare <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16(
<vscale x 16 x half>,
+ <vscale x 16 x half>,
iXLen);
define <vscale x 16 x half> @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
iXLen %1)
declare <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16(
<vscale x 32 x half>,
+ <vscale x 32 x half>,
iXLen);
define <vscale x 32 x half> @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
iXLen %1)
declare <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32(
<vscale x 1 x float>,
+ <vscale x 1 x float>,
iXLen);
define <vscale x 1 x float> @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
iXLen %1)
declare <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32(
<vscale x 2 x float>,
+ <vscale x 2 x float>,
iXLen);
define <vscale x 2 x float> @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
iXLen %1)
declare <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32(
<vscale x 4 x float>,
+ <vscale x 4 x float>,
iXLen);
define <vscale x 4 x float> @intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
iXLen %1)
declare <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32(
<vscale x 8 x float>,
+ <vscale x 8 x float>,
iXLen);
define <vscale x 8 x float> @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
iXLen %1)
declare <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32(
<vscale x 16 x float>,
+ <vscale x 16 x float>,
iXLen);
define <vscale x 16 x float> @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
iXLen %1)
declare <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64(
<vscale x 1 x double>,
+ <vscale x 1 x double>,
iXLen);
define <vscale x 1 x double> @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
iXLen %1)
declare <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64(
<vscale x 2 x double>,
+ <vscale x 2 x double>,
iXLen);
define <vscale x 2 x double> @intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
iXLen %1)
declare <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64(
<vscale x 4 x double>,
+ <vscale x 4 x double>,
iXLen);
define <vscale x 4 x double> @intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
iXLen %1)
declare <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64(
<vscale x 8 x double>,
+ <vscale x 8 x double>,
iXLen);
define <vscale x 8 x double> @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
iXLen %1)
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
<vscale x 1 x half>,
+ <vscale x 1 x half>,
iXLen);
define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
+ <vscale x 1 x half> undef,
<vscale x 1 x half> %0,
iXLen %1)
declare <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
<vscale x 2 x half>,
+ <vscale x 2 x half>,
iXLen);
define <vscale x 2 x half> @intrinsic_vfsqrt_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
+ <vscale x 2 x half> undef,
<vscale x 2 x half> %0,
iXLen %1)
declare <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
<vscale x 4 x half>,
+ <vscale x 4 x half>,
iXLen);
define <vscale x 4 x half> @intrinsic_vfsqrt_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
+ <vscale x 4 x half> undef,
<vscale x 4 x half> %0,
iXLen %1)
declare <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
<vscale x 8 x half>,
+ <vscale x 8 x half>,
iXLen);
define <vscale x 8 x half> @intrinsic_vfsqrt_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
+ <vscale x 8 x half> undef,
<vscale x 8 x half> %0,
iXLen %1)
declare <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
<vscale x 16 x half>,
+ <vscale x 16 x half>,
iXLen);
define <vscale x 16 x half> @intrinsic_vfsqrt_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
+ <vscale x 16 x half> undef,
<vscale x 16 x half> %0,
iXLen %1)
declare <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16(
<vscale x 32 x half>,
+ <vscale x 32 x half>,
iXLen);
define <vscale x 32 x half> @intrinsic_vfsqrt_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16(
+ <vscale x 32 x half> undef,
<vscale x 32 x half> %0,
iXLen %1)
declare <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32(
<vscale x 1 x float>,
+ <vscale x 1 x float>,
iXLen);
define <vscale x 1 x float> @intrinsic_vfsqrt_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32(
+ <vscale x 1 x float> undef,
<vscale x 1 x float> %0,
iXLen %1)
declare <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
<vscale x 2 x float>,
+ <vscale x 2 x float>,
iXLen);
define <vscale x 2 x float> @intrinsic_vfsqrt_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
+ <vscale x 2 x float> undef,
<vscale x 2 x float> %0,
iXLen %1)
declare <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
<vscale x 4 x float>,
+ <vscale x 4 x float>,
iXLen);
define <vscale x 4 x float> @intrinsic_vfsqrt_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
+ <vscale x 4 x float> undef,
<vscale x 4 x float> %0,
iXLen %1)
declare <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
<vscale x 8 x float>,
+ <vscale x 8 x float>,
iXLen);
define <vscale x 8 x float> @intrinsic_vfsqrt_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
+ <vscale x 8 x float> undef,
<vscale x 8 x float> %0,
iXLen %1)
declare <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
<vscale x 16 x float>,
+ <vscale x 16 x float>,
iXLen);
define <vscale x 16 x float> @intrinsic_vfsqrt_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
+ <vscale x 16 x float> undef,
<vscale x 16 x float> %0,
iXLen %1)
declare <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64(
<vscale x 1 x double>,
+ <vscale x 1 x double>,
iXLen);
define <vscale x 1 x double> @intrinsic_vfsqrt_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64(
+ <vscale x 1 x double> undef,
<vscale x 1 x double> %0,
iXLen %1)
declare <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
<vscale x 2 x double>,
+ <vscale x 2 x double>,
iXLen);
define <vscale x 2 x double> @intrinsic_vfsqrt_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
+ <vscale x 2 x double> undef,
<vscale x 2 x double> %0,
iXLen %1)
declare <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
<vscale x 4 x double>,
+ <vscale x 4 x double>,
iXLen);
define <vscale x 4 x double> @intrinsic_vfsqrt_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
+ <vscale x 4 x double> undef,
<vscale x 4 x double> %0,
iXLen %1)
declare <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
<vscale x 8 x double>,
+ <vscale x 8 x double>,
iXLen);
define <vscale x 8 x double> @intrinsic_vfsqrt_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
+ <vscale x 8 x double> undef,
<vscale x 8 x double> %0,
iXLen %1)
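; For the widening conversions below (vfwcvt.*), note that the new passthru
; operand has the widened result type, not the source element type. A small
; sketch with a hypothetical function name, mirroring the declarations that
; follow:
declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  iXLen);
define <vscale x 1 x float> @sketch_vfwcvt(<vscale x 1 x half> %src, iXLen %vl) nounwind {
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
    <vscale x 1 x float> undef,    ; passthru uses the wide result type
    <vscale x 1 x half> %src,
    iXLen %vl)
  ret <vscale x 1 x float> %a
}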
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
+ <vscale x 1 x float>,
<vscale x 1 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
+ <vscale x 1 x float> undef,
<vscale x 1 x half> %0,
iXLen %1)
}
declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
+ <vscale x 2 x float>,
<vscale x 2 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
+ <vscale x 2 x float> undef,
<vscale x 2 x half> %0,
iXLen %1)
}
declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
+ <vscale x 4 x float>,
<vscale x 4 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
+ <vscale x 4 x float> undef,
<vscale x 4 x half> %0,
iXLen %1)
}
declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16(
+ <vscale x 8 x float>,
<vscale x 8 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16(
+ <vscale x 8 x float> undef,
<vscale x 8 x half> %0,
iXLen %1)
}
declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16(
+ <vscale x 16 x float>,
<vscale x 16 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16(
+ <vscale x 16 x float> undef,
<vscale x 16 x half> %0,
iXLen %1)
}
declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32(
+ <vscale x 1 x double>,
<vscale x 1 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32(
+ <vscale x 1 x double> undef,
<vscale x 1 x float> %0,
iXLen %1)
}
declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32(
+ <vscale x 2 x double>,
<vscale x 2 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32(
+ <vscale x 2 x double> undef,
<vscale x 2 x float> %0,
iXLen %1)
}
declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32(
+ <vscale x 4 x double>,
<vscale x 4 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32(
+ <vscale x 4 x double> undef,
<vscale x 4 x float> %0,
iXLen %1)
}
declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32(
+ <vscale x 8 x double>,
<vscale x 8 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32(
+ <vscale x 8 x double> undef,
<vscale x 8 x float> %0,
iXLen %1)
}
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
<vscale x 1 x i8>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
+ <vscale x 1 x half> undef,
<vscale x 1 x i8> %0,
iXLen %1)
}
declare <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
<vscale x 2 x i8>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8(
+ <vscale x 2 x half> undef,
<vscale x 2 x i8> %0,
iXLen %1)
}
declare <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
<vscale x 4 x i8>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8(
+ <vscale x 4 x half> undef,
<vscale x 4 x i8> %0,
iXLen %1)
}
declare <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
<vscale x 8 x i8>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8(
+ <vscale x 8 x half> undef,
<vscale x 8 x i8> %0,
iXLen %1)
}
declare <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
<vscale x 16 x i8>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8(
+ <vscale x 16 x half> undef,
<vscale x 16 x i8> %0,
iXLen %1)
}
declare <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
<vscale x 32 x i8>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8(
+ <vscale x 32 x half> undef,
<vscale x 32 x i8> %0,
iXLen %1)
}
declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
<vscale x 1 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16(
+ <vscale x 1 x float> undef,
<vscale x 1 x i16> %0,
iXLen %1)
}
declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
<vscale x 2 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16(
+ <vscale x 2 x float> undef,
<vscale x 2 x i16> %0,
iXLen %1)
}
declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
<vscale x 4 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16(
+ <vscale x 4 x float> undef,
<vscale x 4 x i16> %0,
iXLen %1)
}
declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
<vscale x 8 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16(
+ <vscale x 8 x float> undef,
<vscale x 8 x i16> %0,
iXLen %1)
}
declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
<vscale x 16 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16(
+ <vscale x 16 x float> undef,
<vscale x 16 x i16> %0,
iXLen %1)
}
declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
<vscale x 1 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32(
+ <vscale x 1 x double> undef,
<vscale x 1 x i32> %0,
iXLen %1)
}
declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
<vscale x 2 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32(
+ <vscale x 2 x double> undef,
<vscale x 2 x i32> %0,
iXLen %1)
}
declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
<vscale x 4 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32(
+ <vscale x 4 x double> undef,
<vscale x 4 x i32> %0,
iXLen %1)
}
declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
<vscale x 8 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32(
+ <vscale x 8 x double> undef,
<vscale x 8 x i32> %0,
iXLen %1)
}
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
<vscale x 1 x i8>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
+ <vscale x 1 x half> undef,
<vscale x 1 x i8> %0,
iXLen %1)
}
declare <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
<vscale x 2 x i8>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8(
+ <vscale x 2 x half> undef,
<vscale x 2 x i8> %0,
iXLen %1)
}
declare <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
<vscale x 4 x i8>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8(
+ <vscale x 4 x half> undef,
<vscale x 4 x i8> %0,
iXLen %1)
}
declare <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
<vscale x 8 x i8>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8(
+ <vscale x 8 x half> undef,
<vscale x 8 x i8> %0,
iXLen %1)
}
declare <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
<vscale x 16 x i8>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8(
+ <vscale x 16 x half> undef,
<vscale x 16 x i8> %0,
iXLen %1)
}
declare <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
<vscale x 32 x i8>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8(
+ <vscale x 32 x half> undef,
<vscale x 32 x i8> %0,
iXLen %1)
}
declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
<vscale x 1 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16(
+ <vscale x 1 x float> undef,
<vscale x 1 x i16> %0,
iXLen %1)
}
declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
<vscale x 2 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16(
+ <vscale x 2 x float> undef,
<vscale x 2 x i16> %0,
iXLen %1)
}
declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
<vscale x 4 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16(
+ <vscale x 4 x float> undef,
<vscale x 4 x i16> %0,
iXLen %1)
}
declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
<vscale x 8 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16(
+ <vscale x 8 x float> undef,
<vscale x 8 x i16> %0,
iXLen %1)
}
declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
<vscale x 16 x i16>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16(
+ <vscale x 16 x float> undef,
<vscale x 16 x i16> %0,
iXLen %1)
}
declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
<vscale x 1 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32(
+ <vscale x 1 x double> undef,
<vscale x 1 x i32> %0,
iXLen %1)
}
declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
<vscale x 2 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32(
+ <vscale x 2 x double> undef,
<vscale x 2 x i32> %0,
iXLen %1)
}
declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
<vscale x 4 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32(
+ <vscale x 4 x double> undef,
<vscale x 4 x i32> %0,
iXLen %1)
}
declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
<vscale x 8 x i32>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32(
+ <vscale x 8 x double> undef,
<vscale x 8 x i32> %0,
iXLen %1)
}
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
+ <vscale x 1 x i32>,
<vscale x 1 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x half> %0,
iXLen %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16(
+ <vscale x 2 x i32>,
<vscale x 2 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x half> %0,
iXLen %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16(
+ <vscale x 4 x i32>,
<vscale x 4 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x half> %0,
iXLen %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16(
+ <vscale x 8 x i32>,
<vscale x 8 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x half> %0,
iXLen %1)
}
declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16(
+ <vscale x 16 x i32>,
<vscale x 16 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x half> %0,
iXLen %1)
}
declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32(
+ <vscale x 1 x i64>,
<vscale x 1 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x float> %0,
iXLen %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32(
+ <vscale x 2 x i64>,
<vscale x 2 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x float> %0,
iXLen %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
+ <vscale x 4 x i64>,
<vscale x 4 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x float> %0,
iXLen %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
+ <vscale x 8 x i64>,
<vscale x 8 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x float> %0,
iXLen %1)
}
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
+ <vscale x 1 x i32>,
<vscale x 1 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x half> %0,
iXLen %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16(
+ <vscale x 2 x i32>,
<vscale x 2 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x half> %0,
iXLen %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16(
+ <vscale x 4 x i32>,
<vscale x 4 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x half> %0,
iXLen %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16(
+ <vscale x 8 x i32>,
<vscale x 8 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x half> %0,
iXLen %1)
}
declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16(
+ <vscale x 16 x i32>,
<vscale x 16 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x half> %0,
iXLen %1)
}
declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32(
+ <vscale x 1 x i64>,
<vscale x 1 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x float> %0,
iXLen %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32(
+ <vscale x 2 x i64>,
<vscale x 2 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x float> %0,
iXLen %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32(
+ <vscale x 4 x i64>,
<vscale x 4 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x float> %0,
iXLen %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32(
+ <vscale x 8 x i64>,
<vscale x 8 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x float> %0,
iXLen %1)
}
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
+ <vscale x 1 x i32>,
<vscale x 1 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x half> %0,
iXLen %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16(
+ <vscale x 2 x i32>,
<vscale x 2 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x half> %0,
iXLen %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16(
+ <vscale x 4 x i32>,
<vscale x 4 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x half> %0,
iXLen %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16(
+ <vscale x 8 x i32>,
<vscale x 8 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x half> %0,
iXLen %1)
}
declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16(
+ <vscale x 16 x i32>,
<vscale x 16 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x half> %0,
iXLen %1)
}
declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32(
+ <vscale x 1 x i64>,
<vscale x 1 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x float> %0,
iXLen %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32(
+ <vscale x 2 x i64>,
<vscale x 2 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x float> %0,
iXLen %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32(
+ <vscale x 4 x i64>,
<vscale x 4 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x float> %0,
iXLen %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32(
+ <vscale x 8 x i64>,
<vscale x 8 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x float> %0,
iXLen %1)
}
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
+ <vscale x 1 x i32>,
<vscale x 1 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x half> %0,
iXLen %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
+ <vscale x 2 x i32>,
<vscale x 2 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x half> %0,
iXLen %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
+ <vscale x 4 x i32>,
<vscale x 4 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x half> %0,
iXLen %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16(
+ <vscale x 8 x i32>,
<vscale x 8 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x half> %0,
iXLen %1)
}
declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16(
+ <vscale x 16 x i32>,
<vscale x 16 x half>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x half> %0,
iXLen %1)
}
declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
+ <vscale x 1 x i64>,
<vscale x 1 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x float> %0,
iXLen %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32(
+ <vscale x 2 x i64>,
<vscale x 2 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x float> %0,
iXLen %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32(
+ <vscale x 4 x i64>,
<vscale x 4 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x float> %0,
iXLen %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32(
+ <vscale x 8 x i64>,
<vscale x 8 x float>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x float> %0,
iXLen %1)
}
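; vid.v generates the index sequence without reading a source vector, so
; after this change the passthru is its only vector operand. Sketch
; (hypothetical function name):
declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
  <vscale x 1 x i8>,
  iXLen);
define <vscale x 1 x i8> @sketch_vid(iXLen %vl) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
    <vscale x 1 x i8> undef,    ; no source vector; only passthru and AVL
    iXLen %vl)
  ret <vscale x 1 x i8> %a
}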
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN: -verify-machineinstrs | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
+ <vscale x 1 x i8>,
iXLen);
define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
+ <vscale x 1 x i8> undef,
iXLen %0)
ret <vscale x 1 x i8> %a
}
declare <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
+ <vscale x 2 x i8>,
iXLen);
define <vscale x 2 x i8> @intrinsic_vid_v_nxv2i8(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
+ <vscale x 2 x i8> undef,
iXLen %0)
ret <vscale x 2 x i8> %a
}
declare <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
+ <vscale x 4 x i8>,
iXLen);
define <vscale x 4 x i8> @intrinsic_vid_v_nxv4i8(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
+ <vscale x 4 x i8> undef,
iXLen %0)
ret <vscale x 4 x i8> %a
}
declare <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
+ <vscale x 8 x i8>,
iXLen);
define <vscale x 8 x i8> @intrinsic_vid_v_nxv8i8(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
+ <vscale x 8 x i8> undef,
iXLen %0)
ret <vscale x 8 x i8> %a
}
declare <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
+ <vscale x 16 x i8>,
iXLen);
define <vscale x 16 x i8> @intrinsic_vid_v_nxv16i8(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
+ <vscale x 16 x i8> undef,
iXLen %0)
ret <vscale x 16 x i8> %a
}
declare <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
+ <vscale x 32 x i8>,
iXLen);
define <vscale x 32 x i8> @intrinsic_vid_v_nxv32i8(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
+ <vscale x 32 x i8> undef,
iXLen %0)
ret <vscale x 32 x i8> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
+ <vscale x 1 x i16>,
iXLen);
define <vscale x 1 x i16> @intrinsic_vid_v_nxv1i16(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
+ <vscale x 1 x i16> undef,
iXLen %0)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
+ <vscale x 2 x i16>,
iXLen);
define <vscale x 2 x i16> @intrinsic_vid_v_nxv2i16(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
+ <vscale x 2 x i16> undef,
iXLen %0)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
+ <vscale x 4 x i16>,
iXLen);
define <vscale x 4 x i16> @intrinsic_vid_v_nxv4i16(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
+ <vscale x 4 x i16> undef,
iXLen %0)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
+ <vscale x 8 x i16>,
iXLen);
define <vscale x 8 x i16> @intrinsic_vid_v_nxv8i16(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
+ <vscale x 8 x i16> undef,
iXLen %0)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
+ <vscale x 16 x i16>,
iXLen);
define <vscale x 16 x i16> @intrinsic_vid_v_nxv16i16(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
+ <vscale x 16 x i16> undef,
iXLen %0)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
+ <vscale x 32 x i16>,
iXLen);
define <vscale x 32 x i16> @intrinsic_vid_v_nxv32i16(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
+ <vscale x 32 x i16> undef,
iXLen %0)
ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
+ <vscale x 1 x i32>,
iXLen);
define <vscale x 1 x i32> @intrinsic_vid_v_nxv1i32(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
+ <vscale x 1 x i32> undef,
iXLen %0)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
+ <vscale x 2 x i32>,
iXLen);
define <vscale x 2 x i32> @intrinsic_vid_v_nxv2i32(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
+ <vscale x 2 x i32> undef,
iXLen %0)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
+ <vscale x 4 x i32>,
iXLen);
define <vscale x 4 x i32> @intrinsic_vid_v_nxv4i32(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
+ <vscale x 4 x i32> undef,
iXLen %0)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
+ <vscale x 8 x i32>,
iXLen);
define <vscale x 8 x i32> @intrinsic_vid_v_nxv8i32(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
+ <vscale x 8 x i32> undef,
iXLen %0)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
+ <vscale x 16 x i32>,
iXLen);
define <vscale x 16 x i32> @intrinsic_vid_v_nxv16i32(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
+ <vscale x 16 x i32> undef,
iXLen %0)
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
+ <vscale x 1 x i64>,
iXLen);
define <vscale x 1 x i64> @intrinsic_vid_v_nxv1i64(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
+ <vscale x 1 x i64> undef,
iXLen %0)
ret <vscale x 1 x i64> %a
}
declare <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
+ <vscale x 2 x i64>,
iXLen);
define <vscale x 2 x i64> @intrinsic_vid_v_nxv2i64(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
+ <vscale x 2 x i64> undef,
iXLen %0)
ret <vscale x 2 x i64> %a
}
declare <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
+ <vscale x 4 x i64>,
iXLen);
define <vscale x 4 x i64> @intrinsic_vid_v_nxv4i64(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
+ <vscale x 4 x i64> undef,
iXLen %0)
ret <vscale x 4 x i64> %a
}
declare <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
+ <vscale x 8 x i64>,
iXLen);
define <vscale x 8 x i64> @intrinsic_vid_v_nxv8i64(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
+ <vscale x 8 x i64> undef,
iXLen %0)
ret <vscale x 8 x i64> %a
}
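; viota.m, tested below, reads a mask but writes an integer result, so its
; new passthru matches the integer result type while the source stays an i1
; vector. Sketch (hypothetical function name):
declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);
define <vscale x 1 x i8> @sketch_viota(<vscale x 1 x i1> %mask, iXLen %vl) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
    <vscale x 1 x i8> undef,    ; i8 result-typed passthru, not mask-typed
    <vscale x 1 x i1> %mask,
    iXLen %vl)
  ret <vscale x 1 x i8> %a
}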
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN: -verify-machineinstrs | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
+ <vscale x 1 x i8>,
<vscale x 1 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
+ <vscale x 1 x i8> undef,
<vscale x 1 x i1> %0,
iXLen %1)
}
declare <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
+ <vscale x 2 x i8>,
<vscale x 2 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
+ <vscale x 2 x i8> undef,
<vscale x 2 x i1> %0,
iXLen %1)
}
declare <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
+ <vscale x 4 x i8>,
<vscale x 4 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
+ <vscale x 4 x i8> undef,
<vscale x 4 x i1> %0,
iXLen %1)
}
declare <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
+ <vscale x 8 x i8>,
<vscale x 8 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
+ <vscale x 8 x i8> undef,
<vscale x 8 x i1> %0,
iXLen %1)
}
declare <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
+ <vscale x 16 x i8>,
<vscale x 16 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
+ <vscale x 16 x i8> undef,
<vscale x 16 x i1> %0,
iXLen %1)
}
declare <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
+ <vscale x 32 x i8>,
<vscale x 32 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
+ <vscale x 32 x i8> undef,
<vscale x 32 x i1> %0,
iXLen %1)
}
declare <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
+ <vscale x 64 x i8>,
<vscale x 64 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
+ <vscale x 64 x i8> undef,
<vscale x 64 x i1> %0,
iXLen %1)
}
declare <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
+ <vscale x 1 x i16>,
<vscale x 1 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i1> %0,
iXLen %1)
}
declare <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
+ <vscale x 2 x i16>,
<vscale x 2 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i1> %0,
iXLen %1)
}
declare <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
+ <vscale x 4 x i16>,
<vscale x 4 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i1> %0,
iXLen %1)
}
declare <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
+ <vscale x 8 x i16>,
<vscale x 8 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i1> %0,
iXLen %1)
}
declare <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
+ <vscale x 16 x i16>,
<vscale x 16 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i1> %0,
iXLen %1)
}
declare <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
+ <vscale x 32 x i16>,
<vscale x 32 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i1> %0,
iXLen %1)
}
declare <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
+ <vscale x 1 x i32>,
<vscale x 1 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i1> %0,
iXLen %1)
}
declare <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
+ <vscale x 2 x i32>,
<vscale x 2 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i1> %0,
iXLen %1)
}
declare <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
+ <vscale x 4 x i32>,
<vscale x 4 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i1> %0,
iXLen %1)
}
declare <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
+ <vscale x 8 x i32>,
<vscale x 8 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i1> %0,
iXLen %1)
}
declare <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
+ <vscale x 16 x i32>,
<vscale x 16 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i1> %0,
iXLen %1)
}
declare <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
+ <vscale x 1 x i64>,
<vscale x 1 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i1> %0,
iXLen %1)
}
declare <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
+ <vscale x 2 x i64>,
<vscale x 2 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i1> %0,
iXLen %1)
}
declare <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
+ <vscale x 4 x i64>,
<vscale x 4 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i1> %0,
iXLen %1)
}
declare <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
+ <vscale x 8 x i64>,
<vscale x 8 x i1>,
iXLen);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i1> %0,
iXLen %1)
}
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
- %b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> %a, i64 %2)
+ %b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> %a, i64 %2)
br label %if.end
if.else: ; preds = %entry
- %c = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> %a, i64 %2)
+ %c = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> %a, i64 %2)
br label %if.end
if.end: ; preds = %if.else, %if.then
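; NOTE (illustrative): both branches pass undef as the new passthru, which is
; the same mechanical update applied to the other call sites in these tests.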
declare void @llvm.riscv.vse.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64) #4
; Function Attrs: nounwind readnone
- declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32>, i64) #1
+ declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1
; Function Attrs: nounwind readnone
- declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32>, i64) #1
+ declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1
attributes #0 = { "target-features"="+v" }
attributes #1 = { nounwind readnone }
define <vscale x 1 x i64> @load_zext(<vscale x 1 x i32>* %0, i64 %1) #0 {
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32>* %0, i64 %1)
- %b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> %a, i64 %1)
+ %b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> %a, i64 %1)
ret <vscale x 1 x i64> %b
}
declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>, <vscale x 1 x i32>* nocapture, i64) #4
; Function Attrs: nounwind readnone
- declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32>, i64) #1
+ declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1
attributes #0 = { "target-features"="+v" }
attributes #1 = { nounwind readnone }
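; NOTE (illustrative, %prev is hypothetical): the passthru also allows
; chaining, keeping the tail elements of an earlier result across the extend:
;   %b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(
;     <vscale x 1 x i64> %prev, <vscale x 1 x i32> %a, i64 %1)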
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
<vscale x 1 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i8> %0,
i32 %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
<vscale x 2 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i8> %0,
i32 %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
<vscale x 4 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i8> %0,
i32 %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
<vscale x 8 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i8> %0,
i32 %1)
}
declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
<vscale x 1 x i16>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i16> %0,
i32 %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
<vscale x 2 x i16>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i16> %0,
i32 %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
<vscale x 4 x i16>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i16> %0,
i32 %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
<vscale x 8 x i16>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i16> %0,
i32 %1)
}
declare <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
<vscale x 1 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i8> %0,
i32 %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
<vscale x 2 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i8> %0,
i32 %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
<vscale x 4 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i8> %0,
i32 %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
<vscale x 8 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i8> %0,
i32 %1)
}
declare <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
<vscale x 16 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i8> %0,
i32 %1)
}
declare <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
i32 %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
i32 %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
i32 %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
i32 %1)
}
declare <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
i32 %1)
}
declare <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
i32 %1)
}
declare <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
i32 %1)
}
declare <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
i32 %1)
}
declare <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
i32 %1)
}
declare <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
i32 %1)
}
declare <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
i32 %1)
}
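; NOTE: unlike the viota file above, which substitutes iXLen via sed, the
; vsext/vzext files below hardcode the XLEN type; the rv64 variant uses an
; i64 AVL and additionally covers the i32-to-i64 (vf2) cases.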
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
<vscale x 1 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i8> %0,
i64 %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
<vscale x 2 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i8> %0,
i64 %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
<vscale x 4 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i8> %0,
i64 %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
<vscale x 8 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i8> %0,
i64 %1)
}
declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
<vscale x 1 x i16>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i16> %0,
i64 %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
<vscale x 2 x i16>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i16> %0,
i64 %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
<vscale x 4 x i16>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i16> %0,
i64 %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
<vscale x 8 x i16>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i16> %0,
i64 %1)
}
declare <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
<vscale x 1 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i8> %0,
i64 %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
<vscale x 2 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i8> %0,
i64 %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
<vscale x 4 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i8> %0,
i64 %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
<vscale x 8 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i8> %0,
i64 %1)
}
declare <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
<vscale x 16 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i8> %0,
i64 %1)
}
declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
i64 %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
i64 %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
i64 %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
i64 %1)
}
declare <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
i64 %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
i64 %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
i64 %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
i64 %1)
}
declare <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
i64 %1)
}
declare <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
i64 %1)
}
declare <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
i64 %1)
}
declare <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
i64 %1)
}
declare <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
i64 %1)
}
declare <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
i64 %1)
}
declare <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
i64 %1)
}
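; NOTE: the vzext files that follow mirror the vsext files above; only the
; kind of extension differs, and the passthru operand is added identically.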
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
<vscale x 1 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i8> %0,
i32 %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
<vscale x 2 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i8> %0,
i32 %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
<vscale x 4 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i8> %0,
i32 %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
<vscale x 8 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i8> %0,
i32 %1)
}
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
<vscale x 1 x i16>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i16> %0,
i32 %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
<vscale x 2 x i16>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i16> %0,
i32 %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
<vscale x 4 x i16>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i16> %0,
i32 %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
<vscale x 8 x i16>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i16> %0,
i32 %1)
}
declare <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
<vscale x 1 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i8> %0,
i32 %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
<vscale x 2 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i8> %0,
i32 %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
<vscale x 4 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i8> %0,
i32 %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
<vscale x 8 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i8> %0,
i32 %1)
}
declare <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
<vscale x 16 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i8> %0,
i32 %1)
}
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
i32 %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
i32 %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
i32 %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
i32 %1)
}
declare <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
i32 %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
i32 %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
i32 %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
i32 %1)
}
declare <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
i32 %1)
}
declare <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
i32 %1)
}
declare <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
i32 %1)
}
declare <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
i32 %1)
}
declare <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
i32 %1)
}
declare <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
i32 %1)
}
declare <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
i32);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
i32 %1)
}
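; NOTE: taken together, the updated declarations cover all three widening
; factors: vf8 (i8 to i64), vf4 (i8 to i32, i16 to i64) and vf2 (i8 to i16,
; i16 to i32, i32 to i64), at each LMUL exercised by these tests.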
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
<vscale x 1 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i8> %0,
i64 %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
<vscale x 2 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i8> %0,
i64 %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
<vscale x 4 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i8> %0,
i64 %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
<vscale x 8 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i8> %0,
i64 %1)
}
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
<vscale x 1 x i16>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i16> %0,
i64 %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
<vscale x 2 x i16>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i16> %0,
i64 %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
<vscale x 4 x i16>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i16> %0,
i64 %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
<vscale x 8 x i16>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i16> %0,
i64 %1)
}
declare <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
<vscale x 1 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i8> %0,
i64 %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
<vscale x 2 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i8> %0,
i64 %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
<vscale x 4 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i8> %0,
i64 %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
<vscale x 8 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i8> %0,
i64 %1)
}
declare <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
<vscale x 16 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i8> %0,
i64 %1)
}
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
<vscale x 1 x i32>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> undef,
<vscale x 1 x i32> %0,
i64 %1)
}
declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
<vscale x 2 x i32>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> undef,
<vscale x 2 x i32> %0,
i64 %1)
}
declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
<vscale x 4 x i32>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> undef,
<vscale x 4 x i32> %0,
i64 %1)
}
declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
<vscale x 8 x i32>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> undef,
<vscale x 8 x i32> %0,
i64 %1)
}
declare <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
<vscale x 1 x i16>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> undef,
<vscale x 1 x i16> %0,
i64 %1)
}
declare <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
<vscale x 2 x i16>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> undef,
<vscale x 2 x i16> %0,
i64 %1)
}
declare <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
<vscale x 4 x i16>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> undef,
<vscale x 4 x i16> %0,
i64 %1)
}
declare <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
<vscale x 8 x i16>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> undef,
<vscale x 8 x i16> %0,
i64 %1)
}
declare <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
<vscale x 16 x i16>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> undef,
<vscale x 16 x i16> %0,
i64 %1)
}
declare <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
<vscale x 1 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> undef,
<vscale x 1 x i8> %0,
i64 %1)
}
declare <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
<vscale x 2 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> undef,
<vscale x 2 x i8> %0,
i64 %1)
}
declare <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
<vscale x 4 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> undef,
<vscale x 4 x i8> %0,
i64 %1)
}
declare <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
<vscale x 8 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> undef,
<vscale x 8 x i8> %0,
i64 %1)
}
declare <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
<vscale x 16 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> undef,
<vscale x 16 x i8> %0,
i64 %1)
}
declare <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
<vscale x 32 x i8>,
i64);
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> undef,
<vscale x 32 x i8> %0,
i64 %1)
}