Define vector compare intrinsics and lower them to V instructions.
We worked with @rogfer01 from BSC to develop this patch.
Authored-by: Roger Ferrer Ibanez <rofirrim@gmail.com>
Co-Authored-by: Hsiangkai Wang <kai.wang@sifive.com>
Differential Revision: https://reviews.llvm.org/D93368
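For reference, each compare intrinsic returns a mask vector (<vscale x N x i1>)
and takes the vector length as its last operand; the masked variant additionally
takes a maskedoff operand and a mask operand. A minimal sketch of the unmasked
form, based on the vmfeq tests added below (the @example wrapper is illustrative
only and not part of this patch; RV32, so vl is i32):

declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
  <vscale x 1 x half>, <vscale x 1 x half>, i32)

define <vscale x 1 x i1> @example(<vscale x 1 x half> %x, <vscale x 1 x half> %y, i32 %vl) nounwind {
  ; Unmasked compare: selected to vmfeq.vv under a vsetvli with e16,mf4,ta,mu.
  %m = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
      <vscale x 1 x half> %x, <vscale x 1 x half> %y, i32 %vl)
  ret <vscale x 1 x i1> %m
}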
[IntrNoMem]>, RISCVVIntrinsic {
let ExtendOperand = 2;
}
+ // For binary operations with mask-type output, without mask.
+ // Output: (mask type output)
+ // Input: (vector_in, vector_in/scalar_in, vl)
+ class RISCVCompareNoMask
+ : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
+ [llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let ExtendOperand = 2;
+ }
+ // For binary operations with mask-type output, with mask.
+ // Output: (mask type output)
+ // Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
+ class RISCVCompareMask
+ : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
+ [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+ llvm_anyvector_ty, llvm_any_ty,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let ExtendOperand = 3;
+ }
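+ // As a concrete illustration (drawn from the tests added in this patch),
+ // these two classes yield intrinsics such as:
+ //   <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
+ //       <vscale x 1 x half>, <vscale x 1 x half>, i32)
+ //   <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16(
+ //       <vscale x 1 x i1>, <vscale x 1 x half>, <vscale x 1 x half>,
+ //       <vscale x 1 x i1>, i32)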
// For Saturating binary operations.
// The destination vector type is the same as first source vector.
def "int_riscv_" # NAME : RISCVSStore;
def "int_riscv_" # NAME # "_mask" : RISCVSStoreMask;
}
+
multiclass RISCVIStore {
def "int_riscv_" # NAME : RISCVIStore;
def "int_riscv_" # NAME # "_mask" : RISCVIStoreMask;
}
+
+ // AAX means the destination type (A) is the same as the first source
+ // type (A). X means any type for the second source operand.
multiclass RISCVBinaryAAX {
def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
}
+ // ABX means the destination type (A) is different from the first source
+ // type (B). X means any type for the second source operand.
multiclass RISCVBinaryABX {
def "int_riscv_" # NAME : RISCVBinaryABXNoMask;
def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMask;
def "int_riscv_" # NAME : RISCVTernaryAAXANoMask;
def "int_riscv_" # NAME # "_mask" : RISCVTernaryAAXAMask;
}
+ multiclass RISCVCompare {
+ def "int_riscv_" # NAME : RISCVCompareNoMask;
+ def "int_riscv_" # NAME # "_mask" : RISCVCompareMask;
+ }
defm vle : RISCVUSLoad;
defm vleff : RISCVUSLoad;
defm vnsrl : RISCVBinaryABX;
defm vnsra : RISCVBinaryABX;
+ defm vmseq : RISCVCompare;
+ defm vmsne : RISCVCompare;
+ defm vmsltu : RISCVCompare;
+ defm vmslt : RISCVCompare;
+ defm vmsleu : RISCVCompare;
+ defm vmsle : RISCVCompare;
+ defm vmsgtu : RISCVCompare;
+ defm vmsgt : RISCVCompare;
+
defm vminu : RISCVBinaryAAX;
defm vmin : RISCVBinaryAAX;
defm vmaxu : RISCVBinaryAAX;
defm vnclipu : RISCVSaturatingBinaryABX;
defm vnclip : RISCVSaturatingBinaryABX;
+
+ defm vmfeq : RISCVCompare;
+ defm vmfne : RISCVCompare;
+ defm vmflt : RISCVCompare;
+ defm vmfle : RISCVCompare;
+ defm vmfgt : RISCVCompare;
+ defm vmfge : RISCVCompare;
+
} // TargetPrefix = "riscv"
}
}
+// The destination EEW is 1.
+// The source EEW is 8, 16, 32, or 64.
+// When the destination EEW differs from the source EEW, we need to use
+// @earlyclobber to avoid overlap between the destination and source registers.
+multiclass VPseudoBinaryM_VV {
+ foreach m = MxList.m in
+ defm _VV : VPseudoBinary<VR, m.vrclass, m.vrclass, m, "@earlyclobber $rd">;
+}
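+// As a rough sketch of how this expands (the pseudo naming below follows
+// VPseudoBinary and is an assumption, not spelled out in this patch): a
+// "defm PseudoVMSEQ : VPseudoBinaryM_VV_VX_VI" yields one pseudo per LMUL,
+// e.g. PseudoVMSEQ_VV_M1 and its masked variant, each writing a single VR
+// mask register that @earlyclobber keeps out of the source register groups.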
+
+multiclass VPseudoBinaryM_VX<bit IsFloat> {
+ foreach m = MxList.m in
+ defm !if(!eq(IsFloat, 0), "_VX", "_VF") :
+ VPseudoBinary<VR, m.vrclass, !if(!eq(IsFloat, 0), GPR, FPR32), m,
+ "@earlyclobber $rd">;
+}
+
+multiclass VPseudoBinaryM_VI {
+ foreach m = MxList.m in
+ defm _VI : VPseudoBinary<VR, m.vrclass, simm5, m, "@earlyclobber $rd">;
+}
+
multiclass VPseudoBinaryV_VV_VX_VI<Operand ImmType = simm5> {
defm "" : VPseudoBinaryV_VV;
defm "" : VPseudoBinaryV_VX</*IsFloat=*/0>;
defm "" : VPseudoTernaryV_VI<ImmType, Constraint>;
}
+multiclass VPseudoBinaryM_VV_VX_VI {
+ defm "" : VPseudoBinaryM_VV;
+ defm "" : VPseudoBinaryM_VX</*IsFloat=*/0>;
+ defm "" : VPseudoBinaryM_VI;
+}
+
+multiclass VPseudoBinaryM_VV_VX<bit IsFloat = 0> {
+ defm "" : VPseudoBinaryM_VV;
+ defm "" : VPseudoBinaryM_VX<IsFloat>;
+}
+
+multiclass VPseudoBinaryM_VX_VI {
+ defm "" : VPseudoBinaryM_VX</*IsFloat=*/0>;
+ defm "" : VPseudoBinaryM_VI;
+}
+
//===----------------------------------------------------------------------===//
// Helpers to define the SDNode patterns.
//===----------------------------------------------------------------------===//
vti.RegClass, simm5>;
}
+multiclass VPatBinaryM_VV<string intrinsic, string instruction,
+ list<VTypeInfo> vtilist> {
+ foreach vti = vtilist in
+ defm : VPatBinary<intrinsic, instruction, "VV",
+ vti.Mask, vti.Vector, vti.Vector, vti.Mask,
+ vti.SEW, vti.LMul, VR,
+ vti.RegClass, vti.RegClass>;
+}
+
+multiclass VPatBinaryM_VX<string intrinsic, string instruction,
+ list<VTypeInfo> vtilist> {
+ foreach vti = vtilist in
+ defm : VPatBinary<intrinsic, instruction,
+ !if(!eq(vti.Scalar, XLenVT), "VX", "VF"),
+ vti.Mask, vti.Vector, vti.Scalar, vti.Mask,
+ vti.SEW, vti.LMul, VR,
+ vti.RegClass, vti.ScalarRegClass>;
+}
+
+multiclass VPatBinaryM_VI<string intrinsic, string instruction,
+ list<VTypeInfo> vtilist> {
+ foreach vti = vtilist in
+ defm : VPatBinary<intrinsic, instruction, "VI",
+ vti.Mask, vti.Vector, XLenVT, vti.Mask,
+ vti.SEW, vti.LMul, VR,
+ vti.RegClass, simm5>;
+}
+
multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction,
list<VTypeInfo> vtilist, Operand ImmType = simm5>
{
defm "" : VPatTernaryV_VI<intrinsic, instruction, vtilist, Imm_type>;
}
+multiclass VPatBinaryM_VV_VX_VI<string intrinsic, string instruction,
+ list<VTypeInfo> vtilist>
+{
+ defm "" : VPatBinaryM_VV<intrinsic, instruction, vtilist>;
+ defm "" : VPatBinaryM_VX<intrinsic, instruction, vtilist>;
+ defm "" : VPatBinaryM_VI<intrinsic, instruction, vtilist>;
+}
+
+multiclass VPatBinaryM_VV_VX<string intrinsic, string instruction,
+ list<VTypeInfo> vtilist>
+{
+ defm "" : VPatBinaryM_VV<intrinsic, instruction, vtilist>;
+ defm "" : VPatBinaryM_VX<intrinsic, instruction, vtilist>;
+}
+
+multiclass VPatBinaryM_VX_VI<string intrinsic, string instruction,
+ list<VTypeInfo> vtilist>
+{
+ defm "" : VPatBinaryM_VX<intrinsic, instruction, vtilist>;
+ defm "" : VPatBinaryM_VI<intrinsic, instruction, vtilist>;
+}
+
//===----------------------------------------------------------------------===//
// Pseudo instructions and patterns.
//===----------------------------------------------------------------------===//
defm PseudoVNSRL : VPseudoBinaryV_WV_WX_WI;
defm PseudoVNSRA : VPseudoBinaryV_WV_WX_WI;
+//===----------------------------------------------------------------------===//
+// 12.8. Vector Integer Comparison Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVMSEQ : VPseudoBinaryM_VV_VX_VI;
+defm PseudoVMSNE : VPseudoBinaryM_VV_VX_VI;
+defm PseudoVMSLTU : VPseudoBinaryM_VV_VX;
+defm PseudoVMSLT : VPseudoBinaryM_VV_VX;
+defm PseudoVMSLEU : VPseudoBinaryM_VV_VX_VI;
+defm PseudoVMSLE : VPseudoBinaryM_VV_VX_VI;
+defm PseudoVMSGTU : VPseudoBinaryM_VX_VI;
+defm PseudoVMSGT : VPseudoBinaryM_VX_VI;
+
//===----------------------------------------------------------------------===//
// 12.9. Vector Integer Min/Max Instructions
//===----------------------------------------------------------------------===//
defm PseudoVFSGNJN : VPseudoBinaryV_VV_VX</*IsFloat=*/1>;
defm PseudoVFSGNJX : VPseudoBinaryV_VV_VX</*IsFloat=*/1>;
+//===----------------------------------------------------------------------===//
+// 14.13. Vector Floating-Point Compare Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVMFEQ : VPseudoBinaryM_VV_VX</*IsFloat=*/1>;
+defm PseudoVMFNE : VPseudoBinaryM_VV_VX</*IsFloat=*/1>;
+defm PseudoVMFLT : VPseudoBinaryM_VV_VX</*IsFloat=*/1>;
+defm PseudoVMFLE : VPseudoBinaryM_VV_VX</*IsFloat=*/1>;
+defm PseudoVMFGT : VPseudoBinaryM_VX</*IsFloat=*/1>;
+defm PseudoVMFGE : VPseudoBinaryM_VX</*IsFloat=*/1>;
} // Predicates = [HasStdExtV, HasStdExtF]
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryV_WV_WX_WI<"int_riscv_vnsrl", "PseudoVNSRL", AllWidenableIntVectors>;
defm "" : VPatBinaryV_WV_WX_WI<"int_riscv_vnsra", "PseudoVNSRA", AllWidenableIntVectors>;
+//===----------------------------------------------------------------------===//
+// 12.8. Vector Integer Comparison Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatBinaryM_VV_VX_VI<"int_riscv_vmseq", "PseudoVMSEQ", AllIntegerVectors>;
+defm "" : VPatBinaryM_VV_VX_VI<"int_riscv_vmsne", "PseudoVMSNE", AllIntegerVectors>;
+defm "" : VPatBinaryM_VV_VX<"int_riscv_vmsltu", "PseudoVMSLTU", AllIntegerVectors>;
+defm "" : VPatBinaryM_VV_VX<"int_riscv_vmslt", "PseudoVMSLT", AllIntegerVectors>;
+defm "" : VPatBinaryM_VV_VX_VI<"int_riscv_vmsleu", "PseudoVMSLEU", AllIntegerVectors>;
+defm "" : VPatBinaryM_VV_VX_VI<"int_riscv_vmsle", "PseudoVMSLE", AllIntegerVectors>;
+
+defm "" : VPatBinaryM_VX_VI<"int_riscv_vmsgtu", "PseudoVMSGTU", AllIntegerVectors>;
+defm "" : VPatBinaryM_VX_VI<"int_riscv_vmsgt", "PseudoVMSGT", AllIntegerVectors>;
+
//===----------------------------------------------------------------------===//
// 12.9. Vector Integer Min/Max Instructions
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryV_VV_VX<"int_riscv_vfsgnjn", "PseudoVFSGNJN", AllFloatVectors>;
defm "" : VPatBinaryV_VV_VX<"int_riscv_vfsgnjx", "PseudoVFSGNJX", AllFloatVectors>;
+//===----------------------------------------------------------------------===//
+// 14.13. Vector Floating-Point Compare Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatBinaryM_VV_VX<"int_riscv_vmfeq", "PseudoVMFEQ", AllFloatVectors>;
+defm "" : VPatBinaryM_VV_VX<"int_riscv_vmfle", "PseudoVMFLE", AllFloatVectors>;
+defm "" : VPatBinaryM_VV_VX<"int_riscv_vmflt", "PseudoVMFLT", AllFloatVectors>;
+defm "" : VPatBinaryM_VV_VX<"int_riscv_vmfne", "PseudoVMFNE", AllFloatVectors>;
+defm "" : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT", AllFloatVectors>;
+defm "" : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE", AllFloatVectors>;
+
} // Predicates = [HasStdExtV, HasStdExtF]
//===----------------------------------------------------------------------===//
--- /dev/null
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x half> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16(
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x half> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16(
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x half> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x half> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmfeq_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i32 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x half> %3,
+ <vscale x 16 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32(
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x float> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32(
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x float> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x float> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x float> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16.f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16.f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16.f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16.f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %1,
+ half %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16.f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16.f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %1,
+ half %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16.f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16.f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %1,
+ half %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmfeq_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16.f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16.f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %1,
+ half %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %1,
+ float %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %1,
+ float %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %1,
+ float %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %1,
+ float %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16(
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x half> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16(
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x half> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16(
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x half> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x half> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmfeq_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i64 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x half> %3,
+ <vscale x 16 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32(
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x float> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32(
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x float> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x float> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x float> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64(
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x double> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2f64_nxv2f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x double> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4f64_nxv4f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmfeq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x double> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16.f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16.f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16.f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16.f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %1,
+ half %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16.f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16.f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %1,
+ half %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16.f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16.f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %1,
+ half %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmfeq_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16.f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16.f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %1,
+ half %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %1,
+ float %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %1,
+ float %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %1,
+ float %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %1,
+ float %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %1,
+ double %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %1,
+ double %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmfeq.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %1,
+ double %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
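+; Scalar-operand (vmfge.vf) tests for RV32: f16 and f32 element types across
+; the LMUL settings checked below; the vector-length operand is i32 to match XLEN.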
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %1,
+ half %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %1,
+ half %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfge_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %1,
+ half %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmfge_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmfge_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %1,
+ half %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %1,
+ float %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %1,
+ float %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %1,
+ float %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfge_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %1,
+ float %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
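+; Scalar-operand (vmfge.vf) tests for RV64: f16, f32, and f64 element types;
+; the vector-length operand is i64 to match XLEN.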
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %1,
+ half %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %1,
+ half %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfge_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %1,
+ half %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmfge_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmfge_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %1,
+ half %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %1,
+ float %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %1,
+ float %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %1,
+ float %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfge_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %1,
+ float %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv1f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %1,
+ double %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv2f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %1,
+ double %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv4f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmfge.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %1,
+ double %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
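+; Scalar-operand (vmfgt.vf) tests for RV32: f16 and f32 element types across
+; the LMUL settings checked below; the vector-length operand is i32 to match XLEN.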
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %1,
+ half %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %1,
+ half %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfgt_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %1,
+ half %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmfgt_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %1,
+ half %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %1,
+ float %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %1,
+ float %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %1,
+ float %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfgt_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %1,
+ float %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
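+; Scalar-operand (vmfgt.vf) tests for RV64: f16, f32, and f64 element types;
+; the vector-length operand is i64 to match XLEN.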
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %1,
+ half %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %1,
+ half %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfgt_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %1,
+ half %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmfgt_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %1,
+ half %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %1,
+ float %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %1,
+ float %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %1,
+ float %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfgt_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %1,
+ float %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %1,
+ double %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %1,
+ double %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmfgt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %1,
+ double %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
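+; Note (descriptive comment, not from the original patch): judging from the
+; RUN line, this is the rv32 variant of the vmfle tests. The functions below
+; exercise the llvm.riscv.vmfle intrinsics in their unmasked and masked forms,
+; for both the vector-vector (.vv) and vector-scalar (.vf) variants, over the
+; f16 and f32 element types at each supported LMUL. The CHECK lines only pin
+; down the vsetvli configuration and the emitted vmfle instruction; register
+; assignment is intentionally left unconstrained.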
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x half> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x half> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x half> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x half> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmfle_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i32 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x half> %3,
+ <vscale x 16 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x float> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x float> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x float> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x float> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %1,
+ half %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %1,
+ half %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %1,
+ half %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmfle_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmfle_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %1,
+ half %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %1,
+ float %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %1,
+ float %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %1,
+ float %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %1,
+ float %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
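+; Note (descriptive comment, not from the original patch): rv64 variant of the
+; vmfle tests. Compared to the rv32 file, the +d attribute in the RUN line
+; additionally enables the f64 element types, and the VL operand is i64 rather
+; than i32. Coverage otherwise mirrors the rv32 file: unmasked and masked,
+; .vv and .vf forms at each supported LMUL.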
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16(
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x half> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16(
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x half> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16(
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x half> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x half> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmfle_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i64 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x half> %3,
+ <vscale x 16 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32(
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x float> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32(
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x float> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x float> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x float> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64(
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x double> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv2f64_nxv2f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x double> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vv_nxv4f64_nxv4f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmfle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x double> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %1,
+ half %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %1,
+ half %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %1,
+ half %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmfle_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmfle_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %1,
+ half %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %1,
+ float %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %1,
+ float %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %1,
+ float %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %1,
+ float %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv1f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %1,
+ double %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv2f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %1,
+ double %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv4f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmfle.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %1,
+ double %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
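+
+; Tests for the llvm.riscv.vmflt (floating-point less-than compare) intrinsics
+; on RV32: vector-vector (vmflt.vv) and vector-scalar (vmflt.vf) forms, both
+; unmasked and masked, across the f16/f32 element types and LMUL settings.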
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x half> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16(
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x half> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16(
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x half> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x half> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i32 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x half> %3,
+ <vscale x 16 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32(
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x float> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32(
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x float> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x float> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x float> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %1,
+ half %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %1,
+ half %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %1,
+ half %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16.f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16.f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %1,
+ half %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %1,
+ float %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %1,
+ float %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %1,
+ float %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %1,
+ float %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
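+
+; Tests for the llvm.riscv.vmflt (floating-point less-than compare) intrinsics
+; on RV64: vector-vector (vmflt.vv) and vector-scalar (vmflt.vf) forms, both
+; unmasked and masked, across the f16/f32/f64 element types and LMUL settings.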
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16(
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x half> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16(
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x half> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16(
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x half> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x half> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i64 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x half> %3,
+ <vscale x 16 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32(
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x float> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32(
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x float> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x float> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x float> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64(
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x double> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv2f64_nxv2f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x double> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv4f64_nxv4f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmflt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x double> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %1,
+ half %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %1,
+ half %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %1,
+ half %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16.f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16.f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %1,
+ half %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %1,
+ float %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %1,
+ float %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %1,
+ float %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %1,
+ float %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv1f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %1,
+ double %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv2f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %1,
+ double %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv4f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmflt.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %1,
+ double %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
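+
+; This file covers the rv32 vmfne tests. Each unmasked test checks for a vsetvli
+; with the expected SEW/LMUL followed by vmfne.vv or vmfne.vf; the masked
+; variants additionally expect the v0.t suffix. The masked vector-vector tests
+; build their mask operand with the unmasked intrinsic before calling the
+; masked one.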
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x half> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x half> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x half> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x half> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i32 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x half> %3,
+ <vscale x 16 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x float> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x float> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x float> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x float> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %1,
+ half %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %1,
+ half %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %1,
+ half %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %1,
+ half %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %1,
+ float %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %1,
+ float %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %1,
+ float %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %1,
+ float %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
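+
+; Same coverage as the rv32 vmfne file, but compiled for riscv64 (the vl operand
+; is i64) and extended with the f64 element types enabled by +d.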
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16(
+ <vscale x 1 x half> %1,
+ <vscale x 1 x half> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %2,
+ <vscale x 1 x half> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16(
+ <vscale x 2 x half> %1,
+ <vscale x 2 x half> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %2,
+ <vscale x 2 x half> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16(
+ <vscale x 4 x half> %1,
+ <vscale x 4 x half> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %2,
+ <vscale x 4 x half> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
+ <vscale x 8 x half> %1,
+ <vscale x 8 x half> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %2,
+ <vscale x 8 x half> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
+ <vscale x 16 x half> %1,
+ <vscale x 16 x half> %2,
+ i64 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %2,
+ <vscale x 16 x half> %3,
+ <vscale x 16 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32(
+ <vscale x 1 x float> %1,
+ <vscale x 1 x float> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %2,
+ <vscale x 1 x float> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32(
+ <vscale x 2 x float> %1,
+ <vscale x 2 x float> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %2,
+ <vscale x 2 x float> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
+ <vscale x 4 x float> %1,
+ <vscale x 4 x float> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %2,
+ <vscale x 4 x float> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
+ <vscale x 8 x float> %1,
+ <vscale x 8 x float> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %2,
+ <vscale x 8 x float> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64(
+ <vscale x 1 x double> %1,
+ <vscale x 1 x double> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %2,
+ <vscale x 1 x double> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv2f64_nxv2f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
+ <vscale x 2 x double> %1,
+ <vscale x 2 x double> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %2,
+ <vscale x 2 x double> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv4f64_nxv4f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmfne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
+ <vscale x 4 x double> %1,
+ <vscale x 4 x double> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %2,
+ <vscale x 4 x double> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16.f16(
+ <vscale x 1 x half>,
+ half,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f16.f16(
+ <vscale x 1 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x half>,
+ half,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x half> %1,
+ half %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16.f16(
+ <vscale x 2 x half>,
+ half,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f16.f16(
+ <vscale x 2 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x half>,
+ half,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x half> %1,
+ half %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16.f16(
+ <vscale x 4 x half>,
+ half,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f16.f16(
+ <vscale x 4 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x half>,
+ half,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x half> %1,
+ half %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16.f16(
+ <vscale x 8 x half>,
+ half,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_vf_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16.f16(
+ <vscale x 8 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x half>,
+ half,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x half> %1,
+ half %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16.f16(
+ <vscale x 16 x half>,
+ half,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_vf_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16.f16(
+ <vscale x 16 x half> %0,
+ half %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x half>,
+ half,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16f16_f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x half> %1,
+ half %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.f32(
+ <vscale x 1 x float>,
+ float,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f32.f32(
+ <vscale x 1 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x float>,
+ float,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x float> %1,
+ float %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.f32(
+ <vscale x 2 x float>,
+ float,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f32.f32(
+ <vscale x 2 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x float>,
+ float,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x float> %1,
+ float %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32(
+ <vscale x 4 x float>,
+ float,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32.f32(
+ <vscale x 4 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x float>,
+ float,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x float> %1,
+ float %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.f32(
+ <vscale x 8 x float>,
+ float,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_vf_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32.f32(
+ <vscale x 8 x float> %0,
+ float %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x float>,
+ float,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f32_f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x float> %1,
+ float %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64(
+ <vscale x 1 x double>,
+ double,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv1f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1f64.f64(
+ <vscale x 1 x double> %0,
+ double %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x double>,
+ double,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x double> %1,
+ double %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64(
+ <vscale x 2 x double>,
+ double,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv2f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64.f64(
+ <vscale x 2 x double> %0,
+ double %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x double>,
+ double,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x double> %1,
+ double %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64(
+ <vscale x 4 x double>,
+ double,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv4f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64.f64(
+ <vscale x 4 x double> %0,
+ double %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x double>,
+ double,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f64_f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmfne.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x double> %1,
+ double %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
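+
+; Integer compare tests for vmseq on rv32, starting at SEW=8 (LMUL=mf8). As in
+; the floating-point files, the masked vector-vector tests compute their mask
+; with the unmasked intrinsic and expect the v0.t form of vmseq.vv.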
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i8> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i8> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i8> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i8> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i8> %3,
+ <vscale x 16 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmseq_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i8> %3,
+ <vscale x 32 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i16> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i16> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i16> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i16> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i16> %3,
+ <vscale x 16 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i32> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i32> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i32> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i32> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmseq_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
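+; The vector-immediate (vi) tests below reuse the scalar-operand declarations
+; above; the constant 9 fits in the 5-bit immediate field, so vmseq.vi is the
+; expected selection.
+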
+define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmseq_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
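+
+; Tests for the llvm.riscv.vmseq intrinsics on RV64. These mirror the RV32
+; tests above, with i64 as the VL operand type.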
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8(
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i8> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8(
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i8> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8(
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i8> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8(
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i8> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i8> %3,
+ <vscale x 16 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmseq_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i8> %3,
+ <vscale x 32 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16(
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i16> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16(
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i16> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16(
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i16> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i16> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmseq_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i16> %3,
+ <vscale x 16 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32(
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i32> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32(
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i32> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i32> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i32> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64(
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i64> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i64> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vv_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmseq.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i64> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
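+; Scalar (vx) forms of vmseq: the second source operand is an XLEN scalar
+; held in a GPR and compared against every element, so the CHECK lines
+; below expect vmseq.vx rather than vmseq.vv.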
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmseq_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmseq_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ i64,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i64>,
+ i64,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ i64,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i64>,
+ i64,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ i64,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vx_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i64>,
+ i64,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmseq.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
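+; Immediate (vi) forms of vmseq: the scalar operand is the constant 9,
+; which fits in the 5-bit simm field, so instruction selection emits
+; vmseq.vi instead of materialising the constant for vmseq.vx.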
+define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmseq_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmseq_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmseq_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmseq_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %1,
+ i64 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmseq_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %1,
+ i64 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmseq_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_vi_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmseq.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %1,
+ i64 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
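+
+; Tests for the signed greater-than compare intrinsics (vmsgt) on RV32.
+; Each pair below exercises the unmasked vector-scalar (vx) form and the
+; masked form, whose CHECK line expects the v0.t suffix.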
+declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsgt_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
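+; The cases below pass the constant 9 as the scalar operand so that
+; instruction selection picks the immediate form, vmsgt.vi, instead of
+; vmsgt.vx.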
+define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsgt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
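+; RV64 counterpart of the preceding RV32 file: the same llvm.riscv.vmsgt
+; coverage, but the vl operand is i64 and the i64 element types can also be
+; compared against a scalar register operand.
+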
+declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsgt_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsgt_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ i64,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i64>,
+ i64,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ i64,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i64>,
+ i64,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ i64,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vx_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i64>,
+ i64,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsgt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
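+; As on RV32, the remaining cases use the constant 9 so the compare is
+; selected as vmsgt.vi.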
+define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsgt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %1,
+ i64 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %1,
+ i64 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_vi_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsgt.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %1,
+ i64 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
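+; These tests cover the unmasked and masked forms of the llvm.riscv.vmsgtu
+; intrinsics on RV32, using scalar (.vx) and immediate (.vi) operands across
+; the supported SEW/LMUL combinations.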
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsgtu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsgtu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
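+; These tests cover the unmasked and masked forms of the llvm.riscv.vmsgtu
+; intrinsics on RV64, where element types up to i64 are available for the
+; scalar operand.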
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsgtu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ i64,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i64>,
+ i64,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ i64,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i64>,
+ i64,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ i64,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vx_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i64>,
+ i64,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsgtu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsgtu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %1,
+ i64 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %1,
+ i64 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_vi_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsgtu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %1,
+ i64 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i8> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i8> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i8> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i8> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i8> %3,
+ <vscale x 16 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsle_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i8> %3,
+ <vscale x 32 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i16> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i16> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i16> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i16> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i16> %3,
+ <vscale x 16 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i32> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i32> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i32> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i32> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsle_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
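+; The vector-immediate (vi) checks below reuse the scalar-operand intrinsic
+; declarations above and pass the constant 9 as the second operand, so
+; instruction selection emits vmsle.vi rather than vmsle.vx.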
+define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsle_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
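+
+; Generated checks for the llvm.riscv.vmsle (signed less-than-or-equal)
+; compare intrinsics on RV64, covering masked and unmasked forms across the
+; integer element widths and LMULs exercised below.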
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8(
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i8> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8(
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i8> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8(
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i8> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8(
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i8> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i8> %3,
+ <vscale x 16 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsle_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i8> %3,
+ <vscale x 32 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16(
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i16> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16(
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i16> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16(
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i16> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i16> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsle_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i16> %3,
+ <vscale x 16 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32(
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i32> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32(
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i32> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i32> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i32> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64(
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i64> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i64> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vv_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsle.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i64> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsle_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsle_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ i64,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i64>,
+ i64,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ i64,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i64>,
+ i64,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ i64,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vx_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i64>,
+ i64,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsle.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
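+; Immediate-form (vmsle.vi) tests follow. They reuse the scalar-form
+; declarations above, passing the constant 9 as the second operand so the
+; compare is selected to vmsle.vi rather than vmsle.vx.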
+define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsle_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsle_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsle_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsle_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %1,
+ i64 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsle_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %1,
+ i64 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsle_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_vi_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %1,
+ i64 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
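+
+; Tests for the vmsleu (unsigned less-than-or-equal) compare intrinsics on
+; RV32, covering unmasked and masked variants with an i32 VL operand.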
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
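+  ; The mask operand is produced by an unmasked vmsleu of %1 and %2; the
+  ; masked compare then uses it as v0 and takes %0 as the maskedoff operand.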
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i8> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i8> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i8> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i8> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i8> %3,
+ <vscale x 16 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsleu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i8> %3,
+ <vscale x 32 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i16> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i16> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i16> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i16> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i16> %3,
+ <vscale x 16 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i32> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i32> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i32> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i32> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
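+; Vector-scalar (vmsleu.vx) tests follow; the scalar operand is passed in a
+; GPR.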
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsleu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
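+; The tests below exercise the immediate (vmsleu.vi) form: the scalar operand
+; is the constant 9, which the CHECK lines expect to be encoded directly in the
+; instruction rather than materialized in a GPR.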
+define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsleu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
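+
+; The remainder of this file mirrors the RV32 tests above, but with i64 vl
+; operands: the vmsleu.vv (vector-vector) and vmsleu.vx (vector-scalar) forms
+; are checked across the SEW/LMUL combinations, including SEW=64.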
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
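+; For the masked form the operands are (maskedoff, vector, vector, mask, vl).
+; The test first computes %mask with an unmasked vmsleu and then passes it as
+; the mask operand, so the masked compare is expected to execute under v0
+; (hence the v0.t suffix in the CHECK line).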
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8(
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i8> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8(
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i8> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8(
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i8> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8(
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i8> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i8> %3,
+ <vscale x 16 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsleu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i8> %3,
+ <vscale x 32 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16(
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i16> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16(
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i16> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16(
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i16> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i16> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i16> %3,
+ <vscale x 16 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32(
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i32> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32(
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i32> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i32> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i32> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64(
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i64> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i64> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vv_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsleu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i64> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
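+; From here on, the vector-scalar (vmsleu.vx) form is tested: the scalar source
+; comes from a GPR, as reflected by the {{a[0-9]+}} operand in the CHECK lines,
+; with both unmasked and masked variants for each element type.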
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsleu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ i64,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i64>,
+ i64,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ i64,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i64>,
+ i64,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ i64,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vx_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i64>,
+ i64,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsleu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
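+; Note: the remaining vmsleu cases exercise the immediate (.vi) form; the
+; constant 9 is a representative value that fits the 5-bit immediate field
+; of the vi encoding.
+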
+define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsleu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %1,
+ i64 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %1,
+ i64 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_vi_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %1,
+ i64 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
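+
+; Note: this file covers the vmslt (signed less-than) compare intrinsics on
+; riscv32, so the vl operand in the intrinsic signatures below is i32.
+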
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i8> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i8> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i8> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i8> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i8> %3,
+ <vscale x 16 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmslt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i8> %3,
+ <vscale x 32 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i16> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i16> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i16> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i16> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i16> %3,
+ <vscale x 16 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i32> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i32> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i32> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i32> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmslt_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
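+; Lowering tests for the llvm.riscv.vmslt (signed less-than) intrinsics on RV64,
+; covering both the vector-vector (vmslt.vv) and vector-scalar (vmslt.vx) forms.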
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i8> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i8> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i8> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i8> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i8> %3,
+ <vscale x 16 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmslt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i8> %3,
+ <vscale x 32 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i16> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i16> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i16> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i16> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i16> %3,
+ <vscale x 16 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i32> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i32> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i32> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i32> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i64> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i64> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmslt.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i64> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmslt_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ i64,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i64>,
+ i64,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ i64,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i64>,
+ i64,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ i64,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i64>,
+ i64,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmslt.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
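+; Lowering tests for the llvm.riscv.vmsltu (unsigned less-than) intrinsics on RV32.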
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i8> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i8> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i8> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i8> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i8> %3,
+ <vscale x 16 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsltu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i8> %3,
+ <vscale x 32 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i16> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i16> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i16> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i16> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i16> %3,
+ <vscale x 16 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i32> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i32> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i32> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i32> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsltu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
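+
+; Note (descriptive comment, not part of the generated checks): each masked
+; "_vv" test below first computes %mask with the unmasked vmsltu intrinsic and
+; then passes it, together with a maskedoff operand, to the ".mask" variant, so
+; the second vmsltu.vv/vmsltu.vx is expected to carry the v0.t predicate.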
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8(
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i8> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8(
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i8> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8(
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i8> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8(
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i8> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i8> %3,
+ <vscale x 16 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsltu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i8> %3,
+ <vscale x 32 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16(
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i16> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16(
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i16> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16(
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i16> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i16> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i16> %3,
+ <vscale x 16 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32(
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i32> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32(
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i32> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i32> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i32> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64(
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i64> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i64> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vv_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsltu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i64> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
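+; The vmsltu.vx tests below pass the second source operand as a scalar in a
+; GPR instead of a vector register; the masked variants take the maskedoff
+; value, the mask, and vl directly as arguments.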
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsltu_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ i64,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i64>,
+ i64,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ i64,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i64>,
+ i64,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ i64,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vx_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i64>,
+ i64,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsltu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
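+
+; Tests for the vmsne.vv and vmsne.vx forms on RV32, unmasked and masked.
+; Each masked vv test first builds its mask operand with the unmasked
+; intrinsic and then passes it, together with the maskedoff operand, to the
+; masked intrinsic, so the check lines expect a v0.t operand.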
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i8> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i8> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i8> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i8> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i8> %3,
+ <vscale x 16 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsne_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ i32 %4)
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i8> %3,
+ <vscale x 32 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i16> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i16> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i16> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i16> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ i32 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i16> %3,
+ <vscale x 16 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i32> %3,
+ <vscale x 1 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i32> %3,
+ <vscale x 2 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i32> %3,
+ <vscale x 4 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ i32 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i32> %3,
+ <vscale x 8 x i1> %mask,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsne_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsne_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
--- /dev/null
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
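+
+; Each vmsne intrinsic below is exercised in its unmasked form and its
+; ".mask" form; FileCheck verifies the vsetvli SEW/LMUL configuration and
+; the vmsne encoding, with ", v0.t" expected on the masked variants.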
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8(
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i8> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8(
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i8> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8(
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i8> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8(
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i8> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i8> %3,
+ <vscale x 16 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsne_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ i64 %4)
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i8> %3,
+ <vscale x 32 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16(
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i16> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16(
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i16> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16(
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i16> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i16> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsne_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ i64 %4)
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i16> %3,
+ <vscale x 16 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32(
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i32> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32(
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i32> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i32> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ i64 %4)
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i32> %3,
+ <vscale x 8 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64(
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ i64 %4)
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i64> %3,
+ <vscale x 1 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ i64 %4)
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i64> %3,
+ <vscale x 2 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vv_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ i64 %4)
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i64> %3,
+ <vscale x 4 x i1> %mask,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsne_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsne_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i1> %a
+}
+
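+; Note: the i64 vector-scalar cases below pass the scalar comparison operand
+; directly in a GPR, so a single vmsne.vx is expected; this presumably only
+; holds for the RV64 run of these tests, where XLEN matches the i64 element
+; width.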
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ i64,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i64>,
+ i64,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ i64,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i64>,
+ i64,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ i64,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vx_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i64>,
+ i64,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsne.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i1> %a
+}
+
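+; The remaining tests use a constant scalar operand (9, which fits the signed
+; 5-bit simm5 field), so selection of the immediate form vmsne.vi is expected
+; instead of vmsne.vx.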
+define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsne_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsne_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsne_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsne_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %1,
+ i64 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsne_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %1,
+ i64 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsne_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_vi_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsne.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %1,
+ i64 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}