return SDValue(N, 0);
}]>;
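+// SDNodeXForm that produces the matched immediate minus one; used below to
+// rewrite "x < imm" comparisons as "x <= imm - 1".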
+def DecImm : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(N->getSExtValue() - 1, SDLoc(N),
+ N->getValueType(0));
+}]>;
+
//===----------------------------------------------------------------------===//
// Utilities.
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryM_VX_VI<"int_riscv_vmsgtu", "PseudoVMSGTU", AllIntegerVectors>;
defm "" : VPatBinaryM_VX_VI<"int_riscv_vmsgt", "PseudoVMSGT", AllIntegerVectors>;
+// Match vmslt(u).vx intrinsics to vmsle(u).vi if the scalar is -15 to 16. This
+// avoids the user needing to know that there is no vmslt(u).vi instruction.
+// This is limited to vmslt(u).vx as there is no vmsge(u).vx intrinsic or
+// instruction.
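+// For example, a vmslt.vx intrinsic with scalar 7 is selected as vmsle.vi
+// with immediate 6 via the DecImm transform above.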
+foreach vti = AllIntegerVectors in {
+ def : Pat<(vti.Mask (int_riscv_vmslt (vti.Vector vti.RegClass:$rs1),
+ (vti.Scalar simm5_plus1:$rs2), GPR:$vl)),
+ (!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
+ (DecImm simm5_plus1:$rs2),
+ (NoX0 GPR:$vl),
+ vti.SEW)>;
+ def : Pat<(vti.Mask (int_riscv_vmslt_mask (vti.Mask VR:$merge),
+ (vti.Vector vti.RegClass:$rs1),
+ (vti.Scalar simm5_plus1:$rs2),
+ (vti.Mask V0),
+ GPR:$vl)),
+ (!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX#"_MASK")
+ VR:$merge,
+ vti.RegClass:$rs1,
+ (DecImm simm5_plus1:$rs2),
+ (vti.Mask V0),
+ (NoX0 GPR:$vl),
+ vti.SEW)>;
+
+ def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
+ (vti.Scalar simm5_plus1:$rs2), GPR:$vl)),
+ (!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
+ (DecImm simm5_plus1:$rs2),
+ (NoX0 GPR:$vl),
+ vti.SEW)>;
+ def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask VR:$merge),
+ (vti.Vector vti.RegClass:$rs1),
+ (vti.Scalar simm5_plus1:$rs2),
+ (vti.Mask V0),
+ GPR:$vl)),
+ (!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX#"_MASK")
+ VR:$merge,
+ vti.RegClass:$rs1,
+ (DecImm simm5_plus1:$rs2),
+ (vti.Mask V0),
+ (NoX0 GPR:$vl),
+ vti.SEW)>;
+
+ // Special cases to avoid matching vmsltu.vx with scalar 0 (always false) to
+ // vmsleu.vi with -1 (always true). Instead match to vmsne.vv.
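+ // vmsne.vv with the same register for both sources produces all zeros, which
+ // matches the always-false result of an unsigned compare against 0.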
+ def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
+ (vti.Scalar 0), GPR:$vl)),
+ (!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
+ vti.RegClass:$rs1,
+ (NoX0 GPR:$vl),
+ vti.SEW)>;
+ def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask VR:$merge),
+ (vti.Vector vti.RegClass:$rs1),
+ (vti.Scalar 0),
+ (vti.Mask V0),
+ GPR:$vl)),
+ (!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX#"_MASK")
+ VR:$merge,
+ vti.RegClass:$rs1,
+ vti.RegClass:$rs1,
+ (vti.Mask V0),
+ (NoX0 GPR:$vl),
+ vti.SEW)>;
+}
+
//===----------------------------------------------------------------------===//
// 12.9. Vector Integer Min/Max Instructions
//===----------------------------------------------------------------------===//
ret <vscale x 8 x i1> %a
}
+
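+; Test that vmslt.vx intrinsics with an immediate in the range [-15, 16] are
+; selected as vmsle.vi with the immediate decremented by one.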
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -16
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 -15,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -15, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 -14,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -14
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 -13,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -13, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 -12,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -12
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 -11,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -11, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 -10,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -10
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 -9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 -8,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -8
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 -7,
+ i32 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -7, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 -6,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmslt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -6
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 -5,
+ i32 %1)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -5, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 -4,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -4
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 -3,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -3, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 -2,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -2
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 -1,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -1, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 0,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -1
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 0,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 0, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 1,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 1
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 2,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 2, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 3,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 3
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 4,
+ i32 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 4, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 5,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 5
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 6,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 6, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 7,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 7
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 8,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 8, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 10,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 10, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 11,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 11
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 12,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 12, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 13,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
ret <vscale x 4 x i1> %a
}
+
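+; Test that vmslt.vx intrinsics with an immediate in the range [-15, 16],
+; including i64 element types on RV64, are selected as vmsle.vi with the
+; immediate decremented by one.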
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -16
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 -15,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -15, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 -14,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -14
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 -13,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -13, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 -12,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -12
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 -11,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -11, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 -10,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -10
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 -9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 -8,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -8
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 -7,
+ i64 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -7, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 -6,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmslt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -6
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 -5,
+ i64 %1)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -5, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 -4,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -4
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 -3,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -3, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 -2,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -2
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 -1,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -1, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 0,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -1
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 0,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 0, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 1,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 1
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 2,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 2, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 3,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 3
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 4,
+ i64 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 4, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 5,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 5
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 6,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 6, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 7,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 7
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 8,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 8, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 10,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 10, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 11,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 11
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 12,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 12, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 13,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 13
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 14,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 14, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %1,
+ i64 15,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 15
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 16,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -16, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %1,
+ i64 -15,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -15
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 -14,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
+; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -14, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %1,
+ i64 -13,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
ret <vscale x 8 x i1> %a
}
+
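+; Test that vmsltu.vx intrinsics with an immediate in the range [-15, 16] are
+; selected as vmsleu.vi with the immediate decremented by one, and that an
+; immediate of 0 is instead selected as an always-false vmsne.vv.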
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -16
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 -15,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -15, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 -14,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -14
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 -13,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -13, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 -12,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -12
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 -11,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -11, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 -10,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -10
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 -9,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 -8,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -8
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 -7,
+ i32 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -7, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 -6,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsltu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -6
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 -5,
+ i32 %1)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -5, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 -4,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -4
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 -3,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -3, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 -2,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -2
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 -1,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, [[REG:v[0-9]+]], [[REG]], v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 0,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, [[REG:v[0-9]+]], [[REG]]
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 0,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 0, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 1,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 1
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 2,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 2, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 3,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 3
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 4,
+ i32 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 4, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 5,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 5
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 6,
+ i32 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 6, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 7,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 7
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 8,
+ i32 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 8, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 10,
+ i32 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 10, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 11,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 11
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 12,
+ i32 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 12, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 13,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i1> %a
+}
ret <vscale x 4 x i1> %a
}
+
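+; Test that vmsltu.vx intrinsics with an immediate in the range [-15, 16],
+; including i64 element types on RV64, are selected as vmsleu.vi with the
+; immediate decremented by one, and that an immediate of 0 is instead
+; selected as an always-false vmsne.vv.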
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -16
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 -15,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -15, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i8> %1,
+ i8 -14,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -14
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 -13,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -13, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i8> %1,
+ i8 -12,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -12
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 -11,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -11, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i8> %1,
+ i8 -10,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -10
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 -9,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i8> %1,
+ i8 -8,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -8
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 -7,
+ i64 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -7, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i8> %1,
+ i8 -6,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsltu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -6
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 -5,
+ i64 %1)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -5, v0.t
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i8> %1,
+ i8 -4,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -4
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 -3,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -3, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i16> %1,
+ i16 -2,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -2
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 -1,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, [[REG:v[0-9]+]], [[REG]], v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i16> %1,
+ i16 0,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vmsne.vv {{v[0-9]+}}, [[REG:v[0-9]+]], [[REG]]
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 0,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 0, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i16> %1,
+ i16 1,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 1
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 2,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 2, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i16> %1,
+ i16 3,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 3
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 4,
+ i64 %1)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 4, v0.t
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i16> %1,
+ i16 5,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 5
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 6,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 6, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i32> %1,
+ i32 7,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 7
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 8,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 8, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 10,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 10, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i32> %1,
+ i32 11,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 11
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 12,
+ i64 %1)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 12, v0.t
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i32> %1,
+ i32 13,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 13
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 14,
+ i64 %1)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 14, v0.t
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %1,
+ i64 15,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 15
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 16,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -16, v0.t
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %1,
+ i64 -15,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -15
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 -14,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
+; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -14, v0.t
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %1,
+ i64 -13,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}