return true;
}
-bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
+using ValidateFn = bool (*)(int64_t);
+
+static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
+ SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget,
+ ValidateFn ValidateImm) {
if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
N.getOpcode() != RISCVISD::VMV_V_X_VL) ||
!isa<ConstantSDNode>(N.getOperand(0)))
return false;
int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();
- // Both ISD::SPLAT_VECTOR and RISCVISD::SPLAT_VECTOR_I64 share semantics when
- // the operand type is wider than the resulting vector element type: an
- // implicit truncation first takes place. Therefore, perform a manual
- // truncation/sign-extension in order to ignore any truncated bits and catch
- // any zero-extended immediate.
+ // ISD::SPLAT_VECTOR, RISCVISD::SPLAT_VECTOR_I64 and RISCVISD::VMV_V_X_VL
+ // share semantics when the operand type is wider than the resulting vector
+ // element type: an implicit truncation first takes place. Therefore, perform
+ // a manual truncation/sign-extension in order to ignore any truncated bits
+ // and catch any zero-extended immediate.
// For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
// sign-extending to (XLenVT -1).
- MVT XLenVT = Subtarget->getXLenVT();
+ MVT XLenVT = Subtarget.getXLenVT();
assert(XLenVT == N.getOperand(0).getSimpleValueType() &&
"Unexpected splat operand type");
MVT EltVT = N.getSimpleValueType().getVectorElementType();
- if (EltVT.bitsLT(XLenVT)) {
+ if (EltVT.bitsLT(XLenVT))
SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
- }
- if (!isInt<5>(SplatImm))
+ if (!ValidateImm(SplatImm))
return false;
- SplatVal = CurDAG->getTargetConstant(SplatImm, SDLoc(N), XLenVT);
+ SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
return true;
}
+bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
+ return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
+ [](int64_t Imm) { return isInt<5>(Imm); });
+}
+
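+// The "Plus1" selectors below accept splats of immediates in the range
+// [-15, 16], i.e. those which remain valid simm5 values once the patterns
+// using them decrement the immediate (via the DecImm transform).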
+bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
+ return selectVSplatSimmHelper(
+ N, SplatVal, *CurDAG, *Subtarget,
+ [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
+}
+
+bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
+ SDValue &SplatVal) {
+ return selectVSplatSimmHelper(
+ N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
+ return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
+ });
+}
+
bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64 &&
bool selectVSplat(SDValue N, SDValue &SplatVal);
bool selectVSplatSimm5(SDValue N, SDValue &SplatVal);
bool selectVSplatUimm5(SDValue N, SDValue &SplatVal);
+ bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal);
+ bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal);
bool selectRVVSimm5(SDValue N, unsigned Width, SDValue &Imm);
template <unsigned Width> bool selectRVVSimm5(SDValue N, SDValue &Imm) {
def SplatPat : ComplexPattern<vAny, 1, "selectVSplat", [splat_vector, rv32_splat_i64], [], 1>;
def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [splat_vector, rv32_splat_i64], [], 2>;
def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimm5", [splat_vector, rv32_splat_i64], [], 2>;
+def SplatPat_simm5_plus1
+ : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1",
+ [splat_vector, rv32_splat_i64], [], 2>;
+def SplatPat_simm5_plus1_nonzero
+ : ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NonZero",
+ [splat_vector, rv32_splat_i64], [], 2>;
class SwapHelper<dag Prefix, dag A, dag B, dag Suffix, bit swap> {
dag Value = !con(Prefix, !if(swap, B, A), !if(swap, A, B), Suffix);
SplatPat_simm5, simm5, swap>;
}
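+// Matches a setcc against a splatted immediate in [-15, 16] by selecting the
+// VI form of the adjacent comparison with the immediate decremented, e.g.
+// (setlt x, imm) -> (vmsle.vi x, imm-1). DecImm is assumed to be an
+// SDNodeXForm producing the immediate minus one.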
+multiclass VPatIntegerSetCCSDNode_VIPlus1<CondCode cc, string instruction_name,
+ ComplexPattern splatpat_kind> {
+ foreach vti = AllIntegerVectors in {
+ defvar instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX);
+ def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
+ (vti.Vector (splatpat_kind simm5:$rs2)),
+ cc)),
+ (instruction vti.RegClass:$rs1, (DecImm simm5:$rs2),
+ vti.AVL, vti.SEW)>;
+ }
+}
+
multiclass VPatFPSetCCSDNode_VV_VF_FV<CondCode cc,
string inst_name,
string swapped_op_inst_name> {
defm : VPatIntegerSetCCSDNode_VV_VX_VI<SETEQ, "PseudoVMSEQ">;
defm : VPatIntegerSetCCSDNode_VV_VX_VI<SETNE, "PseudoVMSNE">;
-// FIXME: Support immediate forms of these by choosing SLE decrementing the
-// immediate
defm : VPatIntegerSetCCSDNode_VV_VX<SETLT, "PseudoVMSLT">;
defm : VPatIntegerSetCCSDNode_VV_VX<SETULT, "PseudoVMSLTU">;
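+// SETLT/SETULT against an immediate splat in [-15, 16] can use the immediate
+// form of VMSLE/VMSLEU with the immediate decremented. The unsigned pattern
+// excludes zero: (ult x, 0) is always false, whereas (ule x, -1) would be
+// always true.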
+defm : VPatIntegerSetCCSDNode_VIPlus1<SETLT, "PseudoVMSLE",
+ SplatPat_simm5_plus1>;
+defm : VPatIntegerSetCCSDNode_VIPlus1<SETULT, "PseudoVMSLEU",
+ SplatPat_simm5_plus1_nonzero>;
defm : VPatIntegerSetCCSDNode_VV<SETGT, "PseudoVMSLT", /*swap*/1>;
defm : VPatIntegerSetCCSDNode_VV<SETUGT, "PseudoVMSLTU", /*swap*/1>;
defm : VPatIntegerSetCCSDNode_VV_VX_VI<SETLE, "PseudoVMSLE">;
defm : VPatIntegerSetCCSDNode_VV_VX_VI<SETULE, "PseudoVMSLEU">;
-// FIXME: Support immediate forms of these by choosing SGT and decrementing the
-// immediate
defm : VPatIntegerSetCCSDNode_VV<SETGE, "PseudoVMSLE", /*swap*/1>;
defm : VPatIntegerSetCCSDNode_VV<SETUGE, "PseudoVMSLEU", /*swap*/1>;
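+// Likewise, SETGE/SETUGE against an immediate splat in [-15, 16] can use the
+// immediate form of VMSGT/VMSGTU with the immediate decremented.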
+defm : VPatIntegerSetCCSDNode_VIPlus1<SETGE, "PseudoVMSGT",
+ SplatPat_simm5_plus1>;
+defm : VPatIntegerSetCCSDNode_VIPlus1<SETUGE, "PseudoVMSGTU",
+ SplatPat_simm5_plus1_nonzero>;
// 12.9. Vector Integer Min/Max Instructions
defm : VPatBinarySDNode_VV_VX<umin, "PseudoVMINU">;
multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_name,
CondCode cc, CondCode invcc> {
defvar instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX);
- defvar ImmPat = !cast<ComplexPattern>("sew"#vti.SEW#"simm5");
def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
(SplatPat_simm5 simm5:$rs2), cc,
(vti.Mask true_mask),
(instruction vti.RegClass:$rs1, simm5:$rs2, GPR:$vl, vti.SEW)>;
}
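+// VL-predicated analogue of VPatIntegerSetCCSDNode_VIPlus1 above: matches
+// riscv_setcc_vl against an immediate splat in [-15, 16] and emits the VI
+// instruction with the immediate decremented.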
+multiclass VPatIntegerSetCCVL_VIPlus1<VTypeInfo vti, string instruction_name,
+ CondCode cc, ComplexPattern splatpat_kind> {
+ defvar instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX);
+ def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
+ (splatpat_kind simm5:$rs2), cc,
+ (vti.Mask true_mask),
+ (XLenVT (VLOp GPR:$vl)))),
+ (instruction vti.RegClass:$rs1, (DecImm simm5:$rs2),
+ GPR:$vl, vti.SEW)>;
+}
+
multiclass VPatFPSetCCVL_VV_VF_FV<CondCode cc,
string inst_name,
string swapped_op_inst_name> {
defm : VPatIntegerSetCCVL_VX_Swappable<vti, "PseudoVMSGTU", SETUGT, SETULT>;
// There is no VMSGE(U)_VX instruction
- // FIXME: Support immediate forms of these by choosing SGT and decrementing
- // the immediate
defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSEQ", SETEQ, SETEQ>;
defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSNE", SETNE, SETNE>;
defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLE", SETLE, SETGE>;
defm : VPatIntegerSetCCVL_VI_Swappable<vti, "PseudoVMSLEU", SETULE, SETUGE>;
+
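+ // As above, SETLT/SETULT and SETGE/SETUGE against an immediate splat can be
+ // rewritten to the decremented-immediate forms of VMSLE(U) and VMSGT(U).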
+ defm : VPatIntegerSetCCVL_VIPlus1<vti, "PseudoVMSLE", SETLT,
+ SplatPat_simm5_plus1>;
+ defm : VPatIntegerSetCCVL_VIPlus1<vti, "PseudoVMSLEU", SETULT,
+ SplatPat_simm5_plus1_nonzero>;
+ defm : VPatIntegerSetCCVL_VIPlus1<vti, "PseudoVMSGT", SETGE,
+ SplatPat_simm5_plus1>;
+ defm : VPatIntegerSetCCVL_VIPlus1<vti, "PseudoVMSGTU", SETUGE,
+ SplatPat_simm5_plus1_nonzero>;
} // foreach vti = AllIntegerVectors
// 12.9. Vector Integer Min/Max Instructions
; CHECK-NEXT: addi a2, zero, 128
; CHECK-NEXT: vsetvli a2, a2, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vmslt.vx v25, v8, zero
+; CHECK-NEXT: vmsle.vi v25, v8, -1
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <128 x i8>, <128 x i8>* %x
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli a2, 8, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
-; CHECK-NEXT: vmv.v.i v26, 0
-; CHECK-NEXT: vmsle.vv v27, v26, v25
-; CHECK-NEXT: vse1.v v27, (a1)
+; CHECK-NEXT: vmsgt.vi v26, v25, -1
+; CHECK-NEXT: vse1.v v26, (a1)
; CHECK-NEXT: ret
%a = load <8 x i8>, <8 x i8>* %x
%b = insertelement <8 x i8> undef, i8 0, i32 0
; CHECK-NEXT: addi a2, zero, 64
; CHECK-NEXT: vsetvli a2, a2, e8,m4,ta,mu
; CHECK-NEXT: vle8.v v28, (a0)
-; CHECK-NEXT: addi a0, zero, 5
-; CHECK-NEXT: vmsltu.vx v25, v28, a0
+; CHECK-NEXT: vmsleu.vi v25, v28, 4
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <64 x i8>, <64 x i8>* %x
; CHECK-NEXT: addi a2, zero, 128
; CHECK-NEXT: vsetvli a2, a2, e8,m8,ta,mu
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vmv.v.i v16, 5
-; CHECK-NEXT: vmsleu.vv v25, v16, v8
+; CHECK-NEXT: vmsgtu.vi v25, v8, 4
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <128 x i8>, <128 x i8>* %x
; CHECK-LABEL: saddo_nvx2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu
-; CHECK-NEXT: vmslt.vx v25, v9, zero
-; CHECK-NEXT: vadd.vv v26, v8, v9
-; CHECK-NEXT: vmslt.vv v27, v26, v8
+; CHECK-NEXT: vadd.vv v25, v8, v9
+; CHECK-NEXT: vmslt.vv v26, v25, v8
+; CHECK-NEXT: vmsle.vi v27, v9, -1
; CHECK-NEXT: vsetvli a0, zero, e8,mf4,ta,mu
-; CHECK-NEXT: vmxor.mm v0, v25, v27
+; CHECK-NEXT: vmxor.mm v0, v27, v26
; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu
-; CHECK-NEXT: vmerge.vim v8, v26, 0, v0
+; CHECK-NEXT: vmerge.vim v8, v25, 0, v0
; CHECK-NEXT: ret
%a = call { <vscale x 2 x i32>, <vscale x 2 x i1> } @llvm.sadd.with.overflow.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y)
%b = extractvalue { <vscale x 2 x i32>, <vscale x 2 x i1> } %a, 0
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
-; FIXME: The scalar/vector operations ('xv' and 'iv' tests) should swap
-; operands and condition codes accordingly in order to generate a 'vx' or 'vi'
-; instruction.
-
define <vscale x 8 x i1> @icmp_eq_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: icmp_eq_vv_nxv8i8:
; CHECK: # %bb.0:
; CHECK-LABEL: icmp_uge_vi_nxv8i8_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.i v25, 15
-; CHECK-NEXT: vmsleu.vv v0, v25, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, 14
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 15, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i8_3:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.i v25, 1
-; CHECK-NEXT: vmsleu.vv v0, v25, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, 0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 1, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i8_4:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.i v25, -15
-; CHECK-NEXT: vmsleu.vv v0, v25, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_5(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i8_5:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.x v25, a0
-; CHECK-NEXT: vmsleu.vv v0, v25, v8
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i8_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, -15
-; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i8_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i1> %vc
}
+declare <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(i8, i32)
+
+; Test that we don't optimize ult x, 0 -> ule x, -1: (ult x, 0) is always
+; false, but with an unsigned comparison (ule x, -1) would be always true.
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_5(<vscale x 8 x i8> %va, i32 %vl) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v8, zero
+; CHECK-NEXT: ret
+ %splat = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(i8 0, i32 %vl)
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
define <vscale x 8 x i1> @icmp_ule_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: icmp_ule_vv_nxv8i8:
; CHECK: # %bb.0:
; CHECK-LABEL: icmp_sge_vi_nxv8i8_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.i v25, -15
-; CHECK-NEXT: vmsle.vv v0, v25, v8
+; CHECK-NEXT: vmsgt.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_sge_vi_nxv8i8_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.i v25, 0
-; CHECK-NEXT: vmsle.vv v0, v25, v8
+; CHECK-NEXT: vmsgt.vi v0, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_sge_vi_nxv8i8_3:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.x v25, a0
-; CHECK-NEXT: vmsle.vv v0, v25, v8
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i8_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, -15
-; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_slt_vi_nxv8i8_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, zero
+; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i8_3:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i16_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmv.v.i v26, 15
-; CHECK-NEXT: vmsleu.vv v0, v26, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, 14
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i16_3:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmv.v.i v26, 1
-; CHECK-NEXT: vmsleu.vv v0, v26, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, 0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 1, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i16_4:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmv.v.i v26, -15
-; CHECK-NEXT: vmsleu.vv v0, v26, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_5(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i16_5:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmv.v.x v26, a0
-; CHECK-NEXT: vmsleu.vv v0, v26, v8
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i1> %vc
}
+; Test that we don't optimize uge x, 0 -> ugt x, -1: (uge x, 0) is always
+; true, but with an unsigned comparison (ugt x, -1) would be always false.
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_6(<vscale x 8 x i8> %va, i32 %vl) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_6:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 0
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v25, v8
+; CHECK-NEXT: ret
+ %splat = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(i8 0, i32 %vl)
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
define <vscale x 8 x i1> @icmp_ult_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: icmp_ult_vv_nxv8i16:
; CHECK: # %bb.0:
define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i16_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, -15
-; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_4(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i16_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_sge_vi_nxv8i16_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmv.v.i v26, -15
-; CHECK-NEXT: vmsle.vv v0, v26, v8
+; CHECK-NEXT: vmsgt.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_sge_vi_nxv8i16_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmv.v.i v26, 0
-; CHECK-NEXT: vmsle.vv v0, v26, v8
+; CHECK-NEXT: vmsgt.vi v0, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_sge_vi_nxv8i16_3:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmv.v.x v26, a0
-; CHECK-NEXT: vmsle.vv v0, v26, v8
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i16_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, -15
-; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_slt_vi_nxv8i16_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, zero
+; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i16_3:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i32_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmv.v.i v28, 15
-; CHECK-NEXT: vmsleu.vv v0, v28, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, 14
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 15, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i32_3:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmv.v.i v28, 1
-; CHECK-NEXT: vmsleu.vv v0, v28, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, 0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 1, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i32_4:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmv.v.i v28, -15
-; CHECK-NEXT: vmsleu.vv v0, v28, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_5(<vscale x 8 x i32> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i32_5:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmv.v.x v28, a0
-; CHECK-NEXT: vmsleu.vv v0, v28, v8
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i32_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, -15
-; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_4(<vscale x 8 x i32> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i32_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_sge_vi_nxv8i32_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmv.v.i v28, -15
-; CHECK-NEXT: vmsle.vv v0, v28, v8
+; CHECK-NEXT: vmsgt.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_sge_vi_nxv8i32_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmv.v.i v28, 0
-; CHECK-NEXT: vmsle.vv v0, v28, v8
+; CHECK-NEXT: vmsgt.vi v0, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
; CHECK-LABEL: icmp_sge_vi_nxv8i32_3:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmv.v.x v28, a0
-; CHECK-NEXT: vmsle.vv v0, v28, v8
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i32_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, -15
-; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_slt_vi_nxv8i32_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, zero
+; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i32_3:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmv.v.i v16, 15
-; CHECK-NEXT: vmsleu.vv v0, v16, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, 14
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 15, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i64_3:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmv.v.i v16, 1
-; CHECK-NEXT: vmsleu.vv v0, v16, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, 0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 1, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i64_4:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmv.v.i v16, -15
-; CHECK-NEXT: vmsleu.vv v0, v16, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_5(<vscale x 8 x i64> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i64_5:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vmsleu.vv v0, v16, v8
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i64_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, -15
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i64_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_sge_vi_nxv8i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmv.v.i v16, -15
-; CHECK-NEXT: vmsle.vv v0, v16, v8
+; CHECK-NEXT: vmsgt.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_sge_vi_nxv8i64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmv.v.i v16, 0
-; CHECK-NEXT: vmsle.vv v0, v16, v8
+; CHECK-NEXT: vmsgt.vi v0, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
; CHECK-LABEL: icmp_sge_vi_nxv8i64_3:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vmsle.vv v0, v16, v8
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i64_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, -15
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_slt_vi_nxv8i64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, zero
+; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i64_3:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
-; FIXME: The scalar/vector operations ('xv' and 'iv' tests) should swap
-; operands and condition codes accordingly in order to generate a 'vx' or 'vi'
-; instruction.
-
define <vscale x 8 x i1> @icmp_eq_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: icmp_eq_vv_nxv8i8:
; CHECK: # %bb.0:
; CHECK-LABEL: icmp_uge_vi_nxv8i8_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.i v25, 15
-; CHECK-NEXT: vmsleu.vv v0, v25, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, 14
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 15, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i8_3:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.i v25, 1
-; CHECK-NEXT: vmsleu.vv v0, v25, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, 0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 1, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i8_4:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.i v25, -15
-; CHECK-NEXT: vmsleu.vv v0, v25, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_5(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i8_5:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.x v25, a0
-; CHECK-NEXT: vmsleu.vv v0, v25, v8
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i1> %vc
}
+; Test that we don't optimize uge x, 0 -> ugt x, -1: (uge x, 0) is always
+; true, but with an unsigned comparison (ugt x, -1) would be always false.
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_6(<vscale x 8 x i8> %va, i64 %vl) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_6:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 0
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v25, v8
+; CHECK-NEXT: ret
+ %splat = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(i8 0, i64 %vl)
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
define <vscale x 8 x i1> @icmp_ult_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: icmp_ult_vv_nxv8i8:
; CHECK: # %bb.0:
define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i8_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, -15
-; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i8_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i1> %vc
}
+declare <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(i8, i64)
+
+; Test that we don't optimize ult x, 0 -> ule x, -1: (ult x, 0) is always
+; false, but with an unsigned comparison (ule x, -1) would be always true.
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_5(<vscale x 8 x i8> %va, i64 %vl) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v8, zero
+; CHECK-NEXT: ret
+ %splat = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8(i8 0, i64 %vl)
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
define <vscale x 8 x i1> @icmp_ule_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: icmp_ule_vv_nxv8i8:
; CHECK: # %bb.0:
; CHECK-LABEL: icmp_sge_vi_nxv8i8_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.i v25, -15
-; CHECK-NEXT: vmsle.vv v0, v25, v8
+; CHECK-NEXT: vmsgt.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_sge_vi_nxv8i8_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.i v25, 0
-; CHECK-NEXT: vmsle.vv v0, v25, v8
+; CHECK-NEXT: vmsgt.vi v0, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_sge_vi_nxv8i8_3:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmv.v.x v25, a0
-; CHECK-NEXT: vmsle.vv v0, v25, v8
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i8_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, -15
-; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_slt_vi_nxv8i8_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, zero
+; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i8_3:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
%splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i16_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmv.v.i v26, 15
-; CHECK-NEXT: vmsleu.vv v0, v26, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, 14
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i16_3:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmv.v.i v26, 1
-; CHECK-NEXT: vmsleu.vv v0, v26, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, 0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 1, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i16_4:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmv.v.i v26, -15
-; CHECK-NEXT: vmsleu.vv v0, v26, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_5(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i16_5:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmv.v.x v26, a0
-; CHECK-NEXT: vmsleu.vv v0, v26, v8
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i16_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, -15
-; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_4(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i16_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_sge_vi_nxv8i16_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmv.v.i v26, -15
-; CHECK-NEXT: vmsle.vv v0, v26, v8
+; CHECK-NEXT: vmsgt.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_sge_vi_nxv8i16_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmv.v.i v26, 0
-; CHECK-NEXT: vmsle.vv v0, v26, v8
+; CHECK-NEXT: vmsgt.vi v0, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_sge_vi_nxv8i16_3:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmv.v.x v26, a0
-; CHECK-NEXT: vmsle.vv v0, v26, v8
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i16_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, -15
-; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_slt_vi_nxv8i16_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, zero
+; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i16_3:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
%splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i32_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmv.v.i v28, 15
-; CHECK-NEXT: vmsleu.vv v0, v28, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, 14
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 15, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i32_3:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmv.v.i v28, 1
-; CHECK-NEXT: vmsleu.vv v0, v28, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, 0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 1, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i32_4:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmv.v.i v28, -15
-; CHECK-NEXT: vmsleu.vv v0, v28, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_5(<vscale x 8 x i32> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i32_5:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmv.v.x v28, a0
-; CHECK-NEXT: vmsleu.vv v0, v28, v8
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i32_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, -15
-; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_4(<vscale x 8 x i32> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i32_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_sge_vi_nxv8i32_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmv.v.i v28, -15
-; CHECK-NEXT: vmsle.vv v0, v28, v8
+; CHECK-NEXT: vmsgt.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_sge_vi_nxv8i32_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmv.v.i v28, 0
-; CHECK-NEXT: vmsle.vv v0, v28, v8
+; CHECK-NEXT: vmsgt.vi v0, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
; CHECK-LABEL: icmp_sge_vi_nxv8i32_3:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmv.v.x v28, a0
-; CHECK-NEXT: vmsle.vv v0, v28, v8
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i32_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, -15
-; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_slt_vi_nxv8i32_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, zero
+; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i32_3:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
%splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmv.v.i v16, 15
-; CHECK-NEXT: vmsleu.vv v0, v16, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, 14
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 15, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i64_3:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmv.v.i v16, 1
-; CHECK-NEXT: vmsleu.vv v0, v16, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, 0
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 1, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_uge_vi_nxv8i64_4:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmv.v.i v16, -15
-; CHECK-NEXT: vmsleu.vv v0, v16, v8
+; CHECK-NEXT: vmsgtu.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_5(<vscale x 8 x i64> %va) {
; CHECK-LABEL: icmp_uge_vi_nxv8i64_5:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vmsleu.vv v0, v16, v8
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i64_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, -15
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
; CHECK-LABEL: icmp_ult_vi_nxv8i64_4:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmsltu.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_sge_vi_nxv8i64_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmv.v.i v16, -15
-; CHECK-NEXT: vmsle.vv v0, v16, v8
+; CHECK-NEXT: vmsgt.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_sge_vi_nxv8i64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmv.v.i v16, 0
-; CHECK-NEXT: vmsle.vv v0, v16, v8
+; CHECK-NEXT: vmsgt.vi v0, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
; CHECK-LABEL: icmp_sge_vi_nxv8i64_3:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmv.v.x v16, a0
-; CHECK-NEXT: vmsle.vv v0, v16, v8
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i64_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, -15
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v8, -16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
; CHECK-LABEL: icmp_slt_vi_nxv8i64_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, zero
+; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
; CHECK-LABEL: icmp_slt_vi_nxv8i64_3:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 16
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmslt.vx v0, v8, a0
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v8, 15
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
%splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer