let VLMul = m.value;
}
-class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit isFF> :
+class VPseudoUSLoadNoMask<VReg RetClass, int EEW, bit isFF, bit DummyMask = 1> :
Pseudo<(outs RetClass:$rd),
(ins GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
- let HasDummyMask = 1;
+ let HasDummyMask = DummyMask;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-class VPseudoUSStoreNoMask<VReg StClass, int EEW>:
+class VPseudoUSStoreNoMask<VReg StClass, int EEW, bit DummyMask = 1>:
Pseudo<(outs),
(ins StClass:$rd, GPR:$rs1, AVL:$vl, ixlenimm:$sew),[]>,
RISCVVPseudo,
let hasSideEffects = 0;
let HasVLOp = 1;
let HasSEWOp = 1;
- let HasDummyMask = 1;
+ let HasDummyMask = DummyMask;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoBinaryNoMask<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
- string Constraint> :
+ string Constraint,
+ bit DummyMask = 1> :
Pseudo<(outs RetClass:$rd),
(ins Op1Class:$rs2, Op2Class:$rs1, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let Constraints = Constraint;
let HasVLOp = 1;
let HasSEWOp = 1;
- let HasDummyMask = 1;
+ let HasDummyMask = DummyMask;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
multiclass VPseudoLoadMask {
foreach mti = AllMasks in {
let VLMul = mti.LMul.value in {
- def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*isFF*/0>;
+ def "_V_" # mti.BX : VPseudoUSLoadNoMask<VR, /*EEW*/1, /*isFF*/0,
+ /*DummyMask*/0>;
}
}
}
multiclass VPseudoStoreMask {
foreach mti = AllMasks in {
let VLMul = mti.LMul.value in {
- def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, /*EEW*/1>;
+ def "_V_" # mti.BX : VPseudoUSStoreNoMask<VR, /*EEW*/1, /*DummyMask*/0>;
}
}
}
multiclass VPseudoVALU_MM {
foreach m = MxList in
let VLMul = m.value in {
- def "_MM_" # m.MX : VPseudoBinaryNoMask<VR, VR, VR, "">,
+ def "_MM_" # m.MX : VPseudoBinaryNoMask<VR, VR, VR, "", /*DummyMask*/0>,
Sched<[WriteVMALUV, ReadVMALUV, ReadVMALUV]>;
}
}
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vle16.v v12, (a1)
; CHECK-NEXT: vmflt.vv v16, v12, v8
-; CHECK-NEXT: vmnand.mm v8, v16, v16
+; CHECK-NEXT: vmnot.m v8, v16
; CHECK-NEXT: vsm.v v8, (a2)
; CHECK-NEXT: ret
%a = load <32 x half>, <32 x half>* %x
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vle32.v v12, (a1)
; CHECK-NEXT: vmflt.vv v16, v8, v12
-; CHECK-NEXT: vmnand.mm v8, v16, v16
+; CHECK-NEXT: vmnot.m v8, v16
; CHECK-NEXT: vsm.v v8, (a2)
; CHECK-NEXT: ret
%a = load <16 x float>, <16 x float>* %x
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vle64.v v12, (a1)
; CHECK-NEXT: vmfle.vv v16, v12, v8
-; CHECK-NEXT: vmnand.mm v8, v16, v16
+; CHECK-NEXT: vmnot.m v8, v16
; CHECK-NEXT: vsm.v v8, (a2)
; CHECK-NEXT: ret
%a = load <8 x double>, <8 x double>* %x
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vle16.v v16, (a1)
; CHECK-NEXT: vmfle.vv v24, v8, v16
-; CHECK-NEXT: vmnand.mm v8, v24, v24
+; CHECK-NEXT: vmnot.m v8, v24
; CHECK-NEXT: vsm.v v8, (a2)
; CHECK-NEXT: ret
%a = load <64 x half>, <64 x half>* %x
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vmfgt.vf v12, v8, fa0
-; CHECK-NEXT: vmnand.mm v8, v12, v12
+; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <32 x half>, <32 x half>* %x
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vmflt.vf v12, v8, fa0
-; CHECK-NEXT: vmnand.mm v8, v12, v12
+; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <16 x float>, <16 x float>* %x
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vmfge.vf v12, v8, fa0
-; CHECK-NEXT: vmnand.mm v8, v12, v12
+; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <8 x double>, <8 x double>* %x
; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vmfle.vf v16, v8, fa0
-; CHECK-NEXT: vmnand.mm v8, v16, v16
+; CHECK-NEXT: vmnot.m v8, v16
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <64 x half>, <64 x half>* %x
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vmflt.vf v12, v8, fa0
-; CHECK-NEXT: vmnand.mm v8, v12, v12
+; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <32 x half>, <32 x half>* %x
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vmfgt.vf v12, v8, fa0
-; CHECK-NEXT: vmnand.mm v8, v12, v12
+; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <16 x float>, <16 x float>* %x
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vmfle.vf v12, v8, fa0
-; CHECK-NEXT: vmnand.mm v8, v12, v12
+; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <8 x double>, <8 x double>* %x
; CHECK-NEXT: vsetvli zero, a2, e16, m8, ta, mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vmfge.vf v16, v8, fa0
-; CHECK-NEXT: vmnand.mm v8, v16, v16
+; CHECK-NEXT: vmnot.m v8, v16
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <64 x half>, <64 x half>* %x
; CHECK-NEXT: li a1, 64
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vlm.v v8, (a0)
-; CHECK-NEXT: vmnand.mm v8, v8, v8
+; CHECK-NEXT: vmnot.m v8, v8
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = load <64 x i1>, <64 x i1>* %x
; CHECK-LABEL: vpreduce_and_v1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v9, v0, v0
+; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-LABEL: vpreduce_and_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v9, v0, v0
+; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-LABEL: vpreduce_and_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vmnand.mm v9, v0, v0
+; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-LABEL: vpreduce_and_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v9, v0, v0
+; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-LABEL: vpreduce_and_v10i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v9, v0, v0
+; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-LABEL: vpreduce_and_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v9, v0, v0
+; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: mv a3, a2
; CHECK-NEXT: .LBB14_2:
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu
-; CHECK-NEXT: vmnand.mm v8, v8, v8
+; CHECK-NEXT: vmnot.m v8, v8
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vcpop.m a2, v8, v0.t
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB14_4:
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vmnand.mm v8, v11, v11
+; CHECK-NEXT: vmnot.m v8, v11
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vcpop.m a1, v8, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmfle.vv v8, v8, v9, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl)
ret <8 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmfle.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
%vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmfge.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
%vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmflt.vv v8, v8, v9, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"uge", <8 x i1> %m, i32 %evl)
ret <8 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmflt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
%vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmfgt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
%vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmfle.vv v8, v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ult", <8 x i1> %m, i32 %evl)
ret <8 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmfge.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
%vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmfle.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
%vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmflt.vv v8, v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%v = call <8 x i1> @llvm.vp.fcmp.v8f16(<8 x half> %va, <8 x half> %vb, metadata !"ule", <8 x i1> %m, i32 %evl)
ret <8 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmfgt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
%vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmflt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
%vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfle.vv v16, v8, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl)
ret <8 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfle.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
%vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfge.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
%vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vv v16, v8, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"uge", <8 x i1> %m, i32 %evl)
ret <8 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
%vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfgt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
%vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfle.vv v16, v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ult", <8 x i1> %m, i32 %evl)
ret <8 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfge.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
%vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfle.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
%vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vv v16, v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%v = call <8 x i1> @llvm.vp.fcmp.v8f64(<8 x double> %va, <8 x double> %vb, metadata !"ule", <8 x i1> %m, i32 %evl)
ret <8 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmfgt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
%vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmflt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
%vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer
; CHECK-LABEL: vreduce_and_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v8, v0, v0
+; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-LABEL: vreduce_and_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
-; CHECK-NEXT: vmnand.mm v8, v0, v0
+; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-LABEL: vreduce_and_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v8, v0, v0
+; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-LABEL: vreduce_and_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v8, v0, v0
+; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: li a0, 32
; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; LMULMAX8-NEXT: vmnand.mm v8, v0, v0
+; LMULMAX8-NEXT: vmnot.m v8, v0
; LMULMAX8-NEXT: vcpop.m a0, v8
; LMULMAX8-NEXT: seqz a0, a0
; LMULMAX8-NEXT: neg a0, a0
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: li a0, 64
; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; LMULMAX8-NEXT: vmnand.mm v8, v0, v0
+; LMULMAX8-NEXT: vmnot.m v8, v0
; LMULMAX8-NEXT: vcpop.m a0, v8
; LMULMAX8-NEXT: seqz a0, a0
; LMULMAX8-NEXT: neg a0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfle.vv v8, v8, v9, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfle.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfge.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmflt.vv v8, v8, v9, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"uge", <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmflt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfgt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfle.vv v8, v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"ult", <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfge.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfle.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmflt.vv v8, v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, metadata !"ule", <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfgt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmflt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfle.vv v12, v8, v10, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmflt.vv v12, v8, v10, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"uge", <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfle.vv v12, v10, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"ult", <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmflt.vv v12, v10, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, metadata !"ule", <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfle.vv v8, v8, v9, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"ugt", <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfle.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfge.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmflt.vv v8, v8, v9, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"uge", <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmflt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfgt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfle.vv v8, v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"ult", <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfge.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfle.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmflt.vv v8, v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%v = call <vscale x 1 x i1> @llvm.vp.fcmp.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, metadata !"ule", <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfgt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmflt.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 1 x double> %elt.head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfle.vv v24, v8, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v24, v24
+; CHECK-NEXT: vmnot.m v0, v24
; CHECK-NEXT: ret
%v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"ugt", <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfle.vf v16, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfge.vf v16, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmflt.vv v24, v8, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v24, v24
+; CHECK-NEXT: vmnot.m v0, v24
; CHECK-NEXT: ret
%v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"uge", <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmflt.vf v16, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfgt.vf v16, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfle.vv v24, v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v24, v24
+; CHECK-NEXT: vmnot.m v0, v24
; CHECK-NEXT: ret
%v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"ult", <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfge.vf v16, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfle.vf v16, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmflt.vv v24, v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v24, v24
+; CHECK-NEXT: vmnot.m v0, v24
; CHECK-NEXT: ret
%v = call <vscale x 8 x i1> @llvm.vp.fcmp.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, metadata !"ule", <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i1> %v
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfgt.vf v16, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmflt.vf v16, v8, fa0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfle.vv v12, v8, v10
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%vc = fcmp ugt <vscale x 8 x half> %va, %vb
ret <vscale x 8 x i1> %vc
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfle.vf v10, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfge.vf v10, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vv v12, v8, v10
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%vc = fcmp uge <vscale x 8 x half> %va, %vb
ret <vscale x 8 x i1> %vc
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfgt.vf v10, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfle.vv v12, v10, v8
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%vc = fcmp ult <vscale x 8 x half> %va, %vb
ret <vscale x 8 x i1> %vc
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfge.vf v10, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfle.vf v10, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vv v12, v10, v8
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%vc = fcmp ule <vscale x 8 x half> %va, %vb
ret <vscale x 8 x i1> %vc
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmfgt.vf v10, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmfle.vv v16, v8, v12
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%vc = fcmp ugt <vscale x 8 x float> %va, %vb
ret <vscale x 8 x i1> %vc
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmfle.vf v12, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmfge.vf v12, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vv v16, v8, v12
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%vc = fcmp uge <vscale x 8 x float> %va, %vb
ret <vscale x 8 x i1> %vc
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmfgt.vf v12, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmfle.vv v16, v12, v8
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%vc = fcmp ult <vscale x 8 x float> %va, %vb
ret <vscale x 8 x i1> %vc
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmfge.vf v12, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmfle.vf v12, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vv v16, v12, v8
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%vc = fcmp ule <vscale x 8 x float> %va, %vb
ret <vscale x 8 x i1> %vc
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmfgt.vf v12, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x float> poison, float %b, i32 0
%splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmfle.vv v24, v8, v16
-; CHECK-NEXT: vmnand.mm v0, v24, v24
+; CHECK-NEXT: vmnot.m v0, v24
; CHECK-NEXT: ret
%vc = fcmp ugt <vscale x 8 x double> %va, %vb
ret <vscale x 8 x i1> %vc
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmfle.vf v16, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmfge.vf v16, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vv v24, v8, v16
-; CHECK-NEXT: vmnand.mm v0, v24, v24
+; CHECK-NEXT: vmnot.m v0, v24
; CHECK-NEXT: ret
%vc = fcmp uge <vscale x 8 x double> %va, %vb
ret <vscale x 8 x i1> %vc
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmfgt.vf v16, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmfle.vv v24, v16, v8
-; CHECK-NEXT: vmnand.mm v0, v24, v24
+; CHECK-NEXT: vmnot.m v0, v24
; CHECK-NEXT: ret
%vc = fcmp ult <vscale x 8 x double> %va, %vb
ret <vscale x 8 x i1> %vc
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmfge.vf v16, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmfle.vf v16, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vv v24, v16, v8
-; CHECK-NEXT: vmnand.mm v0, v24, v24
+; CHECK-NEXT: vmnot.m v0, v24
; CHECK-NEXT: ret
%vc = fcmp ule <vscale x 8 x double> %va, %vb
ret <vscale x 8 x i1> %vc
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmfgt.vf v16, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v8, fa0
-; CHECK-NEXT: vmnand.mm v0, v16, v16
+; CHECK-NEXT: vmnot.m v0, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v8, v8
+; CHECK-NEXT: vmnot.m v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v10, v10
+; CHECK-NEXT: vmnot.m v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmnand.mm v0, v12, v12
+; CHECK-NEXT: vmnot.m v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
; CHECK-LABEL: vpreduce_and_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v9, v0, v0
+; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-LABEL: vpreduce_and_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vmnand.mm v9, v0, v0
+; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-LABEL: vpreduce_and_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v9, v0, v0
+; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-LABEL: vpreduce_and_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v9, v0, v0
+; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-LABEL: vpreduce_and_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vmnand.mm v9, v0, v0
+; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-LABEL: vpreduce_and_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vmnand.mm v9, v0, v0
+; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-LABEL: vpreduce_and_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vmnand.mm v9, v0, v0
+; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-LABEL: vreduce_and_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmnand.mm v8, v0, v0
+; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-LABEL: vreduce_and_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vmnand.mm v8, v0, v0
+; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-LABEL: vreduce_and_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmnand.mm v8, v0, v0
+; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-LABEL: vreduce_and_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmnand.mm v8, v0, v0
+; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-LABEL: vreduce_and_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT: vmnand.mm v8, v0, v0
+; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-LABEL: vreduce_and_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
-; CHECK-NEXT: vmnand.mm v8, v0, v0
+; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-LABEL: vreduce_and_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
-; CHECK-NEXT: vmnand.mm v8, v0, v0
+; CHECK-NEXT: vmnot.m v8, v0
; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0