      SDValue Vfirst =
          DAG.getNode(RISCVISD::VFIRST_VL, DL, XLenVT, Vec, Mask, VL);
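+      // vfirst.m returns the index of the first set mask element, or -1 if no
+      // element is set, so element 0 is set iff the result is zero.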
      return DAG.getSetCC(DL, XLenVT, Vfirst, DAG.getConstant(0, DL, XLenVT),
-                          ISD::SETNE);
+                          ISD::SETEQ);
    }
    if (VecVT.isFixedLengthVector()) {
      unsigned NumElts = VecVT.getVectorNumElements();
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v8, v8, 0
; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
%b = icmp eq <vscale x 1 x i8> %a, zeroinitializer
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v8, v8, 0
; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
%b = icmp eq <vscale x 2 x i8> %a, zeroinitializer
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v8, v8, 0
; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
%b = icmp eq <vscale x 4 x i8> %a, zeroinitializer
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT: vmseq.vi v8, v8, 0
; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 8 x i8>, <vscale x 8 x i8>* %x
%b = icmp eq <vscale x 8 x i8> %a, zeroinitializer
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vmseq.vi v10, v8, 0
; CHECK-NEXT: vfirst.m a0, v10
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 16 x i8>, <vscale x 16 x i8>* %x
%b = icmp eq <vscale x 16 x i8> %a, zeroinitializer
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vmseq.vi v12, v8, 0
; CHECK-NEXT: vfirst.m a0, v12
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 32 x i8>, <vscale x 32 x i8>* %x
%b = icmp eq <vscale x 32 x i8> %a, zeroinitializer
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vi v16, v8, 0
; CHECK-NEXT: vfirst.m a0, v16
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <vscale x 64 x i8>, <vscale x 64 x i8>* %x
%b = icmp eq <vscale x 64 x i8> %a, zeroinitializer
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v8, v8, 0
; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <1 x i8>, ptr %x
%b = icmp eq <1 x i8> %a, zeroinitializer
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v8, v8, 0
; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <2 x i8>, ptr %x
%b = icmp eq <2 x i8> %a, zeroinitializer
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v8, v8, 0
; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <4 x i8>, ptr %x
%b = icmp eq <4 x i8> %a, zeroinitializer
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v8, v8, 0
; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <8 x i8>, ptr %x
%b = icmp eq <8 x i8> %a, zeroinitializer
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v8, v8, 0
; CHECK-NEXT: vfirst.m a0, v8
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <16 x i8>, ptr %x
%b = icmp eq <16 x i8> %a, zeroinitializer
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v10, v8, 0
; CHECK-NEXT: vfirst.m a0, v10
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
%b = icmp eq <32 x i8> %a, zeroinitializer
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v12, v8, 0
; CHECK-NEXT: vfirst.m a0, v12
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <64 x i8>, ptr %x
%b = icmp eq <64 x i8> %a, zeroinitializer
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v16, v8, 0
; CHECK-NEXT: vfirst.m a0, v16
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <128 x i8>, ptr %x
%b = icmp eq <128 x i8> %a, zeroinitializer
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v16, v8, 0
; CHECK-NEXT: vfirst.m a0, v16
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%a = load <256 x i8>, ptr %x
%b = icmp eq <256 x i8> %a, zeroinitializer
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: beqz a1, .LBB0_2
+; RV64ZVE32F-NEXT: bnez a1, .LBB0_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vle8.v v8, (a0)
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: beqz a1, .LBB13_2
+; RV64ZVE32F-NEXT: bnez a1, .LBB13_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vle16.v v8, (a0)
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: beqz a1, .LBB27_2
+; RV64ZVE32F-NEXT: bnez a1, .LBB27_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vle32.v v8, (a0)
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
; RV32ZVE32F-NEXT: vfirst.m a2, v0
-; RV32ZVE32F-NEXT: beqz a2, .LBB42_2
+; RV32ZVE32F-NEXT: bnez a2, .LBB42_2
; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a0, v8
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vfirst.m a2, v0
-; RV64ZVE32F-NEXT: beqz a2, .LBB42_2
+; RV64ZVE32F-NEXT: bnez a2, .LBB42_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
; RV64ZVE32F-NEXT: ld a1, 0(a0)
; RV64ZVE32F-NEXT: .LBB42_2: # %else
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: beqz a1, .LBB58_2
+; RV64ZVE32F-NEXT: bnez a1, .LBB58_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vle16.v v8, (a0)
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: beqz a1, .LBB68_2
+; RV64ZVE32F-NEXT: bnez a1, .LBB68_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vle32.v v8, (a0)
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; RV32ZVE32F-NEXT: vfirst.m a0, v0
-; RV32ZVE32F-NEXT: beqz a0, .LBB81_2
+; RV32ZVE32F-NEXT: bnez a0, .LBB81_2
; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a0, v8
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: beqz a1, .LBB81_2
+; RV64ZVE32F-NEXT: bnez a1, .LBB81_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.load
; RV64ZVE32F-NEXT: fld fa0, 0(a0)
; RV64ZVE32F-NEXT: .LBB81_2: # %else
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: beqz a1, .LBB0_2
+; RV64ZVE32F-NEXT: bnez a1, .LBB0_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vse8.v v8, (a0)
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: beqz a1, .LBB10_2
+; RV64ZVE32F-NEXT: bnez a1, .LBB10_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vse16.v v8, (a0)
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: beqz a1, .LBB22_2
+; RV64ZVE32F-NEXT: bnez a1, .LBB22_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
; RV32ZVE32F-NEXT: vfirst.m a2, v0
-; RV32ZVE32F-NEXT: beqz a2, .LBB36_2
+; RV32ZVE32F-NEXT: bnez a2, .LBB36_2
; RV32ZVE32F-NEXT: # %bb.1: # %cond.store
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vfirst.m a2, v0
-; RV64ZVE32F-NEXT: beqz a2, .LBB36_2
+; RV64ZVE32F-NEXT: bnez a2, .LBB36_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
; RV64ZVE32F-NEXT: sd a0, 0(a1)
; RV64ZVE32F-NEXT: .LBB36_2: # %else
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: beqz a1, .LBB52_2
+; RV64ZVE32F-NEXT: bnez a1, .LBB52_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vse16.v v8, (a0)
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: beqz a1, .LBB62_2
+; RV64ZVE32F-NEXT: bnez a1, .LBB62_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v8, (a0)
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; RV32ZVE32F-NEXT: vfirst.m a0, v0
-; RV32ZVE32F-NEXT: beqz a0, .LBB75_2
+; RV32ZVE32F-NEXT: bnez a0, .LBB75_2
; RV32ZVE32F-NEXT: # %bb.1: # %cond.store
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s a0, v8
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vfirst.m a1, v0
-; RV64ZVE32F-NEXT: beqz a1, .LBB75_2
+; RV64ZVE32F-NEXT: bnez a1, .LBB75_2
; RV64ZVE32F-NEXT: # %bb.1: # %cond.store
; RV64ZVE32F-NEXT: fsd fa0, 0(a0)
; RV64ZVE32F-NEXT: .LBB75_2: # %else
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vfirst.m a1, v0
-; CHECK-NEXT: snez a1, a1
+; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: vmv.x.s a2, v0
; CHECK-NEXT: andi a3, a2, 2
; CHECK-NEXT: or a1, a1, a3
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.or.v1i1(<1 x i1> %v)
ret i1 %red
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.xor.v1i1(<1 x i1> %v)
ret i1 %red
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.and.v1i1(<1 x i1> %v)
ret i1 %red
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.umax.v1i1(<1 x i1> %v)
ret i1 %red
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.smax.v1i1(<1 x i1> %v)
ret i1 %red
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.umin.v1i1(<1 x i1> %v)
ret i1 %red
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.smin.v1i1(<1 x i1> %v)
ret i1 %red
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vfirst.m a0, v0
-; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: ret
%red = call i1 @llvm.vector.reduce.add.v1i1(<1 x i1> %v)
ret i1 %red