return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE;
}
+/// Return true if this condition code performs an equality comparison
+/// (SETEQ or SETNE) when used with integer operands.
+inline bool isIntEqualitySetCC(CondCode Code) {
+ return Code == SETEQ || Code == SETNE;
+}
+
/// Return true if the specified condition returns true if the two operands to
/// the condition are equal. Note that if one of the two operands is a NaN,
/// this value is meaningless.
// We can use any register for comparisons
setHasMultipleConditionRegisters();
+ setTargetDAGCombine(ISD::SETCC);
if (Subtarget.hasStdExtZbp()) {
setTargetDAGCombine(ISD::OR);
}
SDValue RHS = N->getOperand(1);
auto CCVal = static_cast<ISD::CondCode>(N->getConstantOperandVal(2));
APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
- if ((CCVal == ISD::SETNE || CCVal == ISD::SETEQ) && isNullConstant(RHS) &&
+ if (ISD::isIntEqualitySetCC(CCVal) && isNullConstant(RHS) &&
LHS.getOpcode() == ISD::XOR && isOneConstant(LHS.getOperand(1)) &&
DAG.MaskedValueIsZero(LHS.getOperand(0), Mask)) {
SDLoc DL(N);
}
break;
}
+ case ISD::SETCC: {
+ // Fold (setcc X, 1, eq/ne) -> (setcc X, 0, ne/eq) when all bits of X
+ // above bit 0 are known zero; comparing with 0 lets isel emit beqz/bnez.
+ SDValue LHS = N->getOperand(0);
+ SDValue RHS = N->getOperand(1);
+ auto CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
+ APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
+ if (isOneConstant(RHS) && ISD::isIntEqualitySetCC(CC) &&
+ DAG.MaskedValueIsZero(LHS, Mask)) {
+ SDLoc DL(N);
+ SDValue Zero = DAG.getConstant(0, DL, LHS.getValueType());
+ CC = ISD::getSetCCInverse(CC, LHS.getValueType());
+ return DAG.getSetCC(DL, N->getValueType(0), LHS, Zero, CC);
+ }
+ break;
+ }
}
return SDValue();
; RV32IFD-NEXT: flt.d a0, ft1, ft0
; RV32IFD-NEXT: flt.d a1, ft0, ft1
; RV32IFD-NEXT: or a0, a1, a0
-; RV32IFD-NEXT: addi a1, zero, 1
-; RV32IFD-NEXT: bne a0, a1, .LBB9_2
+; RV32IFD-NEXT: beqz a0, .LBB9_2
; RV32IFD-NEXT: # %bb.1: # %if.else
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: flt.d a0, ft1, ft0
; RV64IFD-NEXT: flt.d a1, ft0, ft1
; RV64IFD-NEXT: or a0, a1, a0
-; RV64IFD-NEXT: addi a1, zero, 1
-; RV64IFD-NEXT: bne a0, a1, .LBB9_2
+; RV64IFD-NEXT: beqz a0, .LBB9_2
; RV64IFD-NEXT: # %bb.1: # %if.else
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: feq.d a0, ft1, ft1
; RV32IFD-NEXT: feq.d a1, ft0, ft0
; RV32IFD-NEXT: and a0, a1, a0
-; RV32IFD-NEXT: addi a1, zero, 1
-; RV32IFD-NEXT: bne a0, a1, .LBB15_2
+; RV32IFD-NEXT: beqz a0, .LBB15_2
; RV32IFD-NEXT: # %bb.1: # %if.else
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: feq.d a0, ft1, ft1
; RV64IFD-NEXT: feq.d a1, ft0, ft0
; RV64IFD-NEXT: and a0, a1, a0
-; RV64IFD-NEXT: addi a1, zero, 1
-; RV64IFD-NEXT: bne a0, a1, .LBB15_2
+; RV64IFD-NEXT: beqz a0, .LBB15_2
; RV64IFD-NEXT: # %bb.1: # %if.else
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV32IF-NEXT: flt.s a0, ft1, ft0
; RV32IF-NEXT: flt.s a1, ft0, ft1
; RV32IF-NEXT: or a0, a1, a0
-; RV32IF-NEXT: addi a1, zero, 1
-; RV32IF-NEXT: bne a0, a1, .LBB9_2
+; RV32IF-NEXT: beqz a0, .LBB9_2
; RV32IF-NEXT: # %bb.1: # %if.else
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: flt.s a0, ft1, ft0
; RV64IF-NEXT: flt.s a1, ft0, ft1
; RV64IF-NEXT: or a0, a1, a0
-; RV64IF-NEXT: addi a1, zero, 1
-; RV64IF-NEXT: bne a0, a1, .LBB9_2
+; RV64IF-NEXT: beqz a0, .LBB9_2
; RV64IF-NEXT: # %bb.1: # %if.else
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: feq.s a0, ft1, ft1
; RV32IF-NEXT: feq.s a1, ft0, ft0
; RV32IF-NEXT: and a0, a1, a0
-; RV32IF-NEXT: addi a1, zero, 1
-; RV32IF-NEXT: bne a0, a1, .LBB15_2
+; RV32IF-NEXT: beqz a0, .LBB15_2
; RV32IF-NEXT: # %bb.1: # %if.else
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: feq.s a0, ft1, ft1
; RV64IF-NEXT: feq.s a1, ft0, ft0
; RV64IF-NEXT: and a0, a1, a0
-; RV64IF-NEXT: addi a1, zero, 1
-; RV64IF-NEXT: bne a0, a1, .LBB15_2
+; RV64IF-NEXT: beqz a0, .LBB15_2
; RV64IF-NEXT: # %bb.1: # %if.else
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: flt.h a0, fa0, fa1
; RV32IZFH-NEXT: flt.h a1, fa1, fa0
; RV32IZFH-NEXT: or a0, a1, a0
-; RV32IZFH-NEXT: addi a1, zero, 1
-; RV32IZFH-NEXT: bne a0, a1, .LBB9_2
+; RV32IZFH-NEXT: beqz a0, .LBB9_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: flt.h a0, fa0, fa1
; RV64IZFH-NEXT: flt.h a1, fa1, fa0
; RV64IZFH-NEXT: or a0, a1, a0
-; RV64IZFH-NEXT: addi a1, zero, 1
-; RV64IZFH-NEXT: bne a0, a1, .LBB9_2
+; RV64IZFH-NEXT: beqz a0, .LBB9_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: feq.h a0, fa1, fa1
; RV32IZFH-NEXT: feq.h a1, fa0, fa0
; RV32IZFH-NEXT: and a0, a1, a0
-; RV32IZFH-NEXT: addi a1, zero, 1
-; RV32IZFH-NEXT: bne a0, a1, .LBB15_2
+; RV32IZFH-NEXT: beqz a0, .LBB15_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: feq.h a0, fa1, fa1
; RV64IZFH-NEXT: feq.h a1, fa0, fa0
; RV64IZFH-NEXT: and a0, a1, a0
-; RV64IZFH-NEXT: addi a1, zero, 1
-; RV64IZFH-NEXT: bne a0, a1, .LBB15_2
+; RV64IZFH-NEXT: beqz a0, .LBB15_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: and a0, a0, a1
-; RV32I-NEXT: addi a1, zero, 1
-; RV32I-NEXT: bne a0, a1, .LBB1_2
+; RV32I-NEXT: beqz a0, .LBB1_2
; RV32I-NEXT: # %bb.1: # %if.then
; RV32I-NEXT: call both@plt
; RV32I-NEXT: j .LBB1_3
; RV32IBT-NEXT: addi sp, sp, -16
; RV32IBT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IBT-NEXT: and a0, a0, a1
-; RV32IBT-NEXT: addi a1, zero, 1
-; RV32IBT-NEXT: bne a0, a1, .LBB1_2
+; RV32IBT-NEXT: beqz a0, .LBB1_2
; RV32IBT-NEXT: # %bb.1: # %if.then
; RV32IBT-NEXT: call both@plt
; RV32IBT-NEXT: j .LBB1_3
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: and a0, a0, a1
-; RV64I-NEXT: addi a1, zero, 1
-; RV64I-NEXT: bne a0, a1, .LBB1_2
+; RV64I-NEXT: beqz a0, .LBB1_2
; RV64I-NEXT: # %bb.1: # %if.then
; RV64I-NEXT: call both@plt
; RV64I-NEXT: j .LBB1_3
; RV64IBT-NEXT: addi sp, sp, -16
; RV64IBT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IBT-NEXT: and a0, a0, a1
-; RV64IBT-NEXT: addi a1, zero, 1
-; RV64IBT-NEXT: bne a0, a1, .LBB1_2
+; RV64IBT-NEXT: beqz a0, .LBB1_2
; RV64IBT-NEXT: # %bb.1: # %if.then
; RV64IBT-NEXT: call both@plt
; RV64IBT-NEXT: j .LBB1_3
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: or a0, a0, a1
-; RV32I-NEXT: addi a1, zero, 1
-; RV32I-NEXT: bne a0, a1, .LBB1_2
+; RV32I-NEXT: beqz a0, .LBB1_2
; RV32I-NEXT: # %bb.1: # %if.then
; RV32I-NEXT: call either@plt
; RV32I-NEXT: j .LBB1_3
; RV32IBT-NEXT: addi sp, sp, -16
; RV32IBT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IBT-NEXT: or a0, a0, a1
-; RV32IBT-NEXT: addi a1, zero, 1
-; RV32IBT-NEXT: bne a0, a1, .LBB1_2
+; RV32IBT-NEXT: beqz a0, .LBB1_2
; RV32IBT-NEXT: # %bb.1: # %if.then
; RV32IBT-NEXT: call either@plt
; RV32IBT-NEXT: j .LBB1_3
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: or a0, a0, a1
-; RV64I-NEXT: addi a1, zero, 1
-; RV64I-NEXT: bne a0, a1, .LBB1_2
+; RV64I-NEXT: beqz a0, .LBB1_2
; RV64I-NEXT: # %bb.1: # %if.then
; RV64I-NEXT: call either@plt
; RV64I-NEXT: j .LBB1_3
; RV64IBT-NEXT: addi sp, sp, -16
; RV64IBT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IBT-NEXT: or a0, a0, a1
-; RV64IBT-NEXT: addi a1, zero, 1
-; RV64IBT-NEXT: bne a0, a1, .LBB1_2
+; RV64IBT-NEXT: beqz a0, .LBB1_2
; RV64IBT-NEXT: # %bb.1: # %if.then
; RV64IBT-NEXT: call either@plt
; RV64IBT-NEXT: j .LBB1_3