Cond2, Name);
}
+ /// Create the "logical" (select-based) form of an and/or: dispatches to
+ /// CreateLogicalAnd or CreateLogicalOr depending on \p Opc.
+ /// \p Opc must be Instruction::And or Instruction::Or; any other opcode
+ /// is a programming error (hits llvm_unreachable).
+ Value *CreateLogicalOp(Instruction::BinaryOps Opc, Value *Cond1, Value *Cond2,
+ const Twine &Name = "") {
+ switch (Opc) {
+ case Instruction::And:
+ return CreateLogicalAnd(Cond1, Cond2, Name);
+ case Instruction::Or:
+ return CreateLogicalOr(Cond1, Cond2, Name);
+ default:
+ // Fall through to the unreachable below for non-logical opcodes.
+ break;
+ }
+ llvm_unreachable("Not a logical operation.");
+ }
+
// NOTE: this is sequential, non-commutative, ordered reduction!
Value *CreateLogicalOr(ArrayRef<Value *> Ops) {
assert(!Ops.empty());
}
// (~x) & y --> ~(x | (~y)) iff that gets rid of inversions
- if (sinkNotIntoOtherHandOfAndOrOr(I))
+ if (sinkNotIntoOtherHandOfLogicalOp(I))
return &I;
// An and recurrence w/loop invariant step is equivalent to (and start, step)
}
// (~x) | y --> ~(x & (~y)) iff that gets rid of inversions
- if (sinkNotIntoOtherHandOfAndOrOr(I))
+ if (sinkNotIntoOtherHandOfLogicalOp(I))
return &I;
// Improve "get low bit mask up to and including bit X" pattern:
// into:
// z = ~(x |/& (~y))
// iff y is free to invert and all uses of z can be freely updated.
-bool InstCombinerImpl::sinkNotIntoOtherHandOfAndOrOr(BinaryOperator &I) {
- Instruction::BinaryOps NewOpc;
- switch (I.getOpcode()) {
- case Instruction::And:
- NewOpc = Instruction::Or;
- break;
- case Instruction::Or:
- NewOpc = Instruction::And;
- break;
- default:
- return false;
- };
-
- Value *X, *Y;
- if (!match(&I, m_c_BinOp(m_Not(m_Value(X)), m_Value(Y))))
+bool InstCombinerImpl::sinkNotIntoOtherHandOfLogicalOp(Instruction &I) {
+ // Accept both the plain binary and the select-based ("logical") forms
+ // of and/or; Op0/Op1 are the two hands of the operation.
+ Value *Op0, *Op1;
+ if (!match(&I, m_LogicalOp(m_Value(Op0), m_Value(Op1))))
return false;
-
- // Will we be able to fold the `not` into Y eventually?
- if (!InstCombiner::isFreeToInvert(Y, Y->hasOneUse()))
+ // De Morgan: the inverted form of an `and` is an `or` and vice versa,
+ // so the rebuilt inner operation uses the dual opcode.
+ Instruction::BinaryOps NewOpc =
+ match(&I, m_LogicalAnd()) ? Instruction::Or : Instruction::And;
+ bool IsBinaryOp = isa<BinaryOperator>(I);
+
+ // One hand must already be a `not`, and the *other* hand must be free to
+ // invert (so the new `not` folds away). Record the operand to invert by
+ // address so it can be replaced in place below; strip the existing `not`
+ // from the matched hand.
+ Value *NotOp0 = nullptr;
+ Value *NotOp1 = nullptr;
+ Value **OpToInvert = nullptr;
+ if (match(Op0, m_Not(m_Value(NotOp0))) &&
+ InstCombiner::isFreeToInvert(Op1, Op1->hasOneUse())) {
+ Op0 = NotOp0;
+ OpToInvert = &Op1;
+ } else if (match(Op1, m_Not(m_Value(NotOp1))) &&
+ InstCombiner::isFreeToInvert(Op0, Op0->hasOneUse())) {
+ Op1 = NotOp1;
+ OpToInvert = &Op0;
+ } else
return false;
// And can our users be adapted?
if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr))
return false;
- Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
- Value *NewBinOp =
- BinaryOperator::Create(NewOpc, X, NotY, I.getName() + ".not");
- Builder.Insert(NewBinOp);
+ // Invert the chosen operand in place, then rebuild with the dual opcode,
+ // preserving the binary vs. select ("logical") form of the original op.
+ // NOTE(review): for the select form the operand positions are kept as-is;
+ // the two hands of a logical op are not interchangeable w.r.t. poison
+ // (the second hand is only conditionally evaluated) — worth confirming
+ // this is poison-safe when OpToInvert is the first hand.
+ *OpToInvert =
+ Builder.CreateNot(*OpToInvert, (*OpToInvert)->getName() + ".not");
+ Value *NewBinOp;
+ if (IsBinaryOp)
+ NewBinOp = Builder.CreateBinOp(NewOpc, Op0, Op1, I.getName() + ".not");
+ else
+ NewBinOp = Builder.CreateLogicalOp(NewOpc, Op0, Op1, I.getName() + ".not");
replaceInstUsesWith(I, NewBinOp);
// We can not just create an outer `not`, it will most likely be immediately
// folded back, reconstructing our initial pattern, and causing an
Value *simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, bool Inverted);
Instruction *visitAnd(BinaryOperator &I);
Instruction *visitOr(BinaryOperator &I);
- bool sinkNotIntoOtherHandOfAndOrOr(BinaryOperator &I);
+ bool sinkNotIntoOtherHandOfLogicalOp(Instruction &I);
Instruction *visitXor(BinaryOperator &I);
Instruction *visitShl(BinaryOperator &I);
Value *reassociateShiftAmtsOfTwoSameDirectionShifts(
if (Instruction *I = foldNestedSelects(SI, Builder))
return I;
+ // Match logical variants of the pattern,
+ // and transform them iff that gets rid of inversions.
+ // (~x) | y --> ~(x & (~y))
+ // (~x) & y --> ~(x | (~y))
+ if (sinkNotIntoOtherHandOfLogicalOp(SI))
+ return &SI;
+
return nullptr;
}
define i32 @t0(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
; CHECK-LABEL: @t0(
; CHECK-NEXT: [[I1:%.*]] = icmp ne i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = or i1 [[I1]], [[I0:%.*]]
-; CHECK-NEXT: [[I4:%.*]] = select i1 [[TMP1]], i32 [[V3:%.*]], i32 [[V2:%.*]]
+; CHECK-NEXT: [[I3_NOT:%.*]] = or i1 [[I1]], [[I0:%.*]]
+; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V3:%.*]], i32 [[V2:%.*]]
; CHECK-NEXT: ret i32 [[I4]]
;
%i1 = icmp eq i32 %v0, %v1
; CHECK-NEXT: [[I0:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
; CHECK-NEXT: [[I1:%.*]] = icmp ne i32 [[V2:%.*]], [[V3:%.*]]
; CHECK-NEXT: call void @use1(i1 [[I0]])
-; CHECK-NEXT: [[TMP1:%.*]] = or i1 [[I0]], [[I1]]
-; CHECK-NEXT: [[I4:%.*]] = select i1 [[TMP1]], i32 [[V5:%.*]], i32 [[V4:%.*]]
+; CHECK-NEXT: [[I3_NOT:%.*]] = or i1 [[I1]], [[I0]]
+; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V5:%.*]], i32 [[V4:%.*]]
; CHECK-NEXT: ret i32 [[I4]]
;
%i0 = icmp eq i32 %v0, %v1
; Most basic positive test
define i32 @t0(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
; CHECK-LABEL: @t0(
-; CHECK-NEXT: [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT: [[I2:%.*]] = xor i1 [[I0:%.*]], true
-; CHECK-NEXT: [[I3:%.*]] = select i1 [[I2]], i1 [[I1]], i1 false
-; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3]], i32 [[V2:%.*]], i32 [[V3:%.*]]
+; CHECK-NEXT: [[I1:%.*]] = icmp ne i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT: [[I3_NOT:%.*]] = select i1 [[I0:%.*]], i1 true, i1 [[I1]]
+; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V3:%.*]], i32 [[V2:%.*]]
; CHECK-NEXT: ret i32 [[I4]]
;
%i1 = icmp eq i32 %v0, %v1
}
define i32 @t0_commutative(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
; CHECK-LABEL: @t0_commutative(
-; CHECK-NEXT: [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT: [[I2:%.*]] = xor i1 [[I0:%.*]], true
-; CHECK-NEXT: [[I3:%.*]] = select i1 [[I1]], i1 [[I2]], i1 false
-; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3]], i32 [[V2:%.*]], i32 [[V3:%.*]]
+; CHECK-NEXT: [[I1:%.*]] = icmp ne i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT: [[I3_NOT:%.*]] = select i1 [[I1]], i1 true, i1 [[I0:%.*]]
+; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V3:%.*]], i32 [[V2:%.*]]
; CHECK-NEXT: ret i32 [[I4]]
;
%i1 = icmp eq i32 %v0, %v1
define i32 @t1(i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5) {
; CHECK-LABEL: @t1(
; CHECK-NEXT: [[I0:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT: [[I1:%.*]] = icmp eq i32 [[V2:%.*]], [[V3:%.*]]
+; CHECK-NEXT: [[I1:%.*]] = icmp ne i32 [[V2:%.*]], [[V3:%.*]]
; CHECK-NEXT: call void @use1(i1 [[I0]])
-; CHECK-NEXT: [[I2:%.*]] = xor i1 [[I0]], true
-; CHECK-NEXT: [[I3:%.*]] = select i1 [[I2]], i1 [[I1]], i1 false
-; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3]], i32 [[V4:%.*]], i32 [[V5:%.*]]
+; CHECK-NEXT: [[I3_NOT:%.*]] = select i1 [[I0]], i1 true, i1 [[I1]]
+; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V5:%.*]], i32 [[V4:%.*]]
; CHECK-NEXT: ret i32 [[I4]]
;
%i0 = icmp eq i32 %v0, %v1
define i32 @t1_commutative(i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5) {
; CHECK-LABEL: @t1_commutative(
; CHECK-NEXT: [[I0:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT: [[I1:%.*]] = icmp eq i32 [[V2:%.*]], [[V3:%.*]]
+; CHECK-NEXT: [[I1:%.*]] = icmp ne i32 [[V2:%.*]], [[V3:%.*]]
; CHECK-NEXT: call void @use1(i1 [[I0]])
-; CHECK-NEXT: [[I2:%.*]] = xor i1 [[I0]], true
-; CHECK-NEXT: [[I3:%.*]] = select i1 [[I1]], i1 [[I2]], i1 false
-; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3]], i32 [[V4:%.*]], i32 [[V5:%.*]]
+; CHECK-NEXT: [[I3_NOT:%.*]] = select i1 [[I1]], i1 true, i1 [[I0]]
+; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V5:%.*]], i32 [[V4:%.*]]
; CHECK-NEXT: ret i32 [[I4]]
;
%i0 = icmp eq i32 %v0, %v1
; Most basic positive test
define i32 @t0(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
; CHECK-LABEL: @t0(
-; CHECK-NEXT: [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT: [[I2:%.*]] = xor i1 [[I0:%.*]], true
-; CHECK-NEXT: [[I3:%.*]] = select i1 [[I2]], i1 true, i1 [[I1]]
-; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3]], i32 [[V2:%.*]], i32 [[V3:%.*]]
+; CHECK-NEXT: [[I1:%.*]] = icmp ne i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT: [[I3_NOT:%.*]] = select i1 [[I0:%.*]], i1 [[I1]], i1 false
+; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V3:%.*]], i32 [[V2:%.*]]
; CHECK-NEXT: ret i32 [[I4]]
;
%i1 = icmp eq i32 %v0, %v1
}
define i32 @t0_commutative(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
; CHECK-LABEL: @t0_commutative(
-; CHECK-NEXT: [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT: [[I2:%.*]] = xor i1 [[I0:%.*]], true
-; CHECK-NEXT: [[I3:%.*]] = select i1 [[I1]], i1 true, i1 [[I2]]
-; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3]], i32 [[V2:%.*]], i32 [[V3:%.*]]
+; CHECK-NEXT: [[I1:%.*]] = icmp ne i32 [[V0:%.*]], [[V1:%.*]]
+; CHECK-NEXT: [[I3_NOT:%.*]] = select i1 [[I1]], i1 [[I0:%.*]], i1 false
+; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V3:%.*]], i32 [[V2:%.*]]
; CHECK-NEXT: ret i32 [[I4]]
;
%i1 = icmp eq i32 %v0, %v1
define i32 @t1(i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5) {
; CHECK-LABEL: @t1(
; CHECK-NEXT: [[I0:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT: [[I1:%.*]] = icmp eq i32 [[V2:%.*]], [[V3:%.*]]
+; CHECK-NEXT: [[I1:%.*]] = icmp ne i32 [[V2:%.*]], [[V3:%.*]]
; CHECK-NEXT: call void @use1(i1 [[I0]])
-; CHECK-NEXT: [[I2:%.*]] = xor i1 [[I0]], true
-; CHECK-NEXT: [[I3:%.*]] = select i1 [[I2]], i1 true, i1 [[I1]]
-; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3]], i32 [[V4:%.*]], i32 [[V5:%.*]]
+; CHECK-NEXT: [[I3_NOT:%.*]] = select i1 [[I0]], i1 [[I1]], i1 false
+; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V5:%.*]], i32 [[V4:%.*]]
; CHECK-NEXT: ret i32 [[I4]]
;
%i0 = icmp eq i32 %v0, %v1
define i32 @t1_commutative(i32 %v0, i32 %v1, i32 %v2, i32 %v3, i32 %v4, i32 %v5) {
; CHECK-LABEL: @t1_commutative(
; CHECK-NEXT: [[I0:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT: [[I1:%.*]] = icmp eq i32 [[V2:%.*]], [[V3:%.*]]
+; CHECK-NEXT: [[I1:%.*]] = icmp ne i32 [[V2:%.*]], [[V3:%.*]]
; CHECK-NEXT: call void @use1(i1 [[I0]])
-; CHECK-NEXT: [[I2:%.*]] = xor i1 [[I0]], true
-; CHECK-NEXT: [[I3:%.*]] = select i1 [[I1]], i1 true, i1 [[I2]]
-; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3]], i32 [[V4:%.*]], i32 [[V5:%.*]]
+; CHECK-NEXT: [[I3_NOT:%.*]] = select i1 [[I1]], i1 [[I0]], i1 false
+; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V5:%.*]], i32 [[V4:%.*]]
; CHECK-NEXT: ret i32 [[I4]]
;
%i0 = icmp eq i32 %v0, %v1
define i32 @t0(i1 %i0, i32 %v0, i32 %v1, i32 %v2, i32 %v3) {
; CHECK-LABEL: @t0(
; CHECK-NEXT: [[I1:%.*]] = icmp ne i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[I1]], [[I0:%.*]]
-; CHECK-NEXT: [[I4:%.*]] = select i1 [[TMP1]], i32 [[V3:%.*]], i32 [[V2:%.*]]
+; CHECK-NEXT: [[I3_NOT:%.*]] = and i1 [[I1]], [[I0:%.*]]
+; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V3:%.*]], i32 [[V2:%.*]]
; CHECK-NEXT: ret i32 [[I4]]
;
%i1 = icmp eq i32 %v0, %v1
; CHECK-NEXT: [[I0:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
; CHECK-NEXT: [[I1:%.*]] = icmp ne i32 [[V2:%.*]], [[V3:%.*]]
; CHECK-NEXT: call void @use1(i1 [[I0]])
-; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[I0]], [[I1]]
-; CHECK-NEXT: [[I4:%.*]] = select i1 [[TMP1]], i32 [[V5:%.*]], i32 [[V4:%.*]]
+; CHECK-NEXT: [[I3_NOT:%.*]] = and i1 [[I1]], [[I0]]
+; CHECK-NEXT: [[I4:%.*]] = select i1 [[I3_NOT]], i32 [[V5:%.*]], i32 [[V4:%.*]]
; CHECK-NEXT: ret i32 [[I4]]
;
%i0 = icmp eq i32 %v0, %v1