From 40e7ba00467fcd1f15ed8e540d43eb6caf200b22 Mon Sep 17 00:00:00 2001
From: Sanjay Patel <spatel@rotateright.com>
Date: Tue, 23 Feb 2016 16:36:07 +0000
Subject: [PATCH] [InstCombine] add helper function to foldCastedBitwiseLogic()
 ; NFCI

This is a straight cut and paste of the existing code and is intended to be
the first step in solving part of PR26702:
https://llvm.org/bugs/show_bug.cgi?id=26702

We should be able to reuse most of this and delete the nearly identical
existing code in visitOr(). Then, we can enhance visitXor() to use the same
code too.

llvm-svn: 261649
---
 .../Transforms/InstCombine/InstCombineAndOrXor.cpp | 69 +++++++++++++---------
 .../Transforms/InstCombine/InstCombineInternal.h   |  1 +
 2 files changed, 41 insertions(+), 29 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 8ca0e50..4ac740e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1243,6 +1243,44 @@ static Instruction *matchDeMorgansLaws(BinaryOperator &I,
   return nullptr;
 }
 
+Instruction *InstCombiner::foldCastedBitwiseLogic(BinaryOperator &I) {
+  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
+  if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
+    Value *Op0COp = Op0C->getOperand(0);
+    Type *SrcTy = Op0COp->getType();
+    // fold (and (cast A), (cast B)) -> (cast (and A, B))
+    if (CastInst *Op1C = dyn_cast<CastInst>(Op1)) {
+      if (Op0C->getOpcode() == Op1C->getOpcode() && // same cast kind ?
+          SrcTy == Op1C->getOperand(0)->getType() &&
+          SrcTy->isIntOrIntVectorTy()) {
+        Value *Op1COp = Op1C->getOperand(0);
+
+        // Only do this if the casts both really cause code to be generated.
+        if (ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
+            ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
+          Value *NewOp = Builder->CreateAnd(Op0COp, Op1COp, I.getName());
+          return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
+        }
+
+        // If this is and(cast(icmp), cast(icmp)), try to fold this even if the
+        // cast is otherwise not optimizable. This happens for vector sexts.
+        if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
+          if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
+            if (Value *Res = FoldAndOfICmps(LHS, RHS))
+              return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
+
+        // If this is and(cast(fcmp), cast(fcmp)), try to fold this even if the
+        // cast is otherwise not optimizable. This happens for vector sexts.
+        if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
+          if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
+            if (Value *Res = FoldAndOfFCmps(LHS, RHS))
+              return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
+      }
+    }
+  }
+  return nullptr;
+}
+
 Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
   bool Changed = SimplifyAssociativeOrCommutative(I);
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
@@ -1480,39 +1518,12 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
       if (Value *Res = FoldAndOfFCmps(LHS, RHS))
         return replaceInstUsesWith(I, Res);
 
+  if (Instruction *CastedAnd = foldCastedBitwiseLogic(I))
+    return CastedAnd;
   if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
     Value *Op0COp = Op0C->getOperand(0);
     Type *SrcTy = Op0COp->getType();
-    // fold (and (cast A), (cast B)) -> (cast (and A, B))
-    if (CastInst *Op1C = dyn_cast<CastInst>(Op1)) {
-      if (Op0C->getOpcode() == Op1C->getOpcode() && // same cast kind ?
-          SrcTy == Op1C->getOperand(0)->getType() &&
-          SrcTy->isIntOrIntVectorTy()) {
-        Value *Op1COp = Op1C->getOperand(0);
-
-        // Only do this if the casts both really cause code to be generated.
-        if (ShouldOptimizeCast(Op0C->getOpcode(), Op0COp, I.getType()) &&
-            ShouldOptimizeCast(Op1C->getOpcode(), Op1COp, I.getType())) {
-          Value *NewOp = Builder->CreateAnd(Op0COp, Op1COp, I.getName());
-          return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
-        }
-
-        // If this is and(cast(icmp), cast(icmp)), try to fold this even if the
-        // cast is otherwise not optimizable. This happens for vector sexts.
-        if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1COp))
-          if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0COp))
-            if (Value *Res = FoldAndOfICmps(LHS, RHS))
-              return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
-
-        // If this is and(cast(fcmp), cast(fcmp)), try to fold this even if the
-        // cast is otherwise not optimizable. This happens for vector sexts.
-        if (FCmpInst *RHS = dyn_cast<FCmpInst>(Op1COp))
-          if (FCmpInst *LHS = dyn_cast<FCmpInst>(Op0COp))
-            if (Value *Res = FoldAndOfFCmps(LHS, RHS))
-              return CastInst::Create(Op0C->getOpcode(), Res, I.getType());
-      }
-    }
 
     // If we are masking off the sign bit of a floating-point value, convert
     // this to the canonical fabs intrinsic call and cast back to integer.
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index cafe8f6..c251683 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -390,6 +390,7 @@ private:
   Value *EmitGEPOffset(User *GEP);
   Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
   Value *EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask);
+  Instruction *foldCastedBitwiseLogic(BinaryOperator &I);
 
 public:
   /// \brief Inserts an instruction \p New before instruction \p Old
-- 
2.7.4