From: Roman Lebedev
Date: Sat, 14 Jul 2018 12:20:16 +0000 (+0000)
Subject: [InstCombine] Fold x & (-1 >> y) u< x to x u> (-1 >> y)
X-Git-Tag: llvmorg-7.0.0-rc1~1426
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=e3dc587ae00bc4c79167a1be057ecd5efc0789aa;p=platform%2Fupstream%2Fllvm.git

[InstCombine] Fold x & (-1 >> y) u< x to x u> (-1 >> y)

https://bugs.llvm.org/show_bug.cgi?id=38123
https://rise4fun.com/Alive/ocb

This pattern is not commutative. But InstSimplify will already have
taken care of the 'commutative' variant.

llvm-svn: 337098
---

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index f89dca4..eec8fad 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -2897,6 +2897,11 @@ static Value *foldICmpWithLowBitMaskedVal(ICmpInst &I,
     assert(X == I.getOperand(1) && "instsimplify took care of commut. variant");
     DstPred = ICmpInst::Predicate::ICMP_ULE;
     break;
+  case ICmpInst::Predicate::ICMP_ULT:
+    // x & (-1 >> y) u< x -> x u> (-1 >> y)
+    assert(X == I.getOperand(1) && "instsimplify took care of commut. variant");
+    DstPred = ICmpInst::Predicate::ICMP_UGT;
+    break;
   // TODO: more folds are possible, https://bugs.llvm.org/show_bug.cgi?id=38123
   default:
     return nullptr;
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ult-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ult-to-icmp-ugt.ll
index 8ce0819..287e369 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ult-to-icmp-ugt.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ult-to-icmp-ugt.ll
@@ -15,9 +15,8 @@

 define i1 @p0(i8 %x) {
 ; CHECK-LABEL: @p0(
-; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[X:%.*]], 3
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[TMP0]], [[X]]
-; CHECK-NEXT: ret i1 [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i8 [[X:%.*]], 3
+; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %tmp0 = and i8 %x, 3
   %ret = icmp ult i8 %tmp0, %x
@@ -27,9 +26,8 @@ define i1 @p0(i8 %x) {
 define i1 @pv(i8 %x, i8 %y) {
 ; CHECK-LABEL: @pv(
 ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[TMP0]], [[X:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[TMP1]], [[X]]
-; CHECK-NEXT: ret i1 [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i8 [[TMP0]], [[X:%.*]]
+; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %tmp0 = lshr i8 -1, %y
   %tmp1 = and i8 %tmp0, %x
@@ -43,9 +41,8 @@ define i1 @pv(i8 %x, i8 %y) {

 define <2 x i1> @p1_vec_splat(<2 x i8> %x) {
 ; CHECK-LABEL: @p1_vec_splat(
-; CHECK-NEXT: [[TMP0:%.*]] = and <2 x i8> [[X:%.*]], <i8 3, i8 3>
-; CHECK-NEXT: [[RET:%.*]] = icmp ult <2 x i8> [[TMP0]], [[X]]
-; CHECK-NEXT: ret <2 x i1> [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt <2 x i8> [[X:%.*]], <i8 3, i8 3>
+; CHECK-NEXT: ret <2 x i1> [[TMP1]]
 ;
   %tmp0 = and <2 x i8> %x, <i8 3, i8 3>
   %ret = icmp ult <2 x i8> %tmp0, %x
@@ -54,9 +51,8 @@ define <2 x i1> @p1_vec_splat(<2 x i8> %x) {

 define <2 x i1> @p2_vec_nonsplat(<2 x i8> %x) {
 ; CHECK-LABEL: @p2_vec_nonsplat(
-; CHECK-NEXT: [[TMP0:%.*]] = and <2 x i8> [[X:%.*]], <i8 3, i8 15>
-; CHECK-NEXT: [[RET:%.*]] = icmp ult <2 x i8> [[TMP0]], [[X]]
-; CHECK-NEXT: ret <2 x i1> [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt <2 x i8> [[X:%.*]], <i8 3, i8 15>
+; CHECK-NEXT: ret <2 x i1> [[TMP1]]
 ;
   %tmp0 = and <2 x i8> %x, <i8 3, i8 15> ; doesn't have to be splat.
   %ret = icmp ult <2 x i8> %tmp0, %x
@@ -65,9 +61,8 @@ define <2 x i1> @p2_vec_nonsplat(<2 x i8> %x) {

 define <3 x i1> @p3_vec_splat_undef(<3 x i8> %x) {
 ; CHECK-LABEL: @p3_vec_splat_undef(
-; CHECK-NEXT: [[TMP0:%.*]] = and <3 x i8> [[X:%.*]], <i8 3, i8 undef, i8 3>
-; CHECK-NEXT: [[RET:%.*]] = icmp ult <3 x i8> [[TMP0]], [[X]]
-; CHECK-NEXT: ret <3 x i1> [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt <3 x i8> [[X:%.*]], <i8 3, i8 undef, i8 3>
+; CHECK-NEXT: ret <3 x i1> [[TMP1]]
 ;
   %tmp0 = and <3 x i8> %x, <i8 3, i8 undef, i8 3>
   %ret = icmp ult <3 x i8> %tmp0, %x
@@ -100,9 +95,8 @@ define i1 @cv0(i8 %y) {
 ; CHECK-LABEL: @cv0(
 ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
 ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X]], [[TMP0]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[TMP1]], [[X]]
-; CHECK-NEXT: ret i1 [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i8 [[X]], [[TMP0]]
+; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %x = call i8 @gen8()
   %tmp0 = lshr i8 -1, %y
@@ -145,8 +139,8 @@ define i1 @oneuse0(i8 %x) {
 ; CHECK-LABEL: @oneuse0(
 ; CHECK-NEXT: [[TMP0:%.*]] = and i8 [[X:%.*]], 3
 ; CHECK-NEXT: call void @use8(i8 [[TMP0]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[TMP0]], [[X]]
-; CHECK-NEXT: ret i1 [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i8 [[X]], 3
+; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %tmp0 = and i8 %x, 3
   call void @use8(i8 %tmp0)