From 2ac58e21a115d16a91578defc4636c43e3608a2e Mon Sep 17 00:00:00 2001
From: Jun Ma
Date: Fri, 11 Dec 2020 11:29:47 +0800
Subject: [PATCH] [InstCombine] Remove scalable vector restriction when fold
 SelectInst

Differential Revision: https://reviews.llvm.org/D93083
---
 .../Transforms/InstCombine/InstCombineCasts.cpp  | 10 ++--
 .../Transforms/InstCombine/InstCombineSelect.cpp |  7 +--
 llvm/test/Transforms/InstCombine/select.ll       | 68 ++++++++++++++++++++++
 3 files changed, 75 insertions(+), 10 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 6e94e58..e9ec802 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -2322,13 +2322,11 @@ static Instruction *foldBitCastSelect(BitCastInst &BitCast,
   // A vector select must maintain the same number of elements in its operands.
   Type *CondTy = Cond->getType();
   Type *DestTy = BitCast.getType();
-  if (auto *CondVTy = dyn_cast<VectorType>(CondTy)) {
-    if (!DestTy->isVectorTy())
+  if (auto *CondVTy = dyn_cast<VectorType>(CondTy))
+    if (!DestTy->isVectorTy() ||
+        CondVTy->getElementCount() !=
+            cast<VectorType>(DestTy)->getElementCount())
       return nullptr;
-    if (cast<FixedVectorType>(DestTy)->getNumElements() !=
-        cast<FixedVectorType>(CondVTy)->getNumElements())
-      return nullptr;
-  }
 
   // FIXME: This transform is restricted from changing the select between
   // scalars and vectors to avoid backend problems caused by creating
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 397cb0b0..c32f6ac 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -283,10 +283,9 @@ Instruction *InstCombinerImpl::foldSelectOpOp(SelectInst &SI, Instruction *TI,
     // The select condition may be a vector. We may only change the operand
     // type if the vector width remains the same (and matches the condition).
     if (auto *CondVTy = dyn_cast<VectorType>(CondTy)) {
-      if (!FIOpndTy->isVectorTy())
-        return nullptr;
-      if (cast<FixedVectorType>(CondVTy)->getNumElements() !=
-          cast<FixedVectorType>(FIOpndTy)->getNumElements())
+      if (!FIOpndTy->isVectorTy() ||
+          CondVTy->getElementCount() !=
+              cast<VectorType>(FIOpndTy)->getElementCount())
         return nullptr;
 
       // TODO: If the backend knew how to deal with casts better, we could
diff --git a/llvm/test/Transforms/InstCombine/select.ll b/llvm/test/Transforms/InstCombine/select.ll
index 987f34e..819ac6d 100644
--- a/llvm/test/Transforms/InstCombine/select.ll
+++ b/llvm/test/Transforms/InstCombine/select.ll
@@ -68,6 +68,15 @@ define <2 x i1> @test8vec(<2 x i1> %C, <2 x i1> %X) {
   ret <2 x i1> %R
 }
 
+define <vscale x 2 x i1> @test8vvec(<vscale x 2 x i1> %C, <vscale x 2 x i1> %X) {
+; CHECK-LABEL: @test8vvec(
+; CHECK-NEXT:    [[R:%.*]] = and <vscale x 2 x i1> [[C:%.*]], [[X:%.*]]
+; CHECK-NEXT:    ret <vscale x 2 x i1> [[R]]
+;
+  %R = select <vscale x 2 x i1> %C, <vscale x 2 x i1> %X, <vscale x 2 x i1> zeroinitializer
+  ret <vscale x 2 x i1> %R
+}
+
 define i1 @test9(i1 %C, i1 %X) {
 ; CHECK-LABEL: @test9(
 ; CHECK-NEXT:    [[NOT_C:%.*]] = xor i1 [[C:%.*]], true
@@ -88,6 +97,16 @@ define <2 x i1> @test9vec(<2 x i1> %C, <2 x i1> %X) {
   ret <2 x i1> %R
 }
 
+define <vscale x 2 x i1> @test9vvec(<vscale x 2 x i1> %C, <vscale x 2 x i1> %X) {
+; CHECK-LABEL: @test9vvec(
+; CHECK-NEXT:    [[NOT_C:%.*]] = xor <vscale x 2 x i1> [[C:%.*]], shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer)
+; CHECK-NEXT:    [[R:%.*]] = and <vscale x 2 x i1> [[NOT_C]], [[X:%.*]]
+; CHECK-NEXT:    ret <vscale x 2 x i1> [[R]]
+;
+  %R = select <vscale x 2 x i1> %C, <vscale x 2 x i1> zeroinitializer, <vscale x 2 x i1> %X
+  ret <vscale x 2 x i1> %R
+}
+
 define i1 @test10(i1 %C, i1 %X) {
 ; CHECK-LABEL: @test10(
 ; CHECK-NEXT:    [[NOT_C:%.*]] = xor i1 [[C:%.*]], true
@@ -699,6 +718,34 @@ define i48 @test51(<3 x i1> %icmp, <3 x i16> %tmp) {
   ret i48 %tmp2
 }
 
+define <vscale x 4 x float> @bitcast_select_bitcast(<vscale x 4 x i1> %icmp, <vscale x 4 x i32> %a, <vscale x 4 x float> %b) {
+; CHECK-LABEL: @bitcast_select_bitcast(
+; CHECK-NEXT:    [[BC1:%.*]] = bitcast <vscale x 4 x i32> [[A:%.*]] to <vscale x 4 x float>
+; CHECK-NEXT:    [[SELECT:%.*]] = select <vscale x 4 x i1> [[ICMP:%.*]], <vscale x 4 x float> [[B:%.*]], <vscale x 4 x float> [[BC1]]
+; CHECK-NEXT:    ret <vscale x 4 x float> [[SELECT]]
+;
+  %bc1 = bitcast <vscale x 4 x float> %b to <vscale x 4 x i32>
+  %select = select <vscale x 4 x i1> %icmp, <vscale x 4 x i32> %bc1, <vscale x 4 x i32> %a
+  %bc2 = bitcast <vscale x 4 x i32> %select to <vscale x 4 x float>
+  ret <vscale x 4 x float> %bc2
+}
+
+define void @select_oneuse_bitcast(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x i32> %c, <vscale x 4 x i32> %d, <vscale x 4 x i32>* %ptr1) {
+; CHECK-LABEL: @select_oneuse_bitcast(
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <vscale x 4 x i32> [[C:%.*]], [[D:%.*]]
+; CHECK-NEXT:    [[SEL1_V:%.*]] = select <vscale x 4 x i1> [[CMP]], <vscale x 4 x float> [[A:%.*]], <vscale x 4 x float> [[B:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <vscale x 4 x i32>* [[PTR1:%.*]] to <vscale x 4 x float>*
+; CHECK-NEXT:    store <vscale x 4 x float> [[SEL1_V]], <vscale x 4 x float>* [[TMP1]], align 16
+; CHECK-NEXT:    ret void
+;
+  %cmp = icmp ult <vscale x 4 x i32> %c, %d
+  %bc1 = bitcast <vscale x 4 x float> %a to <vscale x 4 x i32>
+  %bc2 = bitcast <vscale x 4 x float> %b to <vscale x 4 x i32>
+  %sel1 = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %bc1, <vscale x 4 x i32> %bc2
+  store <vscale x 4 x i32> %sel1, <vscale x 4 x i32>* %ptr1
+  ret void
+}
+
 ; Allow select promotion even if there are multiple uses of bitcasted ops.
 ; Hoisting the selects allows later pattern matching to see that these are min/max ops.
 
@@ -723,6 +770,27 @@ define void @min_max_bitcast(<4 x float> %a, <4 x float> %b, <4 x i32>* %ptr1, <4 x i32>* %ptr2) {
   ret void
 }
 
+define void @min_max_bitcast1(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x i32>* %ptr1, <vscale x 4 x i32>* %ptr2) {
+; CHECK-LABEL: @min_max_bitcast1(
+; CHECK-NEXT:    [[CMP:%.*]] = fcmp olt <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT:    [[SEL1_V:%.*]] = select <vscale x 4 x i1> [[CMP]], <vscale x 4 x float> [[A]], <vscale x 4 x float> [[B]]
+; CHECK-NEXT:    [[SEL2_V:%.*]] = select <vscale x 4 x i1> [[CMP]], <vscale x 4 x float> [[B]], <vscale x 4 x float> [[A]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <vscale x 4 x i32>* [[PTR1:%.*]] to <vscale x 4 x float>*
+; CHECK-NEXT:    store <vscale x 4 x float> [[SEL1_V]], <vscale x 4 x float>* [[TMP1]], align 16
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <vscale x 4 x i32>* [[PTR2:%.*]] to <vscale x 4 x float>*
+; CHECK-NEXT:    store <vscale x 4 x float> [[SEL2_V]], <vscale x 4 x float>* [[TMP2]], align 16
+; CHECK-NEXT:    ret void
+;
+  %cmp = fcmp olt <vscale x 4 x float> %a, %b
+  %bc1 = bitcast <vscale x 4 x float> %a to <vscale x 4 x i32>
+  %bc2 = bitcast <vscale x 4 x float> %b to <vscale x 4 x i32>
+  %sel1 = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %bc1, <vscale x 4 x i32> %bc2
+  %sel2 = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %bc2, <vscale x 4 x i32> %bc1
+  store <vscale x 4 x i32> %sel1, <vscale x 4 x i32>* %ptr1
+  store <vscale x 4 x i32> %sel2, <vscale x 4 x i32>* %ptr2
+  ret void
+}
+
 ; To avoid potential backend problems, we don't do the same transform for other casts.
 
 define void @truncs_before_selects(<4 x float> %f1, <4 x float> %f2, <4 x i64> %a, <4 x i64> %b, <4 x i32>* %ptr1, <4 x i32>* %ptr2) {
-- 
2.7.4
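
The heart of the patch is replacing getNumElements(), which is only defined for
fixed-width vectors, with getElementCount(), which is defined for fixed and
scalable vectors alike: an ElementCount carries both a minimum element count
and a "scalable" flag, so a single equality test covers everything the old
two-step FixedVectorType check did. Below is a minimal standalone sketch of
that equivalence, not part of the patch; the helper name sameShape is made up
for illustration.

#include "llvm/IR/DerivedTypes.h"

using namespace llvm;

// Hypothetical helper, not from the patch: true iff two vector types have
// the same shape, i.e. the same minimum element count and the same
// scalability. This is exactly the condition both call sites now test.
static bool sameShape(VectorType *A, VectorType *B) {
  // <4 x i32>          vs <4 x float>        -> true  (both fixed, 4 == 4)
  // <vscale x 4 x i32> vs <vscale x 4 x i32> -> true  (both scalable, min 4)
  // <vscale x 4 x i32> vs <4 x i32>          -> false (scalable vs fixed)
  return A->getElementCount() == B->getElementCount();
}

Because the comparison itself rejects fixed-vs-scalable mismatches, the folds
are unchanged for fixed-width IR and simply start applying to scalable
vectors, which is what the new vscale tests above verify.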