From 22d51014afb3b29e2918bf7cf8c17b04944da6b0 Mon Sep 17 00:00:00 2001 From: Simon Pilgrim Date: Sat, 29 Sep 2018 14:17:32 +0000 Subject: [PATCH] [X86] getTargetConstantBitsFromNode - add support for peeking through ISD::EXTRACT_SUBVECTOR llvm-svn: 343375 --- llvm/lib/Target/X86/X86ISelLowering.cpp | 15 +++++++++++++++ llvm/test/CodeGen/X86/pr38639.ll | 11 +++++------ llvm/test/CodeGen/X86/vector-shuffle-avx512.ll | 8 ++++---- 3 files changed, 24 insertions(+), 10 deletions(-) diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 58a1865..a71c327 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -5742,6 +5742,21 @@ static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits, return CastBitData(UndefSrcElts, SrcEltBits); } + // Extract constant bits from a subvector's source. + if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR && + isa<ConstantSDNode>(Op.getOperand(1))) { + if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits, + UndefElts, EltBits, AllowWholeUndefs, + AllowPartialUndefs)) { + unsigned NumSubElts = VT.getVectorNumElements(); + unsigned BaseIdx = Op.getConstantOperandVal(1); + UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx); + EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end()); + EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx); + return true; + } + } + return false; } diff --git a/llvm/test/CodeGen/X86/pr38639.ll b/llvm/test/CodeGen/X86/pr38639.ll index c877568..502cf43 100644 --- a/llvm/test/CodeGen/X86/pr38639.ll +++ b/llvm/test/CodeGen/X86/pr38639.ll @@ -4,12 +4,11 @@ define <8 x double> @test(<4 x double> %a, <4 x double> %b) { ; CHECK-LABEL: test: ; CHECK: # %bb.0: -; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = -; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm4 -; CHECK-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1,2,3],ymm2[4,5,6,7] -; CHECK-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1],ymm2[2,3],ymm0[4,5,6,7] -; CHECK-NEXT: 
vblendps {{.*#+}} xmm2 = xmm4[0,1],xmm2[2,3] -; CHECK-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm3[1],ymm1[1],ymm3[3],ymm1[3] +; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = +; CHECK-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0,1,2,3],ymm1[4,5,6,7] +; CHECK-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7] +; CHECK-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm2[1],ymm1[3],ymm2[3] +; CHECK-NEXT: vmovaps {{.*#+}} xmm2 = [8.207174e-01,8.207174e-01] ; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7] ; CHECK-NEXT: retq %1 = shufflevector <4 x double> %a, <4 x double> , <8 x i32> diff --git a/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll b/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll index 70d3065..de1f51b 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-avx512.ll @@ -541,7 +541,7 @@ define <8 x float> @expand14(<4 x float> %a) { define <8 x float> @expand15(<4 x float> %a) { ; SKX64-LABEL: expand15: ; SKX64: # %bb.0: -; SKX64-NEXT: vpermilps {{.*#+}} xmm1 = mem[0,1,0,0] +; SKX64-NEXT: vmovaps {{.*#+}} xmm1 = [0.000000e+00,2.000000e+00,0.000000e+00,0.000000e+00] ; SKX64-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[0,1,1,3] ; SKX64-NEXT: vmovaps {{.*#+}} ymm0 = [0,1,8,3,10,3,2,3] ; SKX64-NEXT: vpermi2ps %ymm2, %ymm1, %ymm0 @@ -549,7 +549,7 @@ define <8 x float> @expand15(<4 x float> %a) { ; ; KNL64-LABEL: expand15: ; KNL64: # %bb.0: -; KNL64-NEXT: vpermilps {{.*#+}} xmm1 = mem[0,1,0,0] +; KNL64-NEXT: vmovaps {{.*#+}} xmm1 = [0.000000e+00,2.000000e+00,0.000000e+00,0.000000e+00] ; KNL64-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,1] ; KNL64-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3] ; KNL64-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,3] @@ -558,7 +558,7 @@ define <8 x float> @expand15(<4 x float> %a) { ; ; SKX32-LABEL: expand15: ; SKX32: # %bb.0: -; SKX32-NEXT: vpermilps {{.*#+}} xmm1 = mem[0,1,0,0] +; SKX32-NEXT: vmovaps {{.*#+}} xmm1 = [0.000000e+00,2.000000e+00,0.000000e+00,0.000000e+00] ; SKX32-NEXT: vpermilps {{.*#+}} 
xmm2 = xmm0[0,1,1,3] ; SKX32-NEXT: vmovaps {{.*#+}} ymm0 = [0,1,8,3,10,3,2,3] ; SKX32-NEXT: vpermi2ps %ymm2, %ymm1, %ymm0 @@ -566,7 +566,7 @@ define <8 x float> @expand15(<4 x float> %a) { ; ; KNL32-LABEL: expand15: ; KNL32: # %bb.0: -; KNL32-NEXT: vpermilps {{.*#+}} xmm1 = mem[0,1,0,0] +; KNL32-NEXT: vmovaps {{.*#+}} xmm1 = [0.000000e+00,2.000000e+00,0.000000e+00,0.000000e+00] ; KNL32-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,1,1] ; KNL32-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3] ; KNL32-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,1,3] -- 2.7.4