From d633e290c8b5c831d09b3ff3ae72e4ae79a96171 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Sat, 29 Sep 2018 17:01:55 +0000
Subject: [PATCH] [X86] getTargetConstantBitsFromNode - add support for rearranging constant bits via shuffles

Exposed an issue that recursive calls to getTargetConstantBitsFromNode
don't handle changes to EltSizeInBits yet.

llvm-svn: 343384
---
 llvm/lib/Target/X86/X86ISelLowering.cpp        | 47 ++++++++++++++++++++++++++
 llvm/test/CodeGen/X86/known-signbits-vector.ll | 34 +++++++++----------
 llvm/test/CodeGen/X86/packss.ll                | 40 ++++++++--------------
 llvm/test/CodeGen/X86/vector-shift-ashr-256.ll | 42 +++++++++++------------
 llvm/test/CodeGen/X86/vector-shift-lshr-256.ll | 16 ++++-----
 llvm/test/CodeGen/X86/vector-shift-shl-256.ll  | 16 ++++-----
 6 files changed, 109 insertions(+), 86 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index c2054c1..cb5af7d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -5745,6 +5745,10 @@ static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
   // Extract constant bits from a subvector's source.
   if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
       isa<ConstantSDNode>(Op.getOperand(1))) {
+    // TODO - support extract_subvector through bitcasts.
+    if (EltSizeInBits != VT.getScalarSizeInBits())
+      return false;
+
     if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
                                       UndefElts, EltBits, AllowWholeUndefs,
                                       AllowPartialUndefs)) {
@@ -5761,6 +5765,49 @@ static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
     }
   }
 
+  // Extract constant bits from shuffle node sources.
+  if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
+    // TODO - support shuffle through bitcasts.
+    if (EltSizeInBits != VT.getScalarSizeInBits())
+      return false;
+
+    ArrayRef<int> Mask = SVN->getMask();
+    if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
+        llvm::any_of(Mask, [](int M) { return M < 0; }))
+      return false;
+
+    APInt UndefElts0, UndefElts1;
+    SmallVector<APInt, 32> EltBits0, EltBits1;
+    if (isAnyInRange(Mask, 0, NumElts) &&
+        !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
+                                       UndefElts0, EltBits0, AllowWholeUndefs,
+                                       AllowPartialUndefs))
+      return false;
+    if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
+        !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
+                                       UndefElts1, EltBits1, AllowWholeUndefs,
+                                       AllowPartialUndefs))
+      return false;
+
+    UndefElts = APInt::getNullValue(NumElts);
+    for (int i = 0; i != NumElts; ++i) {
+      int M = Mask[i];
+      if (M < 0) {
+        UndefElts.setBit(i);
+        EltBits.push_back(APInt::getNullValue(EltSizeInBits));
+      } else if (M < (int)NumElts) {
+        if (UndefElts0[M])
+          UndefElts.setBit(i);
+        EltBits.push_back(EltBits0[M]);
+      } else {
+        if (UndefElts1[M - NumElts])
+          UndefElts.setBit(i);
+        EltBits.push_back(EltBits1[M - NumElts]);
+      }
+    }
+    return true;
+  }
+
   return false;
 }
 
diff --git a/llvm/test/CodeGen/X86/known-signbits-vector.ll b/llvm/test/CodeGen/X86/known-signbits-vector.ll
index 1e48f86..679e068 100644
--- a/llvm/test/CodeGen/X86/known-signbits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-signbits-vector.ll
@@ -381,26 +381,24 @@ define <4 x float> @signbits_ashr_sext_select_shuffle_sitofp(<4 x i64> %a0, <4 x
 ; X32-NEXT: movl %esp, %ebp
 ; X32-NEXT: andl $-16, %esp
 ; X32-NEXT: subl $16, %esp
-; X32-NEXT: vmovdqa {{.*#+}} xmm3 = [33,0,63,0]
-; X32-NEXT: vmovdqa {{.*#+}} xmm4 = [0,2147483648,0,2147483648]
-; X32-NEXT: vpsrlq %xmm3, %xmm4, %xmm5
-; X32-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[2,3,0,1]
-; X32-NEXT: vpsrlq %xmm6, %xmm4, %xmm4
-; X32-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
-; X32-NEXT: vextractf128 $1, %ymm2, %xmm5
-; X32-NEXT: vpsrlq %xmm6, %xmm5, %xmm7
-; X32-NEXT: vpsrlq %xmm3, %xmm5, %xmm5
-; X32-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm7[4,5,6,7]
-; X32-NEXT: vpsrlq %xmm6, %xmm2, %xmm6
-; X32-NEXT: vpsrlq %xmm3, %xmm2, %xmm2
-; X32-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm6[4,5,6,7]
 ; X32-NEXT: vpmovsxdq 16(%ebp), %xmm3
-; X32-NEXT: vpxor %xmm4, %xmm5, %xmm5
-; X32-NEXT: vpsubq %xmm4, %xmm5, %xmm5
-; X32-NEXT: vpxor %xmm4, %xmm2, %xmm2
-; X32-NEXT: vpsubq %xmm4, %xmm2, %xmm2
 ; X32-NEXT: vpmovsxdq 8(%ebp), %xmm4
-; X32-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
+; X32-NEXT: vmovdqa {{.*#+}} xmm5 = [0,2147483648,0,2147483648]
+; X32-NEXT: vpsrlq $63, %xmm5, %xmm6
+; X32-NEXT: vpsrlq $33, %xmm5, %xmm5
+; X32-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2,3],xmm6[4,5,6,7]
+; X32-NEXT: vextractf128 $1, %ymm2, %xmm6
+; X32-NEXT: vpsrlq $63, %xmm6, %xmm7
+; X32-NEXT: vpsrlq $33, %xmm6, %xmm6
+; X32-NEXT: vpblendw {{.*#+}} xmm6 = xmm6[0,1,2,3],xmm7[4,5,6,7]
+; X32-NEXT: vpxor %xmm5, %xmm6, %xmm6
+; X32-NEXT: vpsubq %xmm5, %xmm6, %xmm6
+; X32-NEXT: vpsrlq $63, %xmm2, %xmm7
+; X32-NEXT: vpsrlq $33, %xmm2, %xmm2
+; X32-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm7[4,5,6,7]
+; X32-NEXT: vpxor %xmm5, %xmm2, %xmm2
+; X32-NEXT: vpsubq %xmm5, %xmm2, %xmm2
+; X32-NEXT: vinsertf128 $1, %xmm6, %ymm2, %ymm2
 ; X32-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
 ; X32-NEXT: vextractf128 $1, %ymm1, %xmm4
 ; X32-NEXT: vextractf128 $1, %ymm0, %xmm5
diff --git a/llvm/test/CodeGen/X86/packss.ll b/llvm/test/CodeGen/X86/packss.ll
index 88257b0..76dd871 100644
--- a/llvm/test/CodeGen/X86/packss.ll
+++ b/llvm/test/CodeGen/X86/packss.ll
@@ -180,31 +180,21 @@ define <8 x i16> @trunc_ashr_v4i64_demandedelts(<4 x i64> %a0) {
 ;
 ; X86-AVX1-LABEL: trunc_ashr_v4i64_demandedelts:
 ; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: movl $63, %eax
-; X86-AVX1-NEXT: vmovd %eax, %xmm1
-; X86-AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm2
-; X86-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
-; X86-AVX1-NEXT: vpsllq %xmm3, %xmm0, %xmm4
-; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm4[4,5,6,7]
-; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; X86-AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm4
-; X86-AVX1-NEXT: vpsllq %xmm3, %xmm0, %xmm0
-; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm0[4,5,6,7]
-; X86-AVX1-NEXT: vpsrlq %xmm3, %xmm0, %xmm4
-; X86-AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
-; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
-; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,2147483648,0,2147483648]
-; X86-AVX1-NEXT: vpsrlq %xmm1, %xmm4, %xmm5
-; X86-AVX1-NEXT: vpsrlq %xmm3, %xmm4, %xmm4
-; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm5[0,1,2,3],xmm4[4,5,6,7]
-; X86-AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm0
-; X86-AVX1-NEXT: vpsubq %xmm4, %xmm0, %xmm0
-; X86-AVX1-NEXT: vpsrlq %xmm3, %xmm2, %xmm3
-; X86-AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm1
-; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
-; X86-AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm1
-; X86-AVX1-NEXT: vpsubq %xmm4, %xmm1, %xmm1
-; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X86-AVX1-NEXT: vpsllq $63, %xmm0, %xmm1
+; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X86-AVX1-NEXT: vpsllq $63, %xmm2, %xmm3
+; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; X86-AVX1-NEXT: vpsrlq $63, %xmm3, %xmm3
+; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,0,0,0,0,0,0,32768]
+; X86-AVX1-NEXT: vpxor %xmm3, %xmm2, %xmm2
+; X86-AVX1-NEXT: vpsubq %xmm3, %xmm2, %xmm2
+; X86-AVX1-NEXT: vpsrlq $63, %xmm1, %xmm1
+; X86-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; X86-AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm0
+; X86-AVX1-NEXT: vpsubq %xmm3, %xmm0, %xmm0
+; X86-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; X86-AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,0,0,0,4,4,4,4]
 ; X86-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
 ; X86-AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
index 4634634..7f12016 100644
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
@@ -1066,29 +1066,25 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
 ;
 ; X32-AVX1-LABEL: constant_shift_v4i64:
 ; X32-AVX1: # %bb.0:
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [31,0,62,0]
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,2147483648,0,2147483648]
-; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm3
-; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
-; X32-AVX1-NEXT: vpsrlq %xmm4, %xmm2, %xmm5
-; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm5[4,5,6,7]
-; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; X32-AVX1-NEXT: vpsrlq %xmm4, %xmm5, %xmm4
-; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm5, %xmm1
-; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4,5,6,7]
-; X32-AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpsubq %xmm3, %xmm1, %xmm1
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,0,7,0]
-; X32-AVX1-NEXT: vpsrlq %xmm3, %xmm2, %xmm4
-; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[2,3,0,1]
-; X32-AVX1-NEXT: vpsrlq %xmm5, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
-; X32-AVX1-NEXT: vpsrlq %xmm5, %xmm0, %xmm4
-; X32-AVX1-NEXT: vpsrlq %xmm3, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5,6,7]
-; X32-AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpsubq %xmm2, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [0,2147483648,0,2147483648]
+; X32-AVX1-NEXT: vpsrlq $62, %xmm1, %xmm2
+; X32-AVX1-NEXT: vpsrlq $31, %xmm1, %xmm3
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1,2,3],xmm2[4,5,6,7]
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; X32-AVX1-NEXT: vpsrlq $62, %xmm3, %xmm4
+; X32-AVX1-NEXT: vpsrlq $31, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0,1,2,3],xmm4[4,5,6,7]
+; X32-AVX1-NEXT: vpxor %xmm2, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; X32-AVX1-NEXT: vpsrlq $7, %xmm1, %xmm3
+; X32-AVX1-NEXT: vpsrlq $1, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; X32-AVX1-NEXT: vpsrlq $7, %xmm0, %xmm3
+; X32-AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; X32-AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpsubq %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
 ; X32-AVX1-NEXT: retl
 ;
 ; X32-AVX2-LABEL: constant_shift_v4i64:
diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
index 9f54d4e..3212c78 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
@@ -867,17 +867,13 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
 ;
 ; X32-AVX1-LABEL: constant_shift_v4i64:
 ; X32-AVX1: # %bb.0:
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [31,0,62,0]
-; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; X32-AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm2
-; X32-AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm1
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X32-AVX1-NEXT: vpsrlq $62, %xmm1, %xmm2
+; X32-AVX1-NEXT: vpsrlq $31, %xmm1, %xmm1
 ; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,0,7,0]
-; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,3,0,1]
-; X32-AVX1-NEXT: vpsrlq %xmm3, %xmm0, %xmm3
-; X32-AVX1-NEXT: vpsrlq %xmm2, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; X32-AVX1-NEXT: vpsrlq $7, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpsrlq $1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
 ; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X32-AVX1-NEXT: retl
 ;
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
index 5c9e8dd..b50836b 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
@@ -793,17 +793,13 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
 ;
 ; X32-AVX1-LABEL: constant_shift_v4i64:
 ; X32-AVX1: # %bb.0:
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [31,0,62,0]
-; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; X32-AVX1-NEXT: vpsllq %xmm2, %xmm3, %xmm2
-; X32-AVX1-NEXT: vpsllq %xmm1, %xmm3, %xmm1
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X32-AVX1-NEXT: vpsllq $62, %xmm1, %xmm2
+; X32-AVX1-NEXT: vpsllq $31, %xmm1, %xmm1
 ; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,0,7,0]
-; X32-AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,3,0,1]
-; X32-AVX1-NEXT: vpsllq %xmm3, %xmm0, %xmm3
-; X32-AVX1-NEXT: vpsllq %xmm2, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; X32-AVX1-NEXT: vpsllq $7, %xmm0, %xmm2
+; X32-AVX1-NEXT: vpsllq $1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
 ; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; X32-AVX1-NEXT: retl
 ;
-- 
2.7.4
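
Illustrative note, not part of the patch: the new ShuffleVectorSDNode handling folds a shuffle of constant sources by routing each destination lane to the constant bits of the source lane selected by the mask, with negative mask entries becoming zero-filled, undef-flagged lanes. The standalone sketch below mirrors that per-lane loop under those assumptions; the helper name foldShuffleOfConstants and the plain std:: types are invented for the example and are not LLVM APIs.

#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch only (not LLVM code): fold a shuffle of two constant vectors by
// rearranging per-lane constant bits according to the shuffle mask.
// Lane i takes Src0[M] for 0 <= M < NumElts, Src1[M - NumElts] for
// NumElts <= M < 2*NumElts, and is marked undef (zero bits) for M < 0.
struct FoldedConstants {
  std::vector<uint64_t> Bits; // per-lane constant bits
  std::vector<bool> Undef;    // per-lane undef flag
};

static FoldedConstants
foldShuffleOfConstants(const std::vector<uint64_t> &Src0,
                       const std::vector<uint64_t> &Src1,
                       const std::vector<int> &Mask) {
  int NumElts = static_cast<int>(Src0.size());
  FoldedConstants R;
  R.Bits.assign(Mask.size(), 0);
  R.Undef.assign(Mask.size(), false);
  for (std::size_t i = 0; i != Mask.size(); ++i) {
    int M = Mask[i];
    if (M < 0)
      R.Undef[i] = true;             // undef lane: keep zero bits, set flag
    else if (M < NumElts)
      R.Bits[i] = Src0[M];           // lane taken from the first source
    else
      R.Bits[i] = Src1[M - NumElts]; // lane taken from the second source
  }
  return R;
}

int main() {
  // shufflevector <4 x i64> <31, 0, 62, 0>, <4 x i64> zeroinitializer,
  //               mask <0, 2, -1, 2>  -->  <31, 62, undef, 62>
  FoldedConstants R = foldShuffleOfConstants({31, 0, 62, 0}, {0, 0, 0, 0},
                                             {0, 2, -1, 2});
  return (R.Bits[0] == 31 && R.Bits[1] == 62 && R.Undef[2]) ? 0 : 1;
}

As the patch's TODO comments note, the real implementation bails out when the requested EltSizeInBits differs from the shuffle's scalar size, since rearranging lanes is only meaningful when the bit widths match; the sketch above assumes matching widths throughout.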