From: Simon Pilgrim
Date: Mon, 21 Nov 2016 14:36:19 +0000 (+0000)
Subject: [SelectionDAG] Add ComputeNumSignBits support for CONCAT_VECTORS opcode
X-Git-Tag: llvmorg-4.0.0-rc1~4004
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=49d7eda9680b4bd3d22132dd47a63fdab058beb9;p=platform%2Fupstream%2Fllvm.git

[SelectionDAG] Add ComputeNumSignBits support for CONCAT_VECTORS opcode

llvm-svn: 287541
---

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index ef560ac..c323072 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2874,6 +2874,13 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
     return ComputeNumSignBits(Op.getOperand(0), Depth+1);
     break;
   }
+  case ISD::CONCAT_VECTORS:
+    // Determine the minimum number of sign bits across all input vectors.
+    // Early out if the result is already 1.
+    Tmp = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
+    for (unsigned i = 1, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i)
+      Tmp = std::min(Tmp, ComputeNumSignBits(Op.getOperand(i), Depth + 1));
+    return Tmp;
   }
 
   // If we are looking at the loaded value of the SDNode.
diff --git a/llvm/test/CodeGen/X86/packss.ll b/llvm/test/CodeGen/X86/packss.ll
index 44ecb40..5cd649b 100644
--- a/llvm/test/CodeGen/X86/packss.ll
+++ b/llvm/test/CodeGen/X86/packss.ll
@@ -88,44 +88,22 @@ define <8 x i16> @trunc_ashr_v4i32_icmp_v4i32(<4 x i32> %a, <4 x i32> %b) nounwi
 ; X32-SSE: # BB#0:
 ; X32-SSE-NEXT: psrad $31, %xmm0
 ; X32-SSE-NEXT: pcmpgtd {{\.LCPI.*}}, %xmm1
-; X32-SSE-NEXT: pslld $16, %xmm1
-; X32-SSE-NEXT: psrad $16, %xmm1
-; X32-SSE-NEXT: pslld $16, %xmm0
-; X32-SSE-NEXT: psrad $16, %xmm0
-; X32-SSE-NEXT: packssdw %xmm1, %xmm0
+; X32-SSE-NEXT: packsswb %xmm1, %xmm0
 ; X32-SSE-NEXT: retl
 ;
 ; X64-SSE-LABEL: trunc_ashr_v4i32_icmp_v4i32:
 ; X64-SSE: # BB#0:
 ; X64-SSE-NEXT: psrad $31, %xmm0
 ; X64-SSE-NEXT: pcmpgtd {{.*}}(%rip), %xmm1
-; X64-SSE-NEXT: pslld $16, %xmm1
-; X64-SSE-NEXT: psrad $16, %xmm1
-; X64-SSE-NEXT: pslld $16, %xmm0
-; X64-SSE-NEXT: psrad $16, %xmm0
-; X64-SSE-NEXT: packssdw %xmm1, %xmm0
+; X64-SSE-NEXT: packsswb %xmm1, %xmm0
 ; X64-SSE-NEXT: retq
 ;
-; X64-AVX1-LABEL: trunc_ashr_v4i32_icmp_v4i32:
-; X64-AVX1: # BB#0:
-; X64-AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
-; X64-AVX1-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm1
-; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; X64-AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; X64-AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
-; X64-AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X64-AVX1-NEXT: retq
-;
-; X64-AVX2-LABEL: trunc_ashr_v4i32_icmp_v4i32:
-; X64-AVX2: # BB#0:
-; X64-AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
-; X64-AVX2-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm1
-; X64-AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
-; X64-AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
-; X64-AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-AVX2-NEXT: # kill: %XMM0 %XMM0 %YMM0
-; X64-AVX2-NEXT: vzeroupper
-; X64-AVX2-NEXT: retq
+; X64-AVX-LABEL: trunc_ashr_v4i32_icmp_v4i32:
+; X64-AVX: # BB#0:
+; X64-AVX-NEXT: vpsrad $31, %xmm0, %xmm0
+; X64-AVX-NEXT: vpcmpgtd {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: retq
   %1 = ashr <4 x i32> %a,
   %2 = icmp sgt <4 x i32> %b,
   %3 = sext <4 x i1> %2 to <4 x i32>
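
Note (not part of the commit): the new CONCAT_VECTORS case takes the minimum ComputeNumSignBits result over all operands, since every element of the concatenation is copied unchanged from one of the inputs. In the packss.ll test both halves are known to be all-sign-bit values (an ashr by 31 and a sign-extended icmp), so the truncation can now be lowered to a single packsswb/vpacksswb. A minimal standalone sketch of that min-fold, using a hypothetical signBitsOfConcat helper and a plain vector of per-operand counts in place of the real SelectionDAG recursion:

    #include <algorithm>
    #include <vector>

    // The concatenated vector is only guaranteed as many sign bits as its
    // least-signed operand, so take the minimum across all inputs and stop
    // early once it drops to the trivial bound of 1.
    unsigned signBitsOfConcat(const std::vector<unsigned> &OperandSignBits) {
      unsigned Tmp = OperandSignBits[0];
      for (size_t i = 1, e = OperandSignBits.size(); i < e && Tmp > 1; ++i)
        Tmp = std::min(Tmp, OperandSignBits[i]);
      return Tmp;
    }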