From 5ebd2b542bedc94ebd041f83a59c567ef5758c10 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Mon, 12 Dec 2016 13:33:58 +0000
Subject: [PATCH] [X86][SSE] Add support for combining SSE VSHLI/VSRLI uniform
 constant shifts.

Fixes some missed constant folding opportunities and allows us to combine
shuffles that end with a logical bit shift.

llvm-svn: 289429
---
 llvm/lib/Target/X86/X86ISelLowering.cpp       | 33 +++++++++++++++++++++++++++
 llvm/test/CodeGen/X86/bswap-vector.ll         |  6 ++---
 llvm/test/CodeGen/X86/pmul.ll                 | 15 +++---------
 llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll | 30 ++++++++++++------------
 llvm/test/CodeGen/X86/vector-lzcnt-128.ll     | 12 ++++------
 llvm/test/CodeGen/X86/vector-lzcnt-256.ll     |  8 ++-----
 6 files changed, 58 insertions(+), 46 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 8c09f12..e04087e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -29761,6 +29761,37 @@ static SDValue combineShift(SDNode* N, SelectionDAG &DAG,
   return SDValue();
 }
 
+static SDValue combineVectorShift(SDNode *N, SelectionDAG &DAG,
+                                  TargetLowering::DAGCombinerInfo &DCI,
+                                  const X86Subtarget &Subtarget) {
+  assert((X86ISD::VSHLI == N->getOpcode() || X86ISD::VSRLI == N->getOpcode()) &&
+         "Unexpected opcode");
+  EVT VT = N->getValueType(0);
+  unsigned NumBitsPerElt = VT.getScalarSizeInBits();
+
+  // This fails for mask register (vXi1) shifts.
+  if ((NumBitsPerElt % 8) != 0)
+    return SDValue();
+
+  // Out of range logical bit shifts are guaranteed to be zero.
+  APInt ShiftVal = cast<ConstantSDNode>(N->getOperand(1))->getAPIntValue();
+  if (ShiftVal.zextOrTrunc(8).uge(NumBitsPerElt))
+    return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(N));
+
+  // We can decode 'whole byte' logical bit shifts as shuffles.
+  if ((ShiftVal.getZExtValue() % 8) == 0) {
+    SDValue Op(N, 0);
+    SmallVector<int, 1> NonceMask; // Just a placeholder.
+    NonceMask.push_back(0);
+    if (combineX86ShufflesRecursively({Op}, 0, Op, NonceMask,
+                                      /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
+                                      DCI, Subtarget))
+      return SDValue(); // This routine will use CombineTo to replace N.
+  }
+
+  return SDValue();
+}
+
 /// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
 /// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
 /// OR -> CMPNEQSS.
@@ -33127,6 +33158,8 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::SETCC:          return combineSetCC(N, DAG, Subtarget);
   case X86ISD::SETCC:       return combineX86SetCC(N, DAG, DCI, Subtarget);
   case X86ISD::BRCOND:      return combineBrCond(N, DAG, DCI, Subtarget);
+  case X86ISD::VSHLI:
+  case X86ISD::VSRLI:       return combineVectorShift(N, DAG, DCI, Subtarget);
   case X86ISD::VSEXT:
   case X86ISD::VZEXT:       return combineVSZext(N, DAG, DCI, Subtarget);
   case X86ISD::SHUFP:       // Handle all target specific shuffles
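The combine added above rests on two observations about X86's uniform logical
shifts: a shift amount at or beyond the element width always produces zero,
and a shift by a whole number of bytes only moves and zero-fills bytes, so it
can be re-expressed as a byte shuffle and handed back to
combineX86ShufflesRecursively. The standalone C++ sketch below illustrates
only the second observation; it is not LLVM code, the helper name
srliAsByteShuffle is hypothetical, and -1 marks a byte that becomes zero,
in the spirit of the shuffle combiner's zero sentinel.

#include <cassert>
#include <vector>

// Hypothetical helper (illustration only, not part of LLVM): build the byte
// shuffle that is equivalent to a logical right shift by ShiftBits on
// little-endian elements of EltSizeInBytes, over a vector of NumBytes bytes.
// An index of -1 denotes a byte that is zeroed by the shift.
static std::vector<int> srliAsByteShuffle(unsigned NumBytes,
                                          unsigned EltSizeInBytes,
                                          unsigned ShiftBits) {
  assert(ShiftBits % 8 == 0 && "only whole-byte shifts map to a byte shuffle");
  unsigned ShiftBytes = ShiftBits / 8;
  std::vector<int> Mask(NumBytes, -1);
  for (unsigned Elt = 0; Elt != NumBytes / EltSizeInBytes; ++Elt)
    for (unsigned B = 0; B + ShiftBytes < EltSizeInBytes; ++B)
      // Byte B of the result element comes from byte B + ShiftBytes of the
      // source element; the top ShiftBytes bytes become zero.
      Mask[Elt * EltSizeInBytes + B] =
          int(Elt * EltSizeInBytes + B + ShiftBytes);
  return Mask;
}

int main() {
  // PSRLD $16 on a 16-byte vector of i32 lanes: each dword keeps its two high
  // source bytes in its low half and zeroes the rest.  This is the mask that
  // gets composed with the PSHUFB in the bswap-vector.ll test below.
  std::vector<int> Mask = srliAsByteShuffle(16, 4, 16);
  std::vector<int> Expected = {2,  3,  -1, -1, 6,  7,  -1, -1,
                               10, 11, -1, -1, 14, 15, -1, -1};
  assert(Mask == Expected);
  return 0;
}

Note that this mapping is only valid when the uniform shift amount is a
multiple of 8; other amounts move bits across byte boundaries and are left
alone by the new combine.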
diff --git a/llvm/test/CodeGen/X86/bswap-vector.ll b/llvm/test/CodeGen/X86/bswap-vector.ll
index 6697183..fd5983d 100644
--- a/llvm/test/CodeGen/X86/bswap-vector.ll
+++ b/llvm/test/CodeGen/X86/bswap-vector.ll
@@ -268,14 +268,12 @@ define <4 x i16> @test7(<4 x i16> %v) {
 ;
 ; CHECK-SSSE3-LABEL: test7:
 ; CHECK-SSSE3: # BB#0: # %entry
-; CHECK-SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; CHECK-SSSE3-NEXT: psrld $16, %xmm0
+; CHECK-SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,0],zero,zero,xmm0[5,4],zero,zero,xmm0[9,8],zero,zero,xmm0[13,12],zero,zero
 ; CHECK-SSSE3-NEXT: retq
 ;
 ; CHECK-AVX-LABEL: test7:
 ; CHECK-AVX: # BB#0: # %entry
-; CHECK-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12]
-; CHECK-AVX-NEXT: vpsrld $16, %xmm0, %xmm0
+; CHECK-AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,0],zero,zero,xmm0[5,4],zero,zero,xmm0[9,8],zero,zero,xmm0[13,12],zero,zero
 ; CHECK-AVX-NEXT: retq
 ;
 ; CHECK-WIDE-AVX-LABEL: test7:
diff --git a/llvm/test/CodeGen/X86/pmul.ll b/llvm/test/CodeGen/X86/pmul.ll
index 5dbde88..b145a5c 100644
--- a/llvm/test/CodeGen/X86/pmul.ll
+++ b/llvm/test/CodeGen/X86/pmul.ll
@@ -1301,9 +1301,6 @@ define <4 x i32> @mul_v4i64_zero_lower(<4 x i32> %val1, <4 x i64> %val2) {
 ; SSE2-NEXT: movdqa %xmm0, %xmm4
 ; SSE2-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [0,4294967295,0,4294967295]
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: pand %xmm3, %xmm1
 ; SSE2-NEXT: psrlq $32, %xmm1
 ; SSE2-NEXT: pmuludq %xmm0, %xmm1
 ; SSE2-NEXT: psllq $32, %xmm1
@@ -1320,14 +1317,12 @@ define <4 x i32> @mul_v4i64_zero_lower(<4 x i32> %val1, <4 x i64> %val2) {
 ; SSE41-NEXT: pxor %xmm3, %xmm3
 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
 ; SSE41-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm3[0,1],xmm1[2,3],xmm3[4,5],xmm1[6,7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
-; SSE41-NEXT: psrlq $32, %xmm2
-; SSE41-NEXT: pmuludq %xmm0, %xmm2
-; SSE41-NEXT: psllq $32, %xmm2
 ; SSE41-NEXT: psrlq $32, %xmm1
 ; SSE41-NEXT: pmuludq %xmm4, %xmm1
 ; SSE41-NEXT: psllq $32, %xmm1
+; SSE41-NEXT: psrlq $32, %xmm2
+; SSE41-NEXT: pmuludq %xmm0, %xmm2
+; SSE41-NEXT: psllq $32, %xmm2
 ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
@@ -1336,8 +1331,6 @@ define <4 x i32> @mul_v4i64_zero_lower(<4 x i32> %val1, <4 x i64> %val2) {
 ; AVX2-LABEL: mul_v4i64_zero_lower:
 ; AVX2: # BB#0: # %entry
 ; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2
-; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
 ; AVX2-NEXT: vpsrlq $32, %ymm1, %ymm1
 ; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: vpsllq $32, %ymm0, %ymm0
@@ -1351,8 +1344,6 @@ define <4 x i32> @mul_v4i64_zero_lower(<4 x i32> %val1, <4 x i64> %val2) {
 ; AVX512-LABEL: mul_v4i64_zero_lower:
 ; AVX512: # BB#0: # %entry
 ; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; AVX512-NEXT: vpxor %ymm2, %ymm2, %ymm2
-; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
 ; AVX512-NEXT: vpsrlq $32, %ymm1, %ymm1
 ; AVX512-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT: vpsllq $32, %ymm0, %ymm0
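The bswap-vector.ll hunk above is the shuffle-combining case from the commit
message: a PSHUFB followed by a 16-bit logical right shift of each dword now
collapses into a single PSHUFB, and the pmul.ll hunks show zero blends
becoming redundant once the shift is understood at byte granularity. The
sketch below is a simplified, hypothetical model of that mask composition
(composeByteMasks is not an LLVM function, just an illustration of what the
recursive shuffle combine effectively computes); it reproduces the exact mask
the updated CHECK-SSSE3 line expects.

#include <cassert>
#include <cstddef>
#include <vector>

// Simplified model (not the LLVM implementation): compose two byte-shuffle
// masks, applying First and then Second.  -1 means "this byte is zero".
static std::vector<int> composeByteMasks(const std::vector<int> &First,
                                         const std::vector<int> &Second) {
  std::vector<int> Result(Second.size(), -1);
  for (std::size_t I = 0; I != Second.size(); ++I)
    if (Second[I] >= 0)
      Result[I] = First[Second[I]]; // zero stays zero, else chase the index
  return Result;
}

int main() {
  // bswap-vector.ll test7: PSHUFB reversing the bytes within each dword...
  std::vector<int> Bswap = {3,  2,  1, 0, 7,  6,  5,  4,
                            11, 10, 9, 8, 15, 14, 13, 12};
  // ...followed by PSRLD $16, expressed as a byte shuffle.
  std::vector<int> Srl16 = {2,  3,  -1, -1, 6,  7,  -1, -1,
                            10, 11, -1, -1, 14, 15, -1, -1};
  // Matches the single PSHUFB the test now emits:
  //   xmm0[1,0],zero,zero,xmm0[5,4],zero,zero,...
  std::vector<int> Expected = {1, 0, -1, -1, 5,  4,  -1, -1,
                               9, 8, -1, -1, 13, 12, -1, -1};
  assert(composeByteMasks(Bswap, Srl16) == Expected);
  return 0;
}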
diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll
index 64bfb65..c4ecaa4 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-128.ll
@@ -172,19 +172,18 @@ define <8 x i16> @test_div7_8i16(<8 x i16> %a) nounwind {
 define <16 x i8> @test_div7_16i8(<16 x i8> %a) nounwind {
 ; SSE2-LABEL: test_div7_16i8:
 ; SSE2: # BB#0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psrlw $8, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
+; SSE2-NEXT: pmullw %xmm2, %xmm1
 ; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: pmullw %xmm1, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
 ; SSE2-NEXT: movdqa %xmm0, %xmm3
 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: pmullw %xmm1, %xmm3
+; SSE2-NEXT: pmullw %xmm2, %xmm3
 ; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: packuswb %xmm2, %xmm3
+; SSE2-NEXT: packuswb %xmm1, %xmm3
 ; SSE2-NEXT: psubb %xmm3, %xmm0
 ; SSE2-NEXT: psrlw $1, %xmm0
 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
@@ -465,19 +464,18 @@ define <8 x i16> @test_rem7_8i16(<8 x i16> %a) nounwind {
 define <16 x i8> @test_rem7_16i8(<16 x i8> %a) nounwind {
 ; SSE2-LABEL: test_rem7_16i8:
 ; SSE2: # BB#0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [37,37,37,37,37,37,37,37,37,37,37,37,37,37,37,37]
+; SSE2-NEXT: movdqa %xmm0, %xmm1
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE2-NEXT: psrlw $8, %xmm1
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [37,0,37,0,37,0,37,0,37,0,37,0,37,0,37,0]
+; SSE2-NEXT: pmullw %xmm2, %xmm1
 ; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: pmullw %xmm1, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
 ; SSE2-NEXT: movdqa %xmm0, %xmm3
 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: pmullw %xmm1, %xmm3
+; SSE2-NEXT: pmullw %xmm2, %xmm3
 ; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: packuswb %xmm2, %xmm3
+; SSE2-NEXT: packuswb %xmm1, %xmm3
 ; SSE2-NEXT: movdqa %xmm0, %xmm1
 ; SSE2-NEXT: psubb %xmm3, %xmm1
 ; SSE2-NEXT: psrlw $1, %xmm1
diff --git a/llvm/test/CodeGen/X86/vector-lzcnt-128.ll b/llvm/test/CodeGen/X86/vector-lzcnt-128.ll
index e5ab0d2..6445a36 100644
--- a/llvm/test/CodeGen/X86/vector-lzcnt-128.ll
+++ b/llvm/test/CodeGen/X86/vector-lzcnt-128.ll
@@ -1622,11 +1622,9 @@ define <2 x i64> @foldv2i64() nounwind {
 ; X32-SSE-NEXT: pand %xmm0, %xmm1
 ; X32-SSE-NEXT: psrld $16, %xmm0
 ; X32-SSE-NEXT: paddd %xmm1, %xmm0
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,4294967295,0,0]
-; X32-SSE-NEXT: psrlq $32, %xmm1
-; X32-SSE-NEXT: pand %xmm0, %xmm1
+; X32-SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7]
 ; X32-SSE-NEXT: psrlq $32, %xmm0
-; X32-SSE-NEXT: paddq %xmm1, %xmm0
+; X32-SSE-NEXT: paddq %xmm2, %xmm0
 ; X32-SSE-NEXT: retl
   %out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> , i1 0)
   ret <2 x i64> %out
@@ -1679,11 +1677,9 @@ define <2 x i64> @foldv2i64u() nounwind {
 ; X32-SSE-NEXT: pand %xmm0, %xmm1
 ; X32-SSE-NEXT: psrld $16, %xmm0
 ; X32-SSE-NEXT: paddd %xmm1, %xmm0
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,4294967295,0,0]
-; X32-SSE-NEXT: psrlq $32, %xmm1
-; X32-SSE-NEXT: pand %xmm0, %xmm1
+; X32-SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3,4,5,6,7]
 ; X32-SSE-NEXT: psrlq $32, %xmm0
-; X32-SSE-NEXT: paddq %xmm1, %xmm0
+; X32-SSE-NEXT: paddq %xmm2, %xmm0
 ; X32-SSE-NEXT: retl
   %out = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> , i1 -1)
   ret <2 x i64> %out
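The vector-idiv-udiv-128.ll hunks above show the constant-folding side of the
change: the PSRLW $8 that used to be applied to the splatted magic constant at
run time is now evaluated at compile time, so the shifted constant
[37,0,37,0,...] is loaded directly, and the lzcnt tests likewise trade a
constant mask plus shift for a blend. A minimal model of folding a uniform
logical right shift on a constant vector of 16-bit lanes, assuming an
out-of-range amount folds to zero as in combineVectorShift, might look like
this (foldVsrliW is an illustrative name, not an LLVM routine):

#include <array>
#include <cassert>
#include <cstdint>

// Illustration only: evaluate a uniform logical right shift on a constant
// <8 x i16> at compile time instead of emitting a PSRLW.
static std::array<uint16_t, 8> foldVsrliW(std::array<uint16_t, 8> C,
                                          unsigned Amt) {
  for (uint16_t &Lane : C)
    Lane = Amt >= 16 ? uint16_t(0) : uint16_t(Lane >> Amt); // out of range -> 0
  return C;
}

int main() {
  // A <16 x i8> splat of 37 viewed as <8 x i16>: every lane is 0x2525.
  std::array<uint16_t, 8> Splat37;
  Splat37.fill(0x2525);

  // psrlw $8 folds to the [37,0,37,0,...] byte pattern that the updated
  // SSE2 checks in test_div7_16i8/test_rem7_16i8 now load directly.
  std::array<uint16_t, 8> Folded = foldVsrliW(Splat37, 8);
  for (uint16_t Lane : Folded)
    assert(Lane == 0x0025);
  return 0;
}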
diff --git a/llvm/test/CodeGen/X86/vector-lzcnt-256.ll b/llvm/test/CodeGen/X86/vector-lzcnt-256.ll
index e288f32..05cb61a 100644
--- a/llvm/test/CodeGen/X86/vector-lzcnt-256.ll
+++ b/llvm/test/CodeGen/X86/vector-lzcnt-256.ll
@@ -882,9 +882,7 @@ define <4 x i64> @foldv4i64() nounwind {
 ; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm1
 ; X32-AVX-NEXT: vpsrld $16, %ymm0, %ymm0
 ; X32-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [0,4294967295,0,0,4294967295,4294967295,0,4294967295]
-; X32-AVX-NEXT: vpsrlq $32, %ymm1, %ymm1
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm1
+; X32-AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm4[1,2,3],ymm0[4],ymm4[5],ymm0[6],ymm4[7]
 ; X32-AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
 ; X32-AVX-NEXT: vpaddq %ymm1, %ymm0, %ymm0
 ; X32-AVX-NEXT: retl
@@ -927,9 +925,7 @@ define <4 x i64> @foldv4i64u() nounwind {
 ; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm1
 ; X32-AVX-NEXT: vpsrld $16, %ymm0, %ymm0
 ; X32-AVX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
-; X32-AVX-NEXT: vmovdqa {{.*#+}} ymm1 = [0,4294967295,0,0,4294967295,4294967295,0,4294967295]
-; X32-AVX-NEXT: vpsrlq $32, %ymm1, %ymm1
-; X32-AVX-NEXT: vpand %ymm1, %ymm0, %ymm1
+; X32-AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm0[0],ymm4[1,2,3],ymm0[4],ymm4[5],ymm0[6],ymm4[7]
 ; X32-AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
 ; X32-AVX-NEXT: vpaddq %ymm1, %ymm0, %ymm0
 ; X32-AVX-NEXT: retl
-- 
2.7.4