From 7c56bce996ff74a57e6536f2bb3c23612fd606fb Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Fri, 20 Jul 2018 15:24:12 +0000
Subject: [PATCH] [X86][AVX] Add support for 32/64 bits 256-bit vector
 horizontal op redundant shuffle removal

llvm-svn: 337561
---
 llvm/lib/Target/X86/X86ISelLowering.cpp | 16 +++++++++++-----
 llvm/test/CodeGen/X86/haddsub-shuf.ll   |  6 ------
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d9b42f6..e1b7adf 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -31144,8 +31144,8 @@ static SDValue foldShuffleOfHorizOp(SDNode *N) {
   // lanes of each operand as:
   // v4X32: A[0] + A[1] , A[2] + A[3] , B[0] + B[1] , B[2] + B[3]
   // ...similarly for v2f64 and v8i16.
-  // TODO: 256-bit is not the same because...x86.
-  if (HOp.getOperand(0) != HOp.getOperand(1) || HOp.getValueSizeInBits() != 128)
+  // TODO: Handle UNDEF operands.
+  if (HOp.getOperand(0) != HOp.getOperand(1))
     return SDValue();
 
   // When the operands of a horizontal math op are identical, the low half of
@@ -31156,9 +31156,15 @@
   // TODO: Other mask possibilities like {1,1} and {1,0} could be added here,
   // but this should be tied to whatever horizontal op matching and shuffle
   // canonicalization are producing.
-  if (isTargetShuffleEquivalent(Mask, { 0, 0 }) ||
-      isTargetShuffleEquivalent(Mask, { 0, 1, 0, 1 }) ||
-      isTargetShuffleEquivalent(Mask, { 0, 1, 2, 3, 0, 1, 2, 3 }))
+  if (HOp.getValueSizeInBits() == 128 &&
+      (isTargetShuffleEquivalent(Mask, {0, 0}) ||
+       isTargetShuffleEquivalent(Mask, {0, 1, 0, 1}) ||
+       isTargetShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3})))
+    return HOp;
+
+  if (HOp.getValueSizeInBits() == 256 &&
+      (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2}) ||
+       isTargetShuffleEquivalent(Mask, {0, 1, 0, 1, 4, 5, 4, 5})))
     return HOp;
 
   return SDValue();
diff --git a/llvm/test/CodeGen/X86/haddsub-shuf.ll b/llvm/test/CodeGen/X86/haddsub-shuf.ll
index 8ede703..1cb9bcc 100644
--- a/llvm/test/CodeGen/X86/haddsub-shuf.ll
+++ b/llvm/test/CodeGen/X86/haddsub-shuf.ll
@@ -63,7 +63,6 @@ define <8 x float> @hadd_v8f32b(<8 x float> %a) {
 ; AVX-LABEL: hadd_v8f32b:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddps %ymm0, %ymm0, %ymm0
-; AVX-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
 ; AVX-NEXT:    retq
   %a0 = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32>
   %a1 = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32>
@@ -129,7 +128,6 @@ define <8 x float> @hsub_v8f32b(<8 x float> %a) {
 ; AVX-LABEL: hsub_v8f32b:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubps %ymm0, %ymm0, %ymm0
-; AVX-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
 ; AVX-NEXT:    retq
   %a0 = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32>
   %a1 = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32>
@@ -165,7 +163,6 @@ define <4 x double> @hadd_v4f64(<4 x double> %a) {
 ; AVX-LABEL: hadd_v4f64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vhaddpd %ymm0, %ymm0, %ymm0
-; AVX-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
 ; AVX-NEXT:    retq
   %a0 = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32>
   %a1 = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32>
@@ -201,7 +198,6 @@ define <4 x double> @hsub_v4f64(<4 x double> %a) {
 ; AVX-LABEL: hsub_v4f64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vhsubpd %ymm0, %ymm0, %ymm0
-; AVX-NEXT:    vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2]
 ; AVX-NEXT:    retq
   %a0 = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32>
   %a1 = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32>
@@ -279,7 +275,6 @@ define <8 x i32> @hadd_v8i32b(<8 x i32> %a) {
 ; AVX2-LABEL: hadd_v8i32b:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vphaddd %ymm0, %ymm0, %ymm0
-; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,1,0,1,4,5,4,5]
 ; AVX2-NEXT:    retq
   %a0 = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32>
   %a1 = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32>
@@ -357,7 +352,6 @@ define <8 x i32> @hsub_v8i32b(<8 x i32> %a) {
 ; AVX2-LABEL: hsub_v8i32b:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vphsubd %ymm0, %ymm0, %ymm0
-; AVX2-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,1,0,1,4,5,4,5]
 ; AVX2-NEXT:    retq
   %a0 = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32>
   %a1 = shufflevector <8 x i32> %a, <8 x i32> undef, <8 x i32>
-- 
2.7.4
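
To sanity-check the shuffle masks above, here is a minimal standalone C++ sketch. It is not LLVM code: hadd_v8f32, hadd_v4f64, and applyShuffle are illustrative names, not X86ISelLowering helpers. It models the per-128-bit-lane horizontal-add semantics described in the foldShuffleOfHorizOp comment and verifies that, with identical operands, applying the two 256-bit masks accepted by this patch ({0, 1, 0, 1, 4, 5, 4, 5} and {0, 0, 2, 2}) is a no-op, which is exactly why the vpshufd and vmovddup instructions disappear from the tests.

#include <array>
#include <cassert>
#include <cstddef>

using V8F32 = std::array<float, 8>;
using V4F64 = std::array<double, 4>;

// vhaddps with sources A and B: within each 128-bit lane the result is
// A[0]+A[1], A[2]+A[3], B[0]+B[1], B[2]+B[3] (matching the comment in
// foldShuffleOfHorizOp).
static V8F32 hadd_v8f32(const V8F32 &A, const V8F32 &B) {
  return {A[0] + A[1], A[2] + A[3], B[0] + B[1], B[2] + B[3],
          A[4] + A[5], A[6] + A[7], B[4] + B[5], B[6] + B[7]};
}

// vhaddpd: within each 128-bit lane the result is A[0]+A[1], B[0]+B[1].
static V4F64 hadd_v4f64(const V4F64 &A, const V4F64 &B) {
  return {A[0] + A[1], B[0] + B[1], A[2] + A[3], B[2] + B[3]};
}

// Reference shuffle: R[i] = V[Mask[i]].
template <typename T, std::size_t N>
static std::array<T, N> applyShuffle(const std::array<T, N> &V,
                                     const std::array<int, N> &Mask) {
  std::array<T, N> R{};
  for (std::size_t i = 0; i != N; ++i)
    R[i] = V[Mask[i]];
  return R;
}

int main() {
  // Identical operands, as required by the fold: the high half of each
  // 128-bit lane already duplicates the low half.
  const V8F32 X = {1, 2, 3, 4, 5, 6, 7, 8};
  const V8F32 H32 = hadd_v8f32(X, X); // {3,7,3,7, 11,15,11,15}

  // The vpshufd mask removed from hadd_v8i32b/hsub_v8i32b: duplicate the
  // low 64 bits of each 128-bit lane. Applying it changes nothing.
  const std::array<int, 8> M32 = {0, 1, 0, 1, 4, 5, 4, 5};
  assert(applyShuffle(H32, M32) == H32);

  const V4F64 Y = {1, 2, 3, 4};
  const V4F64 H64 = hadd_v4f64(Y, Y); // {3,3, 7,7}

  // The vmovddup mask removed from the v4f64 tests.
  const std::array<int, 4> M64 = {0, 0, 2, 2};
  assert(applyShuffle(H64, M64) == H64);
  return 0;
}

Both asserts pass. Note that a mask such as {0, 1, 2, 3, 0, 1, 2, 3}, which the 128-bit case accepts, would move data across the 128-bit lane boundary of a 256-bit horizontal op result, so the 256-bit case only accepts per-lane duplication masks.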