From 6205ba0e7f9157a849a1ddaf76a2ea213bc95e7c Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Mon, 8 Oct 2018 19:48:18 +0000
Subject: [PATCH] [x86] add tests for phaddd/phaddw; NFC

More tests related to PR39195:
https://bugs.llvm.org/show_bug.cgi?id=39195

If we limit the horizontal codegen, it may require different
constraints for FP and integer.

llvm-svn: 343994
---
 llvm/test/CodeGen/X86/phaddsub.ll | 240 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 240 insertions(+)

diff --git a/llvm/test/CodeGen/X86/phaddsub.ll b/llvm/test/CodeGen/X86/phaddsub.ll
index 64f8935..5d7c77b 100644
--- a/llvm/test/CodeGen/X86/phaddsub.ll
+++ b/llvm/test/CodeGen/X86/phaddsub.ll
@@ -283,3 +283,243 @@ define <4 x i32> @phsubd1_reverse(<4 x i32> %x, <4 x i32> %y) {
   ret <4 x i32> %r
 }
 
+define <4 x i32> @phaddd_single_source1(<4 x i32> %x) {
+; SSSE3-LABEL: phaddd_single_source1:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,2]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; SSSE3-NEXT: paddd %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; AVX-LABEL: phaddd_single_source1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,2]
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+  %l = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> 
+  %r = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> 
+  %add = add <4 x i32> %l, %r
+  ret <4 x i32> %add
+}
+
+define <4 x i32> @phaddd_single_source2(<4 x i32> %x) {
+; SSSE3-LABEL: phaddd_single_source2:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,2]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; SSSE3-NEXT: paddd %xmm1, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,2,2,3]
+; SSSE3-NEXT: retq
+;
+; AVX-LABEL: phaddd_single_source2:
+; AVX: # %bb.0:
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,2]
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,2,3]
+; AVX-NEXT: retq
+  %l = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> 
+  %r = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> 
+  %add = add <4 x i32> %l, %r
+  %shuffle2 = shufflevector <4 x i32> %add, <4 x i32> undef, <4 x i32> 
+  ret <4 x i32> %shuffle2
+}
+
+define <4 x i32> @phaddd_single_source3(<4 x i32> %x) {
+; SSSE3-LABEL: phaddd_single_source3:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; SSSE3-NEXT: paddd %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; AVX-LABEL: phaddd_single_source3:
+; AVX: # %bb.0:
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+  %l = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> 
+  %r = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> 
+  %add = add <4 x i32> %l, %r
+  ret <4 x i32> %add
+}
+
+define <4 x i32> @phaddd_single_source4(<4 x i32> %x) {
+; SSSE3-LABEL: phaddd_single_source4:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,2,2]
+; SSSE3-NEXT: paddd %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; AVX-LABEL: phaddd_single_source4:
+; AVX: # %bb.0:
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,2,2]
+; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+  %l = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> 
+  %add = add <4 x i32> %l, %x
+  ret <4 x i32> %add
+}
+
+define <4 x i32> @phaddd_single_source5(<4 x i32> %x) {
+; SSSE3-LABEL: phaddd_single_source5:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,2,2]
+; SSSE3-NEXT: paddd %xmm0, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; SSSE3-NEXT: retq
+;
+; AVX-LABEL: phaddd_single_source5:
+; AVX: # %bb.0:
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,2,2]
+; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT: retq
+  %l = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> 
+  %add = add <4 x i32> %l, %x
+  %shuffle2 = shufflevector <4 x i32> %add, <4 x i32> undef, <4 x i32> 
+  ret <4 x i32> %shuffle2
+}
+
+define <4 x i32> @phaddd_single_source6(<4 x i32> %x) {
+; SSSE3-LABEL: phaddd_single_source6:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; SSSE3-NEXT: paddd %xmm1, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; SSSE3-NEXT: retq
+;
+; AVX-LABEL: phaddd_single_source6:
+; AVX: # %bb.0:
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
+; AVX-NEXT: retq
+  %l = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> 
+  %r = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> 
+  %add = add <4 x i32> %l, %r
+  %shuffle2 = shufflevector <4 x i32> %add, <4 x i32> undef, <4 x i32> 
+  ret <4 x i32> %shuffle2
+}
+
+define <8 x i16> @phaddw_single_source1(<8 x i16> %x) {
+; SSSE3-LABEL: phaddw_single_source1:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,12,13]
+; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[6,7,2,3,4,5,6,7,2,3,6,7,10,11,14,15]
+; SSSE3-NEXT: paddw %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; AVX-LABEL: phaddw_single_source1:
+; AVX: # %bb.0:
+; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1,4,5,4,5,6,7,0,1,4,5,8,9,12,13]
+; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,7,2,3,4,5,6,7,2,3,6,7,10,11,14,15]
+; AVX-NEXT: vpaddw %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+  %l = shufflevector <8 x i16> %x, <8 x i16> undef, <8 x i32> 
+  %r = shufflevector <8 x i16> %x, <8 x i16> undef, <8 x i32> 
+  %add = add <8 x i16> %l, %r
+  ret <8 x i16> %add
+}
+
+define <8 x i16> @phaddw_single_source2(<8 x i16> %x) {
+; SSSE3-LABEL: phaddw_single_source2:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
+; SSSE3-NEXT: paddw %xmm1, %xmm0
+; SSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,6,7]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
+; SSSE3-NEXT: retq
+;
+; AVX-LABEL: phaddw_single_source2:
+; AVX: # %bb.0:
+; AVX-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
+; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
+; AVX-NEXT: vpaddw %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,6,7]
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
+; AVX-NEXT: retq
+  %l = shufflevector <8 x i16> %x, <8 x i16> undef, <8 x i32> 
+  %r = shufflevector <8 x i16> %x, <8 x i16> undef, <8 x i32> 
+  %add = add <8 x i16> %l, %r
+  %shuffle2 = shufflevector <8 x i16> %add, <8 x i16> undef, <8 x i32> 
+  ret <8 x i16> %shuffle2
+}
+
+define <8 x i16> @phaddw_single_source3(<8 x i16> %x) {
+; SSSE3-LABEL: phaddw_single_source3:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
+; SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
+; SSSE3-NEXT: paddw %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; AVX-LABEL: phaddw_single_source3:
+; AVX: # %bb.0:
+; AVX-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
+; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
+; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
+; AVX-NEXT: vpaddw %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+  %l = shufflevector <8 x i16> %x, <8 x i16> undef, <8 x i32> 
+  %r = shufflevector <8 x i16> %x, <8 x i16> undef, <8 x i32> 
+  %add = add <8 x i16> %l, %r
+  ret <8 x i16> %add
+}
+
+define <8 x i16> @phaddw_single_source4(<8 x i16> %x) {
+; SSSE3-LABEL: phaddw_single_source4:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movdqa %xmm0, %xmm1
+; SSSE3-NEXT: pslld $16, %xmm1
+; SSSE3-NEXT: paddw %xmm0, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: retq
+;
+; AVX-LABEL: phaddw_single_source4:
+; AVX: # %bb.0:
+; AVX-NEXT: vpslld $16, %xmm0, %xmm1
+; AVX-NEXT: vpaddw %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+  %l = shufflevector <8 x i16> %x, <8 x i16> undef, <8 x i32> 
+  %add = add <8 x i16> %l, %x
+  ret <8 x i16> %add
+}
+
+define <8 x i16> @phaddw_single_source6(<8 x i16> %x) {
+; SSSE3-LABEL: phaddw_single_source6:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
+; SSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7]
+; SSSE3-NEXT: paddw %xmm1, %xmm0
+; SSSE3-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT: retq
+;
+; AVX-LABEL: phaddw_single_source6:
+; AVX: # %bb.0:
+; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
+; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
+; AVX-NEXT: vpaddw %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
+; AVX-NEXT: retq
+  %l = shufflevector <8 x i16> %x, <8 x i16> undef, <8 x i32> 
+  %r = shufflevector <8 x i16> %x, <8 x i16> undef, <8 x i32> 
+  %add = add <8 x i16> %l, %r
+  %shuffle2 = shufflevector <8 x i16> %add, <8 x i16> undef, <8 x i32> 
+  ret <8 x i16> %shuffle2
+}
+
-- 
2.7.4