From 1a8d790cf5f89c1df718844f13e934e39bef6ef5 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Tue, 13 Aug 2019 10:51:39 +0000
Subject: [PATCH] [X86] SimplifyDemandedVectorElts - attempt to recombine
 target shuffle using DemandedElts mask (reapplied)

If we don't demand all elements, then attempt to combine to a simpler
shuffle. At the moment we can only do this if Depth == 0, as
combineX86ShufflesRecursively uses Depth to track whether the shuffle
has really changed or not - we'll need to change this before we can
properly start merging combineX86ShufflesRecursively into
SimplifyDemandedVectorElts.

The insertps-combine.ll regression is because
XFormVExtractWithShuffleIntoLoad can't see through shuffles of
different widths - this will be fixed in a follow-up commit.

Reapplying this, as rL368307 had to be reverted as part of rL368660 in
order to revert rL368276.

llvm-svn: 368662
---
 llvm/lib/Target/X86/X86ISelLowering.cpp            |  17 ++
 .../CodeGen/X86/avx512-intrinsics-fast-isel.ll     |  16 +-
 llvm/test/CodeGen/X86/insertps-combine.ll          |   5 +-
 llvm/test/CodeGen/X86/shrink_vmul.ll               | 202 +++++++++++----------
 llvm/test/CodeGen/X86/vec_smulo.ll                 | 152 ++++++++--------
 llvm/test/CodeGen/X86/vec_umulo.ll                 | 140 +++++++-------
 6 files changed, 276 insertions(+), 256 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a82f213..66d7b76 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -33920,6 +33920,23 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
     return true;
   }
 
+  // If we don't demand all elements, then attempt to combine to a simpler
+  // shuffle.
+  // TODO: Handle other depths, but first we need to handle the fact that
+  // it might combine to the same shuffle.
+  if (!DemandedElts.isAllOnesValue() && Depth == 0) {
+    SmallVector<int, 64> DemandedMask(NumElts, SM_SentinelUndef);
+    for (int i = 0; i != NumElts; ++i)
+      if (DemandedElts[i])
+        DemandedMask[i] = i;
+
+    SDValue NewShuffle = combineX86ShufflesRecursively(
+        {Op}, 0, Op, DemandedMask, {}, Depth, /*HasVarMask*/ false,
+        /*AllowVarMask*/ true, TLO.DAG, Subtarget);
+    if (NewShuffle)
+      return TLO.CombineTo(Op, NewShuffle);
+  }
+
   // Extract known zero/undef elements.
   // TODO - Propagate input undef/zero elts.
for (int i = 0; i != NumElts; ++i) { diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll index d7611d1..4da750e 100644 --- a/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll +++ b/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll @@ -8319,7 +8319,7 @@ define float @test_mm512_reduce_max_ps(<16 x float> %__W) { ; X86-NEXT: vmaxps %xmm1, %xmm0, %xmm0 ; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; X86-NEXT: vmaxps %xmm1, %xmm0, %xmm0 -; X86-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,0,3,2] +; X86-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm0 ; X86-NEXT: vmovss %xmm0, (%esp) ; X86-NEXT: flds (%esp) @@ -8336,7 +8336,7 @@ define float @test_mm512_reduce_max_ps(<16 x float> %__W) { ; X64-NEXT: vmaxps %xmm1, %xmm0, %xmm0 ; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; X64-NEXT: vmaxps %xmm1, %xmm0, %xmm0 -; X64-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,0,3,2] +; X64-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; X64-NEXT: vmaxss %xmm1, %xmm0, %xmm0 ; X64-NEXT: vzeroupper ; X64-NEXT: retq @@ -8445,7 +8445,7 @@ define float @test_mm512_reduce_min_ps(<16 x float> %__W) { ; X86-NEXT: vminps %xmm1, %xmm0, %xmm0 ; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; X86-NEXT: vminps %xmm1, %xmm0, %xmm0 -; X86-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,0,3,2] +; X86-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; X86-NEXT: vminss %xmm1, %xmm0, %xmm0 ; X86-NEXT: vmovss %xmm0, (%esp) ; X86-NEXT: flds (%esp) @@ -8462,7 +8462,7 @@ define float @test_mm512_reduce_min_ps(<16 x float> %__W) { ; X64-NEXT: vminps %xmm1, %xmm0, %xmm0 ; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; X64-NEXT: vminps %xmm1, %xmm0, %xmm0 -; X64-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,0,3,2] +; X64-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; X64-NEXT: vminss %xmm1, %xmm0, %xmm0 ; X64-NEXT: vzeroupper ; X64-NEXT: retq @@ -8623,7 +8623,7 @@ define float @test_mm512_mask_reduce_max_ps(i16 zeroext %__M, <16 x float> %__W) ; X86-NEXT: vmaxps %xmm1, %xmm0, %xmm0 ; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; X86-NEXT: vmaxps %xmm1, %xmm0, %xmm0 -; X86-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,0,3,2] +; X86-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm0 ; X86-NEXT: vmovss %xmm0, (%esp) ; X86-NEXT: flds (%esp) @@ -8643,7 +8643,7 @@ define float @test_mm512_mask_reduce_max_ps(i16 zeroext %__M, <16 x float> %__W) ; X64-NEXT: vmaxps %xmm1, %xmm0, %xmm0 ; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; X64-NEXT: vmaxps %xmm1, %xmm0, %xmm0 -; X64-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,0,3,2] +; X64-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; X64-NEXT: vmaxss %xmm1, %xmm0, %xmm0 ; X64-NEXT: vzeroupper ; X64-NEXT: retq @@ -8808,7 +8808,7 @@ define float @test_mm512_mask_reduce_min_ps(i16 zeroext %__M, <16 x float> %__W) ; X86-NEXT: vminps %xmm1, %xmm0, %xmm0 ; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; X86-NEXT: vminps %xmm1, %xmm0, %xmm0 -; X86-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,0,3,2] +; X86-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; X86-NEXT: vminss %xmm1, %xmm0, %xmm0 ; X86-NEXT: vmovss %xmm0, (%esp) ; X86-NEXT: flds (%esp) @@ -8828,7 +8828,7 @@ define float @test_mm512_mask_reduce_min_ps(i16 zeroext %__M, <16 x float> %__W) ; X64-NEXT: vminps %xmm1, %xmm0, %xmm0 ; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0] ; X64-NEXT: vminps %xmm1, %xmm0, %xmm0 -; X64-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,0,3,2] +; X64-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3] ; X64-NEXT: vminss %xmm1, 
%xmm0, %xmm0 ; X64-NEXT: vzeroupper ; X64-NEXT: retq diff --git a/llvm/test/CodeGen/X86/insertps-combine.ll b/llvm/test/CodeGen/X86/insertps-combine.ll index 6bef76e..98ed157 100644 --- a/llvm/test/CodeGen/X86/insertps-combine.ll +++ b/llvm/test/CodeGen/X86/insertps-combine.ll @@ -285,12 +285,13 @@ define float @extract_lane_insertps_5123(<4 x float> %a0, <4 x float> *%p1) { define float @extract_lane_insertps_6123(<4 x float> %a0, <4 x float> *%p1) { ; SSE-LABEL: extract_lane_insertps_6123: ; SSE: # %bb.0: -; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; SSE-NEXT: movaps (%rdi), %xmm0 +; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1] ; SSE-NEXT: retq ; ; AVX-LABEL: extract_lane_insertps_6123: ; AVX: # %bb.0: -; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero +; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = mem[1,0] ; AVX-NEXT: retq %a1 = load <4 x float>, <4 x float> *%p1 %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 128) diff --git a/llvm/test/CodeGen/X86/shrink_vmul.ll b/llvm/test/CodeGen/X86/shrink_vmul.ll index 5ceb299..1c1032a 100644 --- a/llvm/test/CodeGen/X86/shrink_vmul.ll +++ b/llvm/test/CodeGen/X86/shrink_vmul.ll @@ -2085,85 +2085,88 @@ define void @PR34947(<9 x i16>* %p0, <9 x i32>* %p1) nounwind { ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-SSE-NEXT: movdqa (%eax), %xmm5 -; X86-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X86-SSE-NEXT: movdqa (%ecx), %xmm2 ; X86-SSE-NEXT: movdqa 16(%ecx), %xmm6 -; X86-SSE-NEXT: pxor %xmm0, %xmm0 -; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; X86-SSE-NEXT: pxor %xmm1, %xmm1 +; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] ; X86-SSE-NEXT: movdqa %xmm5, %xmm4 -; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3] -; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7] -; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[3,1,2,3] -; X86-SSE-NEXT: movd %xmm0, %eax -; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[3,1,2,3] -; X86-SSE-NEXT: movd %xmm0, %esi +; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3] +; X86-SSE-NEXT: movdqa %xmm5, %xmm3 +; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7] +; X86-SSE-NEXT: movdqa %xmm5, %xmm1 +; X86-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero +; X86-SSE-NEXT: movd %xmm1, %eax +; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[3,1,2,3] +; X86-SSE-NEXT: movd %xmm1, %esi ; X86-SSE-NEXT: xorl %edx, %edx ; X86-SSE-NEXT: divl %esi -; X86-SSE-NEXT: movd %edx, %xmm0 -; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[2,3,0,1] -; X86-SSE-NEXT: movd %xmm3, %eax -; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,0,1] -; X86-SSE-NEXT: movd %xmm3, %esi +; X86-SSE-NEXT: movd %edx, %xmm1 +; X86-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[2,3,0,1] +; X86-SSE-NEXT: movd %xmm7, %eax +; X86-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm6[2,3,0,1] +; X86-SSE-NEXT: movd %xmm7, %esi ; X86-SSE-NEXT: xorl %edx, %edx ; X86-SSE-NEXT: divl %esi ; X86-SSE-NEXT: movd %edx, %xmm7 -; X86-SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1] -; X86-SSE-NEXT: movd %xmm5, %eax +; X86-SSE-NEXT: punpckldq {{.*#+}} 
xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1] +; X86-SSE-NEXT: movd %xmm3, %eax ; X86-SSE-NEXT: movd %xmm6, %esi ; X86-SSE-NEXT: xorl %edx, %edx ; X86-SSE-NEXT: divl %esi +; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3] +; X86-SSE-NEXT: movd %xmm3, %eax ; X86-SSE-NEXT: movd %edx, %xmm3 -; X86-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3] -; X86-SSE-NEXT: movd %xmm5, %eax -; X86-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,2,3] -; X86-SSE-NEXT: movd %xmm5, %esi -; X86-SSE-NEXT: xorl %edx, %edx -; X86-SSE-NEXT: divl %esi -; X86-SSE-NEXT: movd %edx, %xmm5 -; X86-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1] -; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0] -; X86-SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm4[3,1,2,3] -; X86-SSE-NEXT: movd %xmm6, %eax -; X86-SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[3,1,2,3] +; X86-SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,2,3] ; X86-SSE-NEXT: movd %xmm6, %esi ; X86-SSE-NEXT: xorl %edx, %edx ; X86-SSE-NEXT: divl %esi ; X86-SSE-NEXT: movd %edx, %xmm6 -; X86-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[2,3,0,1] +; X86-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1] +; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0] +; X86-SSE-NEXT: movdqa %xmm5, %xmm7 +; X86-SSE-NEXT: psrld $16, %xmm7 ; X86-SSE-NEXT: movd %xmm7, %eax -; X86-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[2,3,0,1] +; X86-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,2,3] ; X86-SSE-NEXT: movd %xmm7, %esi ; X86-SSE-NEXT: xorl %edx, %edx ; X86-SSE-NEXT: divl %esi ; X86-SSE-NEXT: movd %edx, %xmm7 -; X86-SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1] ; X86-SSE-NEXT: movd %xmm4, %eax ; X86-SSE-NEXT: movd %xmm2, %esi ; X86-SSE-NEXT: xorl %edx, %edx ; X86-SSE-NEXT: divl %esi -; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,2,3] +; X86-SSE-NEXT: psrlq $48, %xmm5 +; X86-SSE-NEXT: movd %xmm5, %eax +; X86-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[3,1,2,3] +; X86-SSE-NEXT: movd %xmm5, %esi +; X86-SSE-NEXT: movd %edx, %xmm5 +; X86-SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1] +; X86-SSE-NEXT: xorl %edx, %edx +; X86-SSE-NEXT: divl %esi +; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1] ; X86-SSE-NEXT: movd %xmm4, %eax ; X86-SSE-NEXT: movd %edx, %xmm4 -; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3] +; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] ; X86-SSE-NEXT: movd %xmm2, %esi ; X86-SSE-NEXT: xorl %edx, %edx ; X86-SSE-NEXT: divl %esi ; X86-SSE-NEXT: movd %edx, %xmm2 +; X86-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1] +; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm2[0] +; X86-SSE-NEXT: movd %xmm0, %eax +; X86-SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,0],xmm4[0,0] +; X86-SSE-NEXT: movdqa {{.*#+}} xmm0 = [8199,8199,8199,8199] +; X86-SSE-NEXT: pmuludq %xmm0, %xmm7 +; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[0,2,2,3] +; X86-SSE-NEXT: pmuludq %xmm0, %xmm5 +; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,2,2,3] ; X86-SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] -; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm7[0] -; X86-SSE-NEXT: movd %xmm1, %eax -; X86-SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm6[0,0] -; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [8199,8199,8199,8199] -; X86-SSE-NEXT: pmuludq %xmm1, %xmm4 -; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3] -; X86-SSE-NEXT: pmuludq %xmm1, %xmm2 -; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] -; X86-SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1] -; X86-SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,0],xmm0[0,0] -; 
X86-SSE-NEXT: pmuludq %xmm1, %xmm3 -; X86-SSE-NEXT: pmuludq %xmm1, %xmm5 +; X86-SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,0],xmm1[0,0] +; X86-SSE-NEXT: pmuludq %xmm0, %xmm3 +; X86-SSE-NEXT: pmuludq %xmm0, %xmm6 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3] -; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,2,2,3] +; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[0,2,2,3] ; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; X86-SSE-NEXT: xorl %edx, %edx ; X86-SSE-NEXT: divl 32(%ecx) @@ -2324,92 +2327,95 @@ define void @PR34947(<9 x i16>* %p0, <9 x i32>* %p1) nounwind { ; X64-SSE-LABEL: PR34947: ; X64-SSE: # %bb.0: ; X64-SSE-NEXT: movdqa (%rdi), %xmm5 -; X64-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero +; X64-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero ; X64-SSE-NEXT: movdqa (%rsi), %xmm2 ; X64-SSE-NEXT: movdqa 16(%rsi), %xmm6 -; X64-SSE-NEXT: pxor %xmm0, %xmm0 -; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3] +; X64-SSE-NEXT: pxor %xmm1, %xmm1 +; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3] ; X64-SSE-NEXT: movdqa %xmm5, %xmm3 -; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3] -; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7] -; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[3,1,2,3] -; X64-SSE-NEXT: movd %xmm0, %eax -; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[3,1,2,3] -; X64-SSE-NEXT: movd %xmm0, %ecx +; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3] +; X64-SSE-NEXT: movdqa %xmm5, %xmm7 +; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7] +; X64-SSE-NEXT: movdqa %xmm5, %xmm1 +; X64-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero +; X64-SSE-NEXT: movd %xmm1, %eax +; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[3,1,2,3] +; X64-SSE-NEXT: movd %xmm1, %ecx ; X64-SSE-NEXT: xorl %edx, %edx ; X64-SSE-NEXT: divl %ecx ; X64-SSE-NEXT: movd %edx, %xmm8 -; X64-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1] -; X64-SSE-NEXT: movd %xmm4, %eax -; X64-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm6[2,3,0,1] -; X64-SSE-NEXT: movd %xmm4, %ecx +; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[2,3,0,1] +; X64-SSE-NEXT: movd %xmm1, %eax +; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[2,3,0,1] +; X64-SSE-NEXT: movd %xmm1, %ecx ; X64-SSE-NEXT: xorl %edx, %edx ; X64-SSE-NEXT: divl %ecx -; X64-SSE-NEXT: movd %edx, %xmm7 -; X64-SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1] -; X64-SSE-NEXT: movd %xmm5, %eax +; X64-SSE-NEXT: movd %edx, %xmm1 +; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1] +; X64-SSE-NEXT: movd %xmm7, %eax ; X64-SSE-NEXT: movd %xmm6, %ecx ; X64-SSE-NEXT: xorl %edx, %edx ; X64-SSE-NEXT: divl %ecx ; X64-SSE-NEXT: movd %edx, %xmm4 -; X64-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3] -; X64-SSE-NEXT: movd %xmm5, %eax -; X64-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,2,3] -; X64-SSE-NEXT: movd %xmm5, %ecx -; X64-SSE-NEXT: xorl %edx, %edx -; X64-SSE-NEXT: divl %ecx -; X64-SSE-NEXT: movd %edx, %xmm5 -; X64-SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1] -; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm7[0] -; X64-SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[3,1,2,3] -; X64-SSE-NEXT: movd %xmm6, %eax -; X64-SSE-NEXT: pshufd {{.*#+}} 
xmm6 = xmm2[3,1,2,3] +; X64-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,2,3] +; X64-SSE-NEXT: movd %xmm7, %eax +; X64-SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,2,3] ; X64-SSE-NEXT: movd %xmm6, %ecx ; X64-SSE-NEXT: xorl %edx, %edx ; X64-SSE-NEXT: divl %ecx ; X64-SSE-NEXT: movd %edx, %xmm6 -; X64-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[2,3,0,1] -; X64-SSE-NEXT: movd %xmm7, %eax -; X64-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[2,3,0,1] -; X64-SSE-NEXT: movd %xmm7, %ecx +; X64-SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1] +; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm1[0] +; X64-SSE-NEXT: movdqa %xmm5, %xmm1 +; X64-SSE-NEXT: psrld $16, %xmm1 +; X64-SSE-NEXT: movd %xmm1, %eax +; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,2,3] +; X64-SSE-NEXT: movd %xmm1, %ecx ; X64-SSE-NEXT: xorl %edx, %edx ; X64-SSE-NEXT: divl %ecx ; X64-SSE-NEXT: movd %edx, %xmm7 -; X64-SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1] ; X64-SSE-NEXT: movd %xmm3, %eax ; X64-SSE-NEXT: movd %xmm2, %ecx ; X64-SSE-NEXT: xorl %edx, %edx ; X64-SSE-NEXT: divl %ecx -; X64-SSE-NEXT: movd %edx, %xmm0 -; X64-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3] +; X64-SSE-NEXT: movd %edx, %xmm1 +; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1] +; X64-SSE-NEXT: psrlq $48, %xmm5 +; X64-SSE-NEXT: movd %xmm5, %eax +; X64-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[3,1,2,3] +; X64-SSE-NEXT: movd %xmm5, %ecx +; X64-SSE-NEXT: xorl %edx, %edx +; X64-SSE-NEXT: divl %ecx +; X64-SSE-NEXT: movd %edx, %xmm5 +; X64-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1] ; X64-SSE-NEXT: movd %xmm3, %eax -; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3] +; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] ; X64-SSE-NEXT: movd %xmm2, %ecx ; X64-SSE-NEXT: xorl %edx, %edx ; X64-SSE-NEXT: divl %ecx ; X64-SSE-NEXT: movd %edx, %xmm2 -; X64-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm7[0] -; X64-SSE-NEXT: movd %xmm1, %eax +; X64-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1] +; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0] +; X64-SSE-NEXT: movd %xmm0, %eax ; X64-SSE-NEXT: xorl %edx, %edx ; X64-SSE-NEXT: divl 32(%rsi) -; X64-SSE-NEXT: movdqa {{.*#+}} xmm1 = [8199,8199,8199,8199] -; X64-SSE-NEXT: pmuludq %xmm1, %xmm0 -; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; X64-SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm6[0,0] -; X64-SSE-NEXT: pmuludq %xmm1, %xmm2 -; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3] -; X64-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] -; X64-SSE-NEXT: pmuludq %xmm1, %xmm4 +; X64-SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,0],xmm5[0,0] +; X64-SSE-NEXT: movdqa {{.*#+}} xmm0 = [8199,8199,8199,8199] +; X64-SSE-NEXT: pmuludq %xmm0, %xmm7 +; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[0,2,2,3] +; X64-SSE-NEXT: pmuludq %xmm0, %xmm1 +; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1] +; X64-SSE-NEXT: pmuludq %xmm0, %xmm4 ; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3] -; X64-SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,0],xmm8[0,0] -; X64-SSE-NEXT: pmuludq %xmm1, %xmm5 -; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,2,2,3] -; X64-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; X64-SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,0],xmm8[0,0] +; X64-SSE-NEXT: pmuludq %xmm0, %xmm6 +; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,2,2,3] +; X64-SSE-NEXT: punpckldq {{.*#+}} 
xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1] ; X64-SSE-NEXT: imull $8199, %edx, %eax # imm = 0x2007 ; X64-SSE-NEXT: movl %eax, (%rax) ; X64-SSE-NEXT: movdqa %xmm2, (%rax) -; X64-SSE-NEXT: movdqa %xmm0, (%rax) +; X64-SSE-NEXT: movdqa %xmm1, (%rax) ; X64-SSE-NEXT: retq ; ; X64-AVX1-LABEL: PR34947: diff --git a/llvm/test/CodeGen/X86/vec_smulo.ll b/llvm/test/CodeGen/X86/vec_smulo.ll index 385c556..a34824d 100644 --- a/llvm/test/CodeGen/X86/vec_smulo.ll +++ b/llvm/test/CodeGen/X86/vec_smulo.ll @@ -195,17 +195,17 @@ define <3 x i32> @smulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] ; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] ; SSE2-NEXT: psubd %xmm2, %xmm4 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3] ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; SSE2-NEXT: movq %xmm0, (%rdi) -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSE2-NEXT: movq %xmm2, (%rdi) ; SSE2-NEXT: psrad $31, %xmm2 ; SSE2-NEXT: pcmpeqd %xmm4, %xmm2 -; SSE2-NEXT: pcmpeqd %xmm0, %xmm0 -; SSE2-NEXT: pxor %xmm2, %xmm0 -; SSE2-NEXT: movd %xmm1, 8(%rdi) +; SSE2-NEXT: pcmpeqd %xmm1, %xmm1 +; SSE2-NEXT: pxor %xmm2, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] +; SSE2-NEXT: movd %xmm0, 8(%rdi) +; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: retq ; ; SSSE3-LABEL: smulo_v3i32: @@ -225,17 +225,17 @@ define <3 x i32> @smulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun ; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3] ; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1] ; SSSE3-NEXT: psubd %xmm2, %xmm4 -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3] ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] -; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] -; SSSE3-NEXT: movq %xmm0, (%rdi) -; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1] -; SSSE3-NEXT: movdqa %xmm0, %xmm2 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1] +; SSSE3-NEXT: movq %xmm2, (%rdi) ; SSSE3-NEXT: psrad $31, %xmm2 ; SSSE3-NEXT: pcmpeqd %xmm4, %xmm2 -; SSSE3-NEXT: pcmpeqd %xmm0, %xmm0 -; SSSE3-NEXT: pxor %xmm2, %xmm0 -; SSSE3-NEXT: movd %xmm1, 8(%rdi) +; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1 +; SSSE3-NEXT: pxor %xmm2, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] +; SSSE3-NEXT: movd %xmm0, 8(%rdi) +; SSSE3-NEXT: movdqa %xmm1, %xmm0 ; SSSE3-NEXT: retq ; ; SSE41-LABEL: smulo_v3i32: @@ -1767,52 +1767,52 @@ define <4 x i32> @smulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) noun ; SSE2-NEXT: psrad $8, %xmm0 ; SSE2-NEXT: pslld $8, %xmm1 ; SSE2-NEXT: psrad $8, %xmm1 -; SSE2-NEXT: pxor %xmm4, %xmm4 +; SSE2-NEXT: pxor %xmm3, %xmm3 ; SSE2-NEXT: pxor %xmm2, %xmm2 ; SSE2-NEXT: pcmpgtd %xmm1, %xmm2 ; SSE2-NEXT: pand %xmm0, %xmm2 -; SSE2-NEXT: pcmpgtd %xmm0, %xmm4 -; SSE2-NEXT: pand %xmm1, %xmm4 -; SSE2-NEXT: paddd %xmm2, %xmm4 -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] +; SSE2-NEXT: pcmpgtd %xmm0, %xmm3 +; SSE2-NEXT: pand %xmm1, %xmm3 +; SSE2-NEXT: paddd %xmm2, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] ; SSE2-NEXT: pmuludq %xmm1, %xmm0 -; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,3,2,3] ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] 
-; SSE2-NEXT: pmuludq %xmm5, %xmm2 +; SSE2-NEXT: pmuludq %xmm4, %xmm2 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3] -; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] -; SSE2-NEXT: psubd %xmm4, %xmm3 -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1] +; SSE2-NEXT: psubd %xmm3, %xmm5 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,2,2,3] ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3] -; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1] -; SSE2-NEXT: movdqa %xmm4, %xmm1 +; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] +; SSE2-NEXT: movdqa %xmm3, %xmm1 ; SSE2-NEXT: pslld $8, %xmm1 ; SSE2-NEXT: psrad $8, %xmm1 -; SSE2-NEXT: pcmpeqd %xmm4, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[3,1,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1] -; SSE2-NEXT: psrad $31, %xmm4 -; SSE2-NEXT: pcmpeqd %xmm3, %xmm4 -; SSE2-NEXT: pcmpeqd %xmm3, %xmm3 -; SSE2-NEXT: pxor %xmm3, %xmm4 -; SSE2-NEXT: pxor %xmm3, %xmm1 -; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: pcmpeqd %xmm3, %xmm1 +; SSE2-NEXT: psrad $31, %xmm3 +; SSE2-NEXT: pcmpeqd %xmm5, %xmm3 +; SSE2-NEXT: pcmpeqd %xmm4, %xmm4 +; SSE2-NEXT: pxor %xmm4, %xmm3 +; SSE2-NEXT: pxor %xmm4, %xmm1 +; SSE2-NEXT: por %xmm3, %xmm1 ; SSE2-NEXT: movd %xmm0, %eax ; SSE2-NEXT: movw %ax, (%rdi) -; SSE2-NEXT: movd %xmm2, %ecx -; SSE2-NEXT: movw %cx, 3(%rdi) +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] +; SSE2-NEXT: movd %xmm0, %ecx +; SSE2-NEXT: movw %cx, 6(%rdi) +; SSE2-NEXT: movd %xmm2, %edx +; SSE2-NEXT: movw %dx, 3(%rdi) ; SSE2-NEXT: shrl $16, %eax ; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: shrl $16, %ecx -; SSE2-NEXT: movb %cl, 5(%rdi) -; SSE2-NEXT: movd %xmm5, %eax +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3] +; SSE2-NEXT: movd %xmm0, %eax ; SSE2-NEXT: movw %ax, 9(%rdi) -; SSE2-NEXT: movd %xmm6, %ecx -; SSE2-NEXT: movw %cx, 6(%rdi) -; SSE2-NEXT: shrl $16, %eax -; SSE2-NEXT: movb %al, 11(%rdi) ; SSE2-NEXT: shrl $16, %ecx ; SSE2-NEXT: movb %cl, 8(%rdi) +; SSE2-NEXT: shrl $16, %edx +; SSE2-NEXT: movb %dl, 5(%rdi) +; SSE2-NEXT: shrl $16, %eax +; SSE2-NEXT: movb %al, 11(%rdi) ; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: retq ; @@ -1822,52 +1822,52 @@ define <4 x i32> @smulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) noun ; SSSE3-NEXT: psrad $8, %xmm0 ; SSSE3-NEXT: pslld $8, %xmm1 ; SSSE3-NEXT: psrad $8, %xmm1 -; SSSE3-NEXT: pxor %xmm4, %xmm4 +; SSSE3-NEXT: pxor %xmm3, %xmm3 ; SSSE3-NEXT: pxor %xmm2, %xmm2 ; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2 ; SSSE3-NEXT: pand %xmm0, %xmm2 -; SSSE3-NEXT: pcmpgtd %xmm0, %xmm4 -; SSSE3-NEXT: pand %xmm1, %xmm4 -; SSSE3-NEXT: paddd %xmm2, %xmm4 -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3] +; SSSE3-NEXT: pcmpgtd %xmm0, %xmm3 +; SSSE3-NEXT: pand %xmm1, %xmm3 +; SSSE3-NEXT: paddd %xmm2, %xmm3 +; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] ; SSSE3-NEXT: pmuludq %xmm1, %xmm0 -; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,3,2,3] ; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] -; SSSE3-NEXT: pmuludq %xmm5, %xmm2 +; SSSE3-NEXT: pmuludq %xmm4, %xmm2 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3] -; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] -; SSSE3-NEXT: psubd %xmm4, %xmm3 -; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1] +; SSSE3-NEXT: psubd %xmm3, %xmm5 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,2,2,3] ; SSSE3-NEXT: 
pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3] -; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1] -; SSSE3-NEXT: movdqa %xmm4, %xmm1 +; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] +; SSSE3-NEXT: movdqa %xmm3, %xmm1 ; SSSE3-NEXT: pslld $8, %xmm1 ; SSSE3-NEXT: psrad $8, %xmm1 -; SSSE3-NEXT: pcmpeqd %xmm4, %xmm1 -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[3,1,2,3] -; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1] -; SSSE3-NEXT: psrad $31, %xmm4 -; SSSE3-NEXT: pcmpeqd %xmm3, %xmm4 -; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3 -; SSSE3-NEXT: pxor %xmm3, %xmm4 -; SSSE3-NEXT: pxor %xmm3, %xmm1 -; SSSE3-NEXT: por %xmm4, %xmm1 +; SSSE3-NEXT: pcmpeqd %xmm3, %xmm1 +; SSSE3-NEXT: psrad $31, %xmm3 +; SSSE3-NEXT: pcmpeqd %xmm5, %xmm3 +; SSSE3-NEXT: pcmpeqd %xmm4, %xmm4 +; SSSE3-NEXT: pxor %xmm4, %xmm3 +; SSSE3-NEXT: pxor %xmm4, %xmm1 +; SSSE3-NEXT: por %xmm3, %xmm1 ; SSSE3-NEXT: movd %xmm0, %eax ; SSSE3-NEXT: movw %ax, (%rdi) -; SSSE3-NEXT: movd %xmm2, %ecx -; SSSE3-NEXT: movw %cx, 3(%rdi) +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] +; SSSE3-NEXT: movd %xmm0, %ecx +; SSSE3-NEXT: movw %cx, 6(%rdi) +; SSSE3-NEXT: movd %xmm2, %edx +; SSSE3-NEXT: movw %dx, 3(%rdi) ; SSSE3-NEXT: shrl $16, %eax ; SSSE3-NEXT: movb %al, 2(%rdi) -; SSSE3-NEXT: shrl $16, %ecx -; SSSE3-NEXT: movb %cl, 5(%rdi) -; SSSE3-NEXT: movd %xmm5, %eax +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3] +; SSSE3-NEXT: movd %xmm0, %eax ; SSSE3-NEXT: movw %ax, 9(%rdi) -; SSSE3-NEXT: movd %xmm6, %ecx -; SSSE3-NEXT: movw %cx, 6(%rdi) -; SSSE3-NEXT: shrl $16, %eax -; SSSE3-NEXT: movb %al, 11(%rdi) ; SSSE3-NEXT: shrl $16, %ecx ; SSSE3-NEXT: movb %cl, 8(%rdi) +; SSSE3-NEXT: shrl $16, %edx +; SSSE3-NEXT: movb %dl, 5(%rdi) +; SSSE3-NEXT: shrl $16, %eax +; SSSE3-NEXT: movb %al, 11(%rdi) ; SSSE3-NEXT: movdqa %xmm1, %xmm0 ; SSSE3-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/vec_umulo.ll b/llvm/test/CodeGen/X86/vec_umulo.ll index c8146e3..52b9205 100644 --- a/llvm/test/CodeGen/X86/vec_umulo.ll +++ b/llvm/test/CodeGen/X86/vec_umulo.ll @@ -179,12 +179,10 @@ define <3 x i32> @umulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun ; SSE2-NEXT: pcmpeqd %xmm3, %xmm2 ; SSE2-NEXT: pcmpeqd %xmm1, %xmm1 ; SSE2-NEXT: pxor %xmm2, %xmm1 -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3] -; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] +; SSE2-NEXT: movd %xmm2, 8(%rdi) ; SSE2-NEXT: movq %xmm0, (%rdi) -; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSE2-NEXT: movd %xmm0, 8(%rdi) ; SSE2-NEXT: movdqa %xmm1, %xmm0 ; SSE2-NEXT: retq ; @@ -201,12 +199,10 @@ define <3 x i32> @umulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun ; SSSE3-NEXT: pcmpeqd %xmm3, %xmm2 ; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1 ; SSSE3-NEXT: pxor %xmm2, %xmm1 -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3] -; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1] +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1] +; SSSE3-NEXT: movd %xmm2, 8(%rdi) ; SSSE3-NEXT: movq %xmm0, (%rdi) -; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] -; SSSE3-NEXT: movd %xmm0, 8(%rdi) ; SSSE3-NEXT: movdqa %xmm1, %xmm0 ; SSSE3-NEXT: retq ; @@ -1567,90 +1563,90 @@ define <2 x i32> @umulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x 
i64>* %p2) noun define <4 x i32> @umulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) nounwind { ; SSE2-LABEL: umulo_v4i24: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0] -; SSE2-NEXT: pand %xmm2, %xmm1 -; SSE2-NEXT: pand %xmm2, %xmm0 -; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] -; SSE2-NEXT: pmuludq %xmm1, %xmm0 -; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] -; SSE2-NEXT: pmuludq %xmm4, %xmm2 -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3] -; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1] +; SSE2-NEXT: movdqa %xmm0, %xmm2 +; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0] +; SSE2-NEXT: pand %xmm0, %xmm1 +; SSE2-NEXT: pand %xmm0, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm1, %xmm2 +; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSE2-NEXT: pmuludq %xmm0, %xmm1 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1] ; SSE2-NEXT: pxor %xmm4, %xmm4 ; SSE2-NEXT: pcmpeqd %xmm4, %xmm3 -; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,2,2,3] -; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1] -; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[3,1,2,3] -; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,0,1] -; SSE2-NEXT: psrld $24, %xmm1 -; SSE2-NEXT: pcmpeqd %xmm4, %xmm1 -; SSE2-NEXT: pcmpeqd %xmm4, %xmm4 -; SSE2-NEXT: pxor %xmm4, %xmm3 -; SSE2-NEXT: pxor %xmm4, %xmm1 -; SSE2-NEXT: por %xmm3, %xmm1 -; SSE2-NEXT: movd %xmm0, %eax +; SSE2-NEXT: pcmpeqd %xmm5, %xmm5 +; SSE2-NEXT: pxor %xmm5, %xmm3 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3] +; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,2,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1] +; SSE2-NEXT: psrld $24, %xmm0 +; SSE2-NEXT: pcmpeqd %xmm4, %xmm0 +; SSE2-NEXT: pxor %xmm5, %xmm0 +; SSE2-NEXT: por %xmm3, %xmm0 +; SSE2-NEXT: movd %xmm2, %eax ; SSE2-NEXT: movw %ax, (%rdi) +; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3] ; SSE2-NEXT: movd %xmm2, %ecx -; SSE2-NEXT: movw %cx, 3(%rdi) +; SSE2-NEXT: movw %cx, 6(%rdi) +; SSE2-NEXT: movd %xmm1, %edx +; SSE2-NEXT: movw %dx, 3(%rdi) ; SSE2-NEXT: shrl $16, %eax ; SSE2-NEXT: movb %al, 2(%rdi) -; SSE2-NEXT: shrl $16, %ecx -; SSE2-NEXT: movb %cl, 5(%rdi) -; SSE2-NEXT: movd %xmm5, %eax +; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] +; SSE2-NEXT: movd %xmm1, %eax ; SSE2-NEXT: movw %ax, 9(%rdi) -; SSE2-NEXT: movd %xmm6, %ecx -; SSE2-NEXT: movw %cx, 6(%rdi) -; SSE2-NEXT: shrl $16, %eax -; SSE2-NEXT: movb %al, 11(%rdi) ; SSE2-NEXT: shrl $16, %ecx ; SSE2-NEXT: movb %cl, 8(%rdi) -; SSE2-NEXT: movdqa %xmm1, %xmm0 +; SSE2-NEXT: shrl $16, %edx +; SSE2-NEXT: movb %dl, 5(%rdi) +; SSE2-NEXT: shrl $16, %eax +; SSE2-NEXT: movb %al, 11(%rdi) ; SSE2-NEXT: retq ; ; SSSE3-LABEL: umulo_v4i24: ; SSSE3: # %bb.0: -; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0] -; SSSE3-NEXT: pand %xmm2, %xmm1 -; SSSE3-NEXT: pand %xmm2, %xmm0 -; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3] -; SSSE3-NEXT: pmuludq %xmm1, %xmm0 -; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3] -; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3] -; SSSE3-NEXT: pmuludq %xmm4, %xmm2 -; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3] -; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = 
xmm3[0],xmm1[0],xmm3[1],xmm1[1] +; SSSE3-NEXT: movdqa %xmm0, %xmm2 +; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0] +; SSSE3-NEXT: pand %xmm0, %xmm1 +; SSSE3-NEXT: pand %xmm0, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm1, %xmm2 +; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] +; SSSE3-NEXT: pmuludq %xmm0, %xmm1 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1] ; SSSE3-NEXT: pxor %xmm4, %xmm4 ; SSSE3-NEXT: pcmpeqd %xmm4, %xmm3 -; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3] -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,2,2,3] -; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1] -; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm1[3,1,2,3] -; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,0,1] -; SSSE3-NEXT: psrld $24, %xmm1 -; SSSE3-NEXT: pcmpeqd %xmm4, %xmm1 -; SSSE3-NEXT: pcmpeqd %xmm4, %xmm4 -; SSSE3-NEXT: pxor %xmm4, %xmm3 -; SSSE3-NEXT: pxor %xmm4, %xmm1 -; SSSE3-NEXT: por %xmm3, %xmm1 -; SSSE3-NEXT: movd %xmm0, %eax +; SSSE3-NEXT: pcmpeqd %xmm5, %xmm5 +; SSSE3-NEXT: pxor %xmm5, %xmm3 +; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3] +; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,2,2,3] +; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1] +; SSSE3-NEXT: psrld $24, %xmm0 +; SSSE3-NEXT: pcmpeqd %xmm4, %xmm0 +; SSSE3-NEXT: pxor %xmm5, %xmm0 +; SSSE3-NEXT: por %xmm3, %xmm0 +; SSSE3-NEXT: movd %xmm2, %eax ; SSSE3-NEXT: movw %ax, (%rdi) +; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3] ; SSSE3-NEXT: movd %xmm2, %ecx -; SSSE3-NEXT: movw %cx, 3(%rdi) +; SSSE3-NEXT: movw %cx, 6(%rdi) +; SSSE3-NEXT: movd %xmm1, %edx +; SSSE3-NEXT: movw %dx, 3(%rdi) ; SSSE3-NEXT: shrl $16, %eax ; SSSE3-NEXT: movb %al, 2(%rdi) -; SSSE3-NEXT: shrl $16, %ecx -; SSSE3-NEXT: movb %cl, 5(%rdi) -; SSSE3-NEXT: movd %xmm5, %eax +; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] +; SSSE3-NEXT: movd %xmm1, %eax ; SSSE3-NEXT: movw %ax, 9(%rdi) -; SSSE3-NEXT: movd %xmm6, %ecx -; SSSE3-NEXT: movw %cx, 6(%rdi) -; SSSE3-NEXT: shrl $16, %eax -; SSSE3-NEXT: movb %al, 11(%rdi) ; SSSE3-NEXT: shrl $16, %ecx ; SSSE3-NEXT: movb %cl, 8(%rdi) -; SSSE3-NEXT: movdqa %xmm1, %xmm0 +; SSSE3-NEXT: shrl $16, %edx +; SSSE3-NEXT: movb %dl, 5(%rdi) +; SSSE3-NEXT: shrl $16, %eax +; SSSE3-NEXT: movb %al, 11(%rdi) ; SSSE3-NEXT: retq ; ; SSE41-LABEL: umulo_v4i24: -- 2.7.4
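
Editor's note: the core of this patch is the conversion of a DemandedElts
bitmask into a shuffle mask whose undemanded lanes are SM_SentinelUndef,
which frees combineX86ShufflesRecursively to pick any cheaper shuffle that
is correct in the demanded lanes alone. Below is a minimal standalone
sketch of that conversion, not LLVM code: demandedEltsToMask is a
hypothetical helper, the SM_SentinelUndef constant is a stand-in for
LLVM's, and the real hook takes an APInt rather than a uint64_t.

#include <cstdint>
#include <iostream>
#include <vector>

// Same "don't care" sentinel value LLVM's shuffle combiners use.
constexpr int SM_SentinelUndef = -1;

// Build an identity shuffle mask over the op's output lanes, keeping only
// the demanded lanes. Undemanded lanes become SM_SentinelUndef, so a
// recursive shuffle combine may substitute any instruction that merely
// gets the demanded lanes right.
std::vector<int> demandedEltsToMask(uint64_t DemandedElts, int NumElts) {
  std::vector<int> Mask(NumElts, SM_SentinelUndef);
  for (int i = 0; i != NumElts; ++i)
    if (DemandedElts & (uint64_t(1) << i))
      Mask[i] = i;
  return Mask;
}

int main() {
  // Scalar vmaxss only reads lane 0 of the preceding shuffle, so only bit 0
  // of DemandedElts is set and the root mask becomes {0, -1, -1, -1}.
  // Recursing through vpermilps xmm0[1,0,3,2] then yields an effective mask
  // of {1, u, u, u}, which vmovshdup xmm0[1,1,3,3] also satisfies - the
  // avx512-intrinsics-fast-isel.ll change above.
  for (int M : demandedEltsToMask(/*DemandedElts=*/0b0001, /*NumElts=*/4))
    std::cout << M << ' ';
  std::cout << '\n'; // prints: 0 -1 -1 -1
}

This also explains the test churn: none of the diffs above change what is
computed, only which shuffle instruction feeds the lanes that are actually
consumed.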