From 6c94264b1f3342376ec3d46e14634d39cf158a49 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Wed, 14 Nov 2018 18:16:21 +0000
Subject: [PATCH] [X86] Allow pmulh to be formed from narrow vXi16 vectors
 under -x86-experimental-vector-widening-legalization

Narrower vectors will be widened to 128 bits without changing the element
size. And generic type legalization can already handle widening mulhu/mulhs.

Differential Revision: https://reviews.llvm.org/D54513

llvm-svn: 346879
---
 llvm/lib/Target/X86/X86ISelLowering.cpp |  6 ++++--
 llvm/test/CodeGen/X86/pmulh.ll          | 22 ++--------------------
 2 files changed, 6 insertions(+), 22 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index d142137..9f83593 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -37552,9 +37552,11 @@ static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
   if (!Subtarget.hasSSE2())
     return SDValue();
 
-  // Only handle vXi16 types that are at least 128-bits.
+  // Only handle vXi16 types that are at least 128-bits unless they will be
+  // widened.
   if (!VT.isVector() || VT.getVectorElementType() != MVT::i16 ||
-      VT.getVectorNumElements() < 8)
+      (!ExperimentalVectorWideningLegalization &&
+       VT.getVectorNumElements() < 8))
     return SDValue();
 
   // Input type should be vXi32.
diff --git a/llvm/test/CodeGen/X86/pmulh.ll b/llvm/test/CodeGen/X86/pmulh.ll
index f86b71f..7068650 100644
--- a/llvm/test/CodeGen/X86/pmulh.ll
+++ b/llvm/test/CodeGen/X86/pmulh.ll
@@ -24,11 +24,6 @@ define <4 x i16> @mulhuw_v4i16(<4 x i16> %a, <4 x i16> %b) {
 ; SSE2-WIDEN-LABEL: mulhuw_v4i16:
 ; SSE2-WIDEN:       # %bb.0:
 ; SSE2-WIDEN-NEXT:    pmulhuw %xmm1, %xmm0
-; SSE2-WIDEN-NEXT:    pxor %xmm1, %xmm1
-; SSE2-WIDEN-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-WIDEN-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; SSE2-WIDEN-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; SSE2-WIDEN-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SSE2-WIDEN-NEXT:    retq
 ;
 ; SSE41-PROMOTE-LABEL: mulhuw_v4i16:
@@ -42,11 +37,7 @@ define <4 x i16> @mulhuw_v4i16(<4 x i16> %a, <4 x i16> %b) {
 ;
 ; SSE41-WIDEN-LABEL: mulhuw_v4i16:
 ; SSE41-WIDEN:       # %bb.0:
-; SSE41-WIDEN-NEXT:    pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
-; SSE41-WIDEN-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
-; SSE41-WIDEN-NEXT:    pmulld %xmm2, %xmm0
-; SSE41-WIDEN-NEXT:    psrld $16, %xmm0
-; SSE41-WIDEN-NEXT:    packusdw %xmm0, %xmm0
+; SSE41-WIDEN-NEXT:    pmulhuw %xmm1, %xmm0
 ; SSE41-WIDEN-NEXT:    retq
 ;
 ; AVX-LABEL: mulhuw_v4i16:
@@ -82,11 +73,6 @@ define <4 x i16> @mulhw_v4i16(<4 x i16> %a, <4 x i16> %b) {
 ; SSE2-WIDEN-LABEL: mulhw_v4i16:
 ; SSE2-WIDEN:       # %bb.0:
 ; SSE2-WIDEN-NEXT:    pmulhw %xmm1, %xmm0
-; SSE2-WIDEN-NEXT:    pxor %xmm1, %xmm1
-; SSE2-WIDEN-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-WIDEN-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; SSE2-WIDEN-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; SSE2-WIDEN-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
 ; SSE2-WIDEN-NEXT:    retq
 ;
 ; SSE41-PROMOTE-LABEL: mulhw_v4i16:
@@ -101,11 +87,7 @@ define <4 x i16> @mulhw_v4i16(<4 x i16> %a, <4 x i16> %b) {
 ;
 ; SSE41-WIDEN-LABEL: mulhw_v4i16:
 ; SSE41-WIDEN:       # %bb.0:
-; SSE41-WIDEN-NEXT:    pmovsxwd %xmm0, %xmm2
-; SSE41-WIDEN-NEXT:    pmovsxwd %xmm1, %xmm0
-; SSE41-WIDEN-NEXT:    pmulld %xmm2, %xmm0
-; SSE41-WIDEN-NEXT:    psrld $16, %xmm0
-; SSE41-WIDEN-NEXT:    packusdw %xmm0, %xmm0
+; SSE41-WIDEN-NEXT:    pmulhw %xmm1, %xmm0
 ; SSE41-WIDEN-NEXT:    retq
 ;
 ; AVX-LABEL: mulhw_v4i16:
-- 
2.7.4
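
For reference, the IR shape combinePMULH matches is a truncate of a logical
right-shift of an extended multiply. Below is a minimal sketch modeled on the
mulhuw_v4i16 test exercised above; the function name and body here are an
illustrative assumption, not quoted from pmulh.ll. Under
-x86-experimental-vector-widening-legalization this should now select a single
pmulhuw instead of the pmovzxwd/pmulld/psrld/packusdw sequence removed by this
patch:

  ; Illustrative sketch; name and body are assumptions.
  define <4 x i16> @mulhuw_v4i16_sketch(<4 x i16> %a, <4 x i16> %b) {
    ; Extend to i32 so the full 32-bit product is representable.
    %a32 = zext <4 x i16> %a to <4 x i32>
    %b32 = zext <4 x i16> %b to <4 x i32>
    %mul = mul <4 x i32> %a32, %b32
    ; Keep the high 16 bits of each 32-bit product.
    %hi  = lshr <4 x i32> %mul, <i32 16, i32 16, i32 16, i32 16>
    %res = trunc <4 x i32> %hi to <4 x i16>
    ret <4 x i16> %res
  }

The signed variant is analogous, with sext in place of zext, and selects
pmulhw.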