From: Simon Pilgrim
Date: Sun, 20 Sep 2020 16:11:01 +0000 (+0100)
Subject: [X86][SSE] Fold SIGN_EXTEND(SIGN_EXTEND_VECTOR_INREG(X)) -> SIGN_EXTEND_VECTOR_INREG(X)
X-Git-Tag: llvmorg-13-init~11502
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=bb0078e5911a8cd7742c99c391ccea802f02e22e;p=platform%2Fupstream%2Fllvm.git

[X86][SSE] Fold SIGN_EXTEND(SIGN_EXTEND_VECTOR_INREG(X)) -> SIGN_EXTEND_VECTOR_INREG(X)

It should be possible to make this generic, but we're not great at checking
legality of *_EXTEND_VECTOR_INREG ops, so I'm conservatively putting this
inside X86ISelLowering.cpp.
---

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 26333c6..f032758 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -46769,10 +46769,14 @@ static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
   if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
     return V;
 
-  if (VT.isVector())
+  if (VT.isVector()) {
     if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
       return R;
 
+    if (N0.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG)
+      return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0));
+  }
+
   if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
     return NewAdd;
 
diff --git a/llvm/test/CodeGen/X86/masked_load.ll b/llvm/test/CodeGen/X86/masked_load.ll
index 3a63608..c0b274d 100644
--- a/llvm/test/CodeGen/X86/masked_load.ll
+++ b/llvm/test/CodeGen/X86/masked_load.ll
@@ -462,9 +462,8 @@ define <8 x double> @load_v8f64_v8i16(<8 x i16> %trigger, <8 x double>* %addr, <
 ; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm3
 ; AVX1-NEXT:    vpmovsxwq %xmm3, %xmm5
-; AVX1-NEXT:    vpmovsxwd %xmm3, %xmm3
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; AVX1-NEXT:    vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,1,1]
+; AVX1-NEXT:    vpmovsxwq %xmm3, %xmm3
 ; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
 ; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovsxwq %xmm0, %xmm4
@@ -482,8 +481,7 @@ define <8 x double> @load_v8f64_v8i16(<8 x i16> %trigger, <8 x double>* %addr, <
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
 ; AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX2-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm3
-; AVX2-NEXT:    vpmovsxwd %xmm3, %xmm3
-; AVX2-NEXT:    vpmovsxdq %xmm3, %ymm3
+; AVX2-NEXT:    vpmovsxwq %xmm3, %ymm3
 ; AVX2-NEXT:    vpcmpeqw %xmm4, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmovsxwq %xmm0, %ymm0
 ; AVX2-NEXT:    vmaskmovpd (%rdi), %ymm0, %ymm4
@@ -1782,9 +1780,8 @@ define <8 x i64> @load_v8i64_v8i16(<8 x i16> %trigger, <8 x i64>* %addr, <8 x i6
 ; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm3
 ; AVX1-NEXT:    vpmovsxwq %xmm3, %xmm5
-; AVX1-NEXT:    vpmovsxwd %xmm3, %xmm3
-; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
-; AVX1-NEXT:    vpmovsxdq %xmm3, %xmm3
+; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[1,1,1,1]
+; AVX1-NEXT:    vpmovsxwq %xmm3, %xmm3
 ; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
 ; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT:    vpmovsxwq %xmm0, %xmm4
@@ -1802,8 +1799,7 @@ define <8 x i64> @load_v8i64_v8i16(<8 x i16> %trigger, <8 x i64>* %addr, <8 x i6
 ; AVX2-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
 ; AVX2-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX2-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm3
-; AVX2-NEXT:    vpmovsxwd %xmm3, %xmm3
-; AVX2-NEXT:    vpmovsxdq %xmm3, %ymm3
+; AVX2-NEXT:    vpmovsxwq %xmm3, %ymm3
 ; AVX2-NEXT:    vpcmpeqw %xmm4, %xmm0, %xmm0
 ; AVX2-NEXT:    vpmovsxwq %xmm0, %ymm0
 ; AVX2-NEXT:    vpmaskmovq (%rdi), %ymm0, %ymm4
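
For reference, a minimal sketch of the generic DAGCombiner-style form the commit
message alludes to, assuming the missing piece is a legality query on the wider
SIGN_EXTEND_VECTOR_INREG result type via TargetLowering::isOperationLegalOrCustom;
the helper name below is hypothetical and not part of this patch:

// Sketch only: generic sext(sext_vector_inreg(x)) -> sext_vector_inreg(x),
// gated on the wider in-reg extend being legal or custom for the target.
// The in-tree patch instead performs the fold inside X86's combineSext.
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;

static SDValue foldSExtOfSExtVectorInReg(SDNode *N, SelectionDAG &DAG,
                                         const TargetLowering &TLI) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  // sext(sext_vector_inreg(x)) sign-extends the same low elements of x
  // straight to VT, so a single wider SIGN_EXTEND_VECTOR_INREG suffices.
  if (VT.isVector() && N0.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG &&
      TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND_VECTOR_INREG, VT))
    return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, SDLoc(N), VT,
                       N0.getOperand(0));
  return SDValue();
}

Keeping the fold inside X86ISelLowering.cpp, where the target is known to handle
these *_EXTEND_VECTOR_INREG nodes, avoids having to get that legality check right
for every target, which is the conservative placement the message describes.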