From 15c8306056beefa61533a895e8d836db72fccd14 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Sun, 20 Sep 2020 16:33:02 +0100
Subject: [PATCH] [X86][SSE] Fold EXTEND_VECTOR_INREG(EXTEND_VECTOR_INREG(X)) -> EXTEND_VECTOR_INREG(X)

It should be possible to make this generic, but we're not great at
checking legality of *_EXTEND_VECTOR_INREG ops so I'm conservatively
putting this inside X86ISelLowering.cpp
---
 llvm/lib/Target/X86/X86ISelLowering.cpp | 4 ++++
 llvm/test/CodeGen/X86/masked_load.ll    | 4 ++--
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 3dda2e0..26333c6 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -49278,6 +49278,10 @@ static SDValue combineEXTEND_VECTOR_INREG(SDNode *N, SelectionDAG &DAG,
     }
   }
 
+  // Fold EXTEND_VECTOR_INREG(EXTEND_VECTOR_INREG(X)) -> EXTEND_VECTOR_INREG(X).
+  if (Opcode == In.getOpcode())
+    return DAG.getNode(Opcode, SDLoc(N), VT, In.getOperand(0));
+
   // Attempt to combine as a shuffle.
   // TODO: General ZERO_EXTEND_VECTOR_INREG support.
   if (Opcode == ISD::ANY_EXTEND_VECTOR_INREG ||
diff --git a/llvm/test/CodeGen/X86/masked_load.ll b/llvm/test/CodeGen/X86/masked_load.ll
index d15b7f4..3a63608 100644
--- a/llvm/test/CodeGen/X86/masked_load.ll
+++ b/llvm/test/CodeGen/X86/masked_load.ll
@@ -461,8 +461,8 @@ define <8 x double> @load_v8f64_v8i16(<8 x i16> %trigger, <8 x double>* %addr, <
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
 ; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpmovsxwq %xmm3, %xmm5
 ; AVX1-NEXT:    vpmovsxwd %xmm3, %xmm3
-; AVX1-NEXT:    vpmovsxdq %xmm3, %xmm5
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
 ; AVX1-NEXT:    vpmovsxdq %xmm3, %xmm3
 ; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
@@ -1781,8 +1781,8 @@ define <8 x i64> @load_v8i64_v8i16(<8 x i16> %trigger, <8 x i64>* %addr, <8 x i6
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
 ; AVX1-NEXT:    vpxor %xmm4, %xmm4, %xmm4
 ; AVX1-NEXT:    vpcmpeqw %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpmovsxwq %xmm3, %xmm5
 ; AVX1-NEXT:    vpmovsxwd %xmm3, %xmm3
-; AVX1-NEXT:    vpmovsxdq %xmm3, %xmm5
 ; AVX1-NEXT:    vpshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
 ; AVX1-NEXT:    vpmovsxdq %xmm3, %xmm3
 ; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm5, %ymm3
--
2.7.4
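
Note: the commit message says the fold should be possible generically but is kept in X86ISelLowering.cpp because legality checking for *_EXTEND_VECTOR_INREG ops is weak. As a rough illustration only, a target-independent version might look like the sketch below; the helper name and the isOperationLegalOrCustom guard are assumptions for illustration, not part of this patch.

// Hypothetical sketch -- not part of the patch above. Assumes the usual
// SelectionDAG/TargetLowering context. The X86 combine performs the fold
// unconditionally; a generic version would want the legality guard.
static SDValue foldNestedExtendVectorInReg(SDNode *N, SelectionDAG &DAG,
                                           const TargetLowering &TLI,
                                           bool LegalOperations) {
  unsigned Opcode = N->getOpcode();
  SDValue In = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // EXTEND_VECTOR_INREG(EXTEND_VECTOR_INREG(X)) -> EXTEND_VECTOR_INREG(X):
  // the outer node only reads the low source elements, and those are the
  // low elements of X extended with the same opcode, so extending X
  // directly to VT produces the same value.
  if (In.getOpcode() == Opcode &&
      (!LegalOperations || TLI.isOperationLegalOrCustom(Opcode, VT)))
    return DAG.getNode(Opcode, SDLoc(N), VT, In.getOperand(0));

  return SDValue();
}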