From: Simon Pilgrim
Date: Sun, 7 Jun 2020 14:59:12 +0000 (+0100)
Subject: [X86][SSE] combineSetCCMOVMSK - add initial support for allof patterns.
X-Git-Tag: llvmorg-12-init~3834
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=3a28ae091be754d315bcee9b7596429e38df5e2e;p=platform%2Fupstream%2Fllvm.git

[X86][SSE] combineSetCCMOVMSK - add initial support for allof patterns.

Handle MOVMSK 'allof' comparisons (X86ISD::SUB X, AllBitsMask) as well
as 'anyof' patterns. This lets us match these cases through the
MOVMSK(BITCAST(X)) fold, helping to fix PR37087.
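The two predicates this combine matches can be summarized in scalar
form. A minimal standalone C++ sketch, illustrative only and not code
from this patch; 'Mask' stands in for the MOVMSK result and 'NumElts'
for the vector element count:

  #include <cstdint>

  // anyof: at least one lane's sign bit is set -> MOVMSK result != 0.
  bool anyOf(uint32_t Mask) { return Mask != 0; }

  // allof: every lane's sign bit is set -> MOVMSK result equals the
  // low-NumElts all-ones mask (guard the shift for NumElts == 32).
  bool allOf(uint32_t Mask, unsigned NumElts) {
    uint32_t AllBits = NumElts < 32 ? (1u << NumElts) - 1 : ~0u;
    return Mask == AllBits;
  }

The eq/ne condition codes then select between the predicate and its
negation.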
---

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index b373e63..a87b42b 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -40234,15 +40234,19 @@ static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
 static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
                                   SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
-  // Only handle eq/ne against zero (any_of).
-  // TODO: Handle eq/ne against -1 (all_of) as well.
+  // Handle eq/ne against zero (any_of).
+  // Handle eq/ne against -1 (all_of).
   if (!(CC == X86::COND_E || CC == X86::COND_NE))
     return SDValue();
   if (EFLAGS.getValueType() != MVT::i32)
     return SDValue();
   unsigned CmpOpcode = EFLAGS.getOpcode();
-  if (CmpOpcode != X86ISD::CMP || !isNullConstant(EFLAGS.getOperand(1)))
+  if (CmpOpcode != X86ISD::CMP && CmpOpcode != X86ISD::SUB)
     return SDValue();
+  auto *CmpConstant = dyn_cast<ConstantSDNode>(EFLAGS.getOperand(1));
+  if (!CmpConstant)
+    return SDValue();
+  const APInt &CmpVal = CmpConstant->getAPIntValue();
 
   SDValue CmpOp = EFLAGS.getOperand(0);
   unsigned CmpBits = CmpOp.getValueSizeInBits();
@@ -40259,6 +40263,14 @@ static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
   MVT VecVT = Vec.getSimpleValueType();
   assert((VecVT.is128BitVector() || VecVT.is256BitVector()) &&
          "Unexpected MOVMSK operand");
+  unsigned NumElts = VecVT.getVectorNumElements();
+  unsigned NumEltBits = VecVT.getScalarSizeInBits();
+
+  bool IsAnyOf = CmpOpcode == X86ISD::CMP && CmpVal.isNullValue();
+  bool IsAllOf = CmpOpcode == X86ISD::SUB && NumElts <= CmpVal.getBitWidth() &&
+                 CmpVal.isMask(NumElts);
+  if (!IsAnyOf && !IsAllOf)
+    return SDValue();
 
   // See if we can peek through to a vector with a wider element type, if the
   // signbits extend down to all the sub-elements as well.
@@ -40266,15 +40278,17 @@ static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
   // potential SimplifyDemandedBits/Elts cases.
   if (Vec.getOpcode() == ISD::BITCAST) {
     SDValue BC = peekThroughBitcasts(Vec);
-    unsigned NumEltBits = VecVT.getScalarSizeInBits();
-    unsigned BCNumEltBits = BC.getScalarValueSizeInBits();
+    MVT BCVT = BC.getSimpleValueType();
+    unsigned BCNumElts = BCVT.getVectorNumElements();
+    unsigned BCNumEltBits = BCVT.getScalarSizeInBits();
     if ((BCNumEltBits == 32 || BCNumEltBits == 64) &&
         BCNumEltBits > NumEltBits &&
         DAG.ComputeNumSignBits(BC) > (BCNumEltBits - NumEltBits)) {
       SDLoc DL(EFLAGS);
+      unsigned CmpMask = IsAnyOf ? 0 : ((1 << BCNumElts) - 1);
       return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
                          DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, BC),
-                         DAG.getConstant(0, DL, MVT::i32));
+                         DAG.getConstant(CmpMask, DL, MVT::i32));
     }
   }
 
@@ -40282,7 +40296,8 @@ static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
   // For vXi16 cases we can use a v2Xi8 PMOVMSKB. We must mask out
   // sign bits prior to the comparison with zero unless we know that
   // the vXi16 splats the sign bit down to the lower i8 half.
-  if (Vec.getOpcode() == X86ISD::PACKSS && VecVT == MVT::v16i8) {
+  // TODO: Handle all_of patterns.
+  if (IsAnyOf && Vec.getOpcode() == X86ISD::PACKSS && VecVT == MVT::v16i8) {
     SDValue VecOp0 = Vec.getOperand(0);
     SDValue VecOp1 = Vec.getOperand(1);
     bool SignExt0 = DAG.ComputeNumSignBits(VecOp0) > 8;
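Note on the BITCAST hunk above: once MOVMSK is rebuilt on the
wider-element vector, the all-of constant has to be recomputed from the
new element count, which is what CmpMask = IsAnyOf ? 0 : ((1 <<
BCNumElts) - 1) does. A tiny self-contained C++ check of that
arithmetic (the element counts are assumed for illustration; they match
the v8f32 -> v4f64 test change below):

  #include <cassert>

  // All-of comparison constant for a MOVMSK producing BCNumElts sign bits.
  unsigned allOfMask(unsigned BCNumElts) { return (1u << BCNumElts) - 1; }

  int main() {
    assert(allOfMask(8) == 255); // vmovmskps on a 256-bit vector: 8 sign bits
    assert(allOfMask(4) == 15);  // vmovmskpd on the same vector: 4 sign bits
    return 0;
  }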
diff --git a/llvm/test/CodeGen/X86/combine-movmsk-avx.ll b/llvm/test/CodeGen/X86/combine-movmsk-avx.ll
index 610df24..b18c724 100644
--- a/llvm/test/CodeGen/X86/combine-movmsk-avx.ll
+++ b/llvm/test/CodeGen/X86/combine-movmsk-avx.ll
@@ -30,8 +30,8 @@ define i1 @movmskps_allof_bitcast_v4f64(<4 x double> %a0) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vcmpeqpd %ymm1, %ymm0, %ymm0
-; CHECK-NEXT:    vmovmskps %ymm0, %eax
-; CHECK-NEXT:    cmpl $255, %eax
+; CHECK-NEXT:    vmovmskpd %ymm0, %eax
+; CHECK-NEXT:    cmpl $15, %eax
 ; CHECK-NEXT:    sete %al
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/combine-movmsk.ll b/llvm/test/CodeGen/X86/combine-movmsk.ll
index ad2e7ce..b0d4d7e 100644
--- a/llvm/test/CodeGen/X86/combine-movmsk.ll
+++ b/llvm/test/CodeGen/X86/combine-movmsk.ll
@@ -41,8 +41,8 @@ define i1 @movmskps_allof_bitcast_v2f64(<2 x double> %a0) {
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    xorpd %xmm1, %xmm1
 ; SSE-NEXT:    cmpeqpd %xmm0, %xmm1
-; SSE-NEXT:    movmskps %xmm1, %eax
-; SSE-NEXT:    cmpl $15, %eax
+; SSE-NEXT:    movmskpd %xmm1, %eax
+; SSE-NEXT:    cmpl $3, %eax
 ; SSE-NEXT:    sete %al
 ; SSE-NEXT:    retq
 ;
@@ -50,8 +50,8 @@ define i1 @movmskps_allof_bitcast_v2f64(<2 x double> %a0) {
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vcmpeqpd %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    vmovmskps %xmm0, %eax
-; AVX-NEXT:    cmpl $15, %eax
+; AVX-NEXT:    vmovmskpd %xmm0, %eax
+; AVX-NEXT:    cmpl $3, %eax
 ; AVX-NEXT:    sete %al
 ; AVX-NEXT:    retq
   %1 = fcmp oeq <2 x double> zeroinitializer, %a0
@@ -100,26 +100,22 @@ define i1 @pmovmskb_allof_bitcast_v2i64(<2 x i64> %a0) {
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE2-NEXT:    pmovmskb %xmm0, %eax
-; SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; SSE2-NEXT:    movmskps %xmm0, %eax
+; SSE2-NEXT:    cmpl $15, %eax
 ; SSE2-NEXT:    sete %al
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: pmovmskb_allof_bitcast_v2i64:
 ; SSE42:       # %bb.0:
-; SSE42-NEXT:    pxor %xmm1, %xmm1
-; SSE42-NEXT:    pcmpgtq %xmm0, %xmm1
-; SSE42-NEXT:    pmovmskb %xmm1, %eax
-; SSE42-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; SSE42-NEXT:    movmskpd %xmm0, %eax
+; SSE42-NEXT:    cmpl $3, %eax
 ; SSE42-NEXT:    sete %al
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: pmovmskb_allof_bitcast_v2i64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    vpmovmskb %xmm0, %eax
-; AVX-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; AVX-NEXT:    vmovmskpd %xmm0, %eax
+; AVX-NEXT:    cmpl $3, %eax
 ; AVX-NEXT:    sete %al
 ; AVX-NEXT:    retq
   %1 = icmp sgt <2 x i64> zeroinitializer, %a0
@@ -161,8 +157,8 @@ define i1 @pmovmskb_allof_bitcast_v4f32(<4 x float> %a0) {
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm1, %xmm1
 ; SSE-NEXT:    cmpeqps %xmm0, %xmm1
-; SSE-NEXT:    pmovmskb %xmm1, %eax
-; SSE-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; SSE-NEXT:    movmskps %xmm1, %eax
+; SSE-NEXT:    cmpl $15, %eax
 ; SSE-NEXT:    sete %al
 ; SSE-NEXT:    retq
 ;
@@ -170,8 +166,8 @@ define i1 @pmovmskb_allof_bitcast_v4f32(<4 x float> %a0) {
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vcmpeqps %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpmovmskb %xmm0, %eax
-; AVX-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; AVX-NEXT:    vmovmskps %xmm0, %eax
+; AVX-NEXT:    cmpl $15, %eax
 ; AVX-NEXT:    sete %al
 ; AVX-NEXT:    retq
   %1 = fcmp oeq <4 x float> %a0, zeroinitializer