From 102f05bd34a76bf228850023618da30c8266e7c4 Mon Sep 17 00:00:00 2001
From: Philip Reames
Date: Fri, 18 Nov 2022 15:14:18 -0800
Subject: [PATCH] Revert "[SDAG] Allow scalable vectors in ComputeNumSignBits"
 and follow up

This reverts commits 3fb08d14a63f5f745d3fec2b5f6ffef4cf0a398c and
f8c63a7fbf50fb5883bd566c7539d0ac18c7700f.

A "timeout for a Halide Hexagon test" was reported. Revert until the
investigation is complete.
---
 llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp     | 45 ++++-----------
 .../CodeGen/AArch64/sve-masked-gather-legalize.ll  |  2 +-
 llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll      | 66 ++++++++++++++++------
 llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll             |  5 +-
 llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll             |  5 +-
 llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll             |  5 +-
 llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll             |  5 +-
 7 files changed, 75 insertions(+), 58 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index cba592f..ce86bf4 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3962,10 +3962,11 @@ bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
   EVT VT = Op.getValueType();
 
-  // Since the number of lanes in a scalable vector is unknown at compile time,
-  // we track one bit which is implicitly broadcast to all lanes.  This means
-  // that all lanes in a scalable vector are considered demanded.
-  APInt DemandedElts = VT.isFixedLengthVector()
+  // TODO: Assume we don't know anything for now.
+  if (VT.isScalableVector())
+    return 1;
+
+  APInt DemandedElts = VT.isVector()
                            ? APInt::getAllOnes(VT.getVectorNumElements())
                            : APInt(1, 1);
   return ComputeNumSignBits(Op, DemandedElts, Depth);
@@ -3988,7 +3989,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
   if (Depth >= MaxRecursionDepth)
     return 1;  // Limit search depth.
 
-  if (!DemandedElts)
+  if (!DemandedElts || VT.isScalableVector())
     return 1;  // No demanded elts, better to assume we don't know anything.
 
   unsigned Opcode = Op.getOpcode();
@@ -4003,16 +4004,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
   case ISD::MERGE_VALUES:
     return ComputeNumSignBits(Op.getOperand(Op.getResNo()), DemandedElts,
                               Depth + 1);
-  case ISD::SPLAT_VECTOR: {
-    // Check if the sign bits of source go down as far as the truncated value.
-    unsigned NumSrcBits = Op.getOperand(0).getValueSizeInBits();
-    unsigned NumSrcSignBits = ComputeNumSignBits(Op.getOperand(0), Depth + 1);
-    if (NumSrcSignBits > (NumSrcBits - VTBits))
-      return NumSrcSignBits - (NumSrcBits - VTBits);
-    break;
-  }
   case ISD::BUILD_VECTOR:
-    assert(!VT.isScalableVector());
     Tmp = VTBits;
     for (unsigned i = 0, e = Op.getNumOperands(); (i < e) && (Tmp > 1); ++i) {
       if (!DemandedElts[i])
@@ -4057,8 +4049,6 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
   }
 
   case ISD::BITCAST: {
-    if (VT.isScalableVector())
-      break;
     SDValue N0 = Op.getOperand(0);
     EVT SrcVT = N0.getValueType();
     unsigned SrcBits = SrcVT.getScalarSizeInBits();
@@ -4116,8 +4106,6 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
     Tmp2 = ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth+1);
     return std::max(Tmp, Tmp2);
   case ISD::SIGN_EXTEND_VECTOR_INREG: {
-    if (VT.isScalableVector())
-      break;
     SDValue Src = Op.getOperand(0);
     EVT SrcVT = Src.getValueType();
     APInt DemandedSrcElts = DemandedElts.zext(SrcVT.getVectorNumElements());
@@ -4335,8 +4323,6 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
     break;
   }
   case ISD::EXTRACT_ELEMENT: {
-    if (VT.isScalableVector())
-      break;
     const int KnownSign = ComputeNumSignBits(Op.getOperand(0), Depth+1);
     const int BitWidth = Op.getValueSizeInBits();
     const int Items = Op.getOperand(0).getValueSizeInBits() / BitWidth;
@@ -4350,8 +4336,6 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
     return std::clamp(KnownSign - rIndex * BitWidth, 0, BitWidth);
   }
   case ISD::INSERT_VECTOR_ELT: {
-    if (VT.isScalableVector())
-      break;
     // If we know the element index, split the demand between the
     // source vector and the inserted element, otherwise assume we need
     // the original demanded vector elements and the value.
@@ -4382,7 +4366,6 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
     return Tmp;
   }
   case ISD::EXTRACT_VECTOR_ELT: {
-    assert(!VT.isScalableVector());
     SDValue InVec = Op.getOperand(0);
     SDValue EltNo = Op.getOperand(1);
     EVT VecVT = InVec.getValueType();
@@ -4421,8 +4404,6 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
     return ComputeNumSignBits(Src, DemandedSrcElts, Depth + 1);
   }
   case ISD::CONCAT_VECTORS: {
-    if (VT.isScalableVector())
-      break;
     // Determine the minimum number of sign bits across all demanded
     // elts of the input vectors. Early out if the result is already 1.
     Tmp = std::numeric_limits<unsigned>::max();
@@ -4441,8 +4422,6 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
     return Tmp;
   }
   case ISD::INSERT_SUBVECTOR: {
-    if (VT.isScalableVector())
-      break;
     // Demand any elements from the subvector and the remainder from the src its
     // inserted into.
     SDValue Src = Op.getOperand(0);
@@ -4513,7 +4492,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
       // We only need to handle vectors - computeKnownBits should handle
       // scalar cases.
       Type *CstTy = Cst->getType();
-      if (CstTy->isVectorTy() && !VT.isScalableVector() &&
+      if (CstTy->isVectorTy() &&
           (NumElts * VTBits) == CstTy->getPrimitiveSizeInBits() &&
           VTBits == CstTy->getScalarSizeInBits()) {
         Tmp = VTBits;
@@ -4548,14 +4527,10 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
       Opcode == ISD::INTRINSIC_WO_CHAIN ||
       Opcode == ISD::INTRINSIC_W_CHAIN ||
       Opcode == ISD::INTRINSIC_VOID) {
-    // TODO: This can probably be removed once target code is audited.  This
-    // is here purely to reduce patch size and review complexity.
-    if (!VT.isScalableVector()) {
-      unsigned NumBits =
+    unsigned NumBits =
         TLI->ComputeNumSignBitsForTargetNode(Op, DemandedElts, *this, Depth);
-      if (NumBits > 1)
-        FirstAnswer = std::max(FirstAnswer, NumBits);
-    }
+    if (NumBits > 1)
+      FirstAnswer = std::max(FirstAnswer, NumBits);
   }
 
   // Finally, if we can prove that the top bits of the result are 0's or 1's,
diff --git a/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll b/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
index ed3f784..8244b5f 100644
--- a/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
+++ b/llvm/test/CodeGen/AArch64/sve-masked-gather-legalize.ll
@@ -95,7 +95,7 @@ define <vscale x 2 x float> @masked_gather_nxv2f32(float* %base, <vscale x 2 x i16> %indices, <vscale x 2 x i1> %mask) {
   %data = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32(<vscale x 2 x float*> %ptrs, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
diff --git a/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll b/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll
index eebd4a2..f89ec1d 100644
--- a/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll
+++ b/llvm/test/CodeGen/AArch64/sve-smulo-sdnode.ll
@@ -9,10 +9,15 @@ define <vscale x 2 x i8> @smulo_nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    sxtb z1.d, p0/m, z1.d
 ; CHECK-NEXT:    sxtb z0.d, p0/m, z0.d
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    smulh z2.d, p0/m, z2.d, z1.d
 ; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT:    movprfx z1, z0
-; CHECK-NEXT:    sxtb z1.d, p0/m, z0.d
-; CHECK-NEXT:    cmpne p0.d, p0/z, z1.d, z0.d
+; CHECK-NEXT:    asr z1.d, z0.d, #63
+; CHECK-NEXT:    movprfx z3, z0
+; CHECK-NEXT:    sxtb z3.d, p0/m, z0.d
+; CHECK-NEXT:    cmpne p1.d, p0/z, z2.d, z1.d
+; CHECK-NEXT:    cmpne p0.d, p0/z, z3.d, z0.d
+; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
 ; CHECK-NEXT:    ret
   %a = call { <vscale x 2 x i8>, <vscale x 2 x i8> } @llvm.smul.with.overflow.nxv2i8(<vscale x 2 x i8> %x, <vscale x 2 x i8> %y)
@@ -30,10 +35,15 @@ define <vscale x 4 x i8> @smulo_nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    sxtb z1.s, p0/m, z1.s
 ; CHECK-NEXT:    sxtb z0.s, p0/m, z0.s
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    smulh z2.s, p0/m, z2.s, z1.s
 ; CHECK-NEXT:    mul z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT:    movprfx z1, z0
-; CHECK-NEXT:    sxtb z1.s, p0/m, z0.s
-; CHECK-NEXT:    cmpne p0.s, p0/z, z1.s, z0.s
+; CHECK-NEXT:    asr z1.s, z0.s, #31
+; CHECK-NEXT:    movprfx z3, z0
+; CHECK-NEXT:    sxtb z3.s, p0/m, z0.s
+; CHECK-NEXT:    cmpne p1.s, p0/z, z2.s, z1.s
+; CHECK-NEXT:    cmpne p0.s, p0/z, z3.s, z0.s
+; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    mov z0.s, p0/m, #0 // =0x0
 ; CHECK-NEXT:    ret
   %a = call { <vscale x 4 x i8>, <vscale x 4 x i8> } @llvm.smul.with.overflow.nxv4i8(<vscale x 4 x i8> %x, <vscale x 4 x i8> %y)
@@ -51,10 +61,15 @@ define <vscale x 8 x i8> @smulo_nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y) {
 ; CHECK-NEXT:    ptrue p0.h
 ; CHECK-NEXT:    sxtb z1.h, p0/m, z1.h
 ; CHECK-NEXT:    sxtb z0.h, p0/m, z0.h
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    smulh z2.h, p0/m, z2.h, z1.h
 ; CHECK-NEXT:    mul z0.h, p0/m, z0.h, z1.h
-; CHECK-NEXT:    movprfx z1, z0
-; CHECK-NEXT:    sxtb z1.h, p0/m, z0.h
-; CHECK-NEXT:    cmpne p0.h, p0/z, z1.h, z0.h
+; CHECK-NEXT:    asr z1.h, z0.h, #15
+; CHECK-NEXT:    movprfx z3, z0
+; CHECK-NEXT:    sxtb z3.h, p0/m, z0.h
+; CHECK-NEXT:    cmpne p1.h, p0/z, z2.h, z1.h
+; CHECK-NEXT:    cmpne p0.h, p0/z, z3.h, z0.h
+; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    mov z0.h, p0/m, #0 // =0x0
 ; CHECK-NEXT:    ret
   %a = call { <vscale x 8 x i8>, <vscale x 8 x i8> } @llvm.smul.with.overflow.nxv8i8(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y)
@@ -160,10 +175,15 @@ define <vscale x 2 x i16> @smulo_nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    sxth z1.d, p0/m, z1.d
 ; CHECK-NEXT:    sxth z0.d, p0/m, z0.d
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    smulh z2.d, p0/m, z2.d, z1.d
 ; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT:    movprfx z1, z0
-; CHECK-NEXT:    sxth z1.d, p0/m, z0.d
-; CHECK-NEXT:    cmpne p0.d, p0/z, z1.d, z0.d
+; CHECK-NEXT:    asr z1.d, z0.d, #63
+; CHECK-NEXT:    movprfx z3, z0
+; CHECK-NEXT:    sxth z3.d, p0/m, z0.d
+; CHECK-NEXT:    cmpne p1.d, p0/z, z2.d, z1.d
+; CHECK-NEXT:    cmpne p0.d, p0/z, z3.d, z0.d
+; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
 ; CHECK-NEXT:    ret
   %a = call { <vscale x 2 x i16>, <vscale x 2 x i16> } @llvm.smul.with.overflow.nxv2i16(<vscale x 2 x i16> %x, <vscale x 2 x i16> %y)
@@ -181,10 +201,15 @@ define <vscale x 4 x i16> @smulo_nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y) {
 ; CHECK-NEXT:    ptrue p0.s
 ; CHECK-NEXT:    sxth z1.s, p0/m, z1.s
 ; CHECK-NEXT:    sxth z0.s, p0/m, z0.s
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    smulh z2.s, p0/m, z2.s, z1.s
 ; CHECK-NEXT:    mul z0.s, p0/m, z0.s, z1.s
-; CHECK-NEXT:    movprfx z1, z0
-; CHECK-NEXT:    sxth z1.s, p0/m, z0.s
-; CHECK-NEXT:    cmpne p0.s, p0/z, z1.s, z0.s
+; CHECK-NEXT:    asr z1.s, z0.s, #31
+; CHECK-NEXT:    movprfx z3, z0
+; CHECK-NEXT:    sxth z3.s, p0/m, z0.s
+; CHECK-NEXT:    cmpne p1.s, p0/z, z2.s, z1.s
+; CHECK-NEXT:    cmpne p0.s, p0/z, z3.s, z0.s
+; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    mov z0.s, p0/m, #0 // =0x0
 ; CHECK-NEXT:    ret
   %a = call { <vscale x 4 x i16>, <vscale x 4 x i16> } @llvm.smul.with.overflow.nxv4i16(<vscale x 4 x i16> %x, <vscale x 4 x i16> %y)
@@ -290,10 +315,15 @@ define <vscale x 2 x i32> @smulo_nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y) {
 ; CHECK-NEXT:    ptrue p0.d
 ; CHECK-NEXT:    sxtw z1.d, p0/m, z1.d
 ; CHECK-NEXT:    sxtw z0.d, p0/m, z0.d
+; CHECK-NEXT:    movprfx z2, z0
+; CHECK-NEXT:    smulh z2.d, p0/m, z2.d, z1.d
 ; CHECK-NEXT:    mul z0.d, p0/m, z0.d, z1.d
-; CHECK-NEXT:    movprfx z1, z0
-; CHECK-NEXT:    sxtw z1.d, p0/m, z0.d
-; CHECK-NEXT:    cmpne p0.d, p0/z, z1.d, z0.d
+; CHECK-NEXT:    asr z1.d, z0.d, #63
+; CHECK-NEXT:    movprfx z3, z0
+; CHECK-NEXT:    sxtw z3.d, p0/m, z0.d
+; CHECK-NEXT:    cmpne p1.d, p0/z, z2.d, z1.d
+; CHECK-NEXT:    cmpne p0.d, p0/z, z3.d, z0.d
+; CHECK-NEXT:    sel p0.b, p0, p0.b, p1.b
 ; CHECK-NEXT:    mov z0.d, p0/m, #0 // =0x0
 ; CHECK-NEXT:    ret
   %a = call { <vscale x 2 x i32>, <vscale x 2 x i32> } @llvm.smul.with.overflow.nxv2i32(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
index fd951d5..dbeefd0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
@@ -12,8 +12,11 @@ define <vscale x 8 x i7> @vdiv_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
   %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
   %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
index c69f5fd..24414d5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
@@ -12,8 +12,11 @@ define <vscale x 8 x i7> @vmax_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
   %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
   %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
index 95c5cda..ae749e5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
@@ -12,8 +12,11 @@ define <vscale x 8 x i7> @vmin_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
   %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
   %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
index 74a8fce1..5f2fca9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
@@ -12,8 +12,11 @@ define <vscale x 8 x i7> @vrem_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
   %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
   %vb = shufflevector <vscale x 8 x i7> %elt.head, <vscale x 8 x i7> poison, <vscale x 8 x i32> zeroinitializer
-- 
2.7.4
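
Note (not part of the patch; `git am` ignores text after the signature):
among the cases removed above, the reverted change let ComputeNumSignBits
look through ISD::SPLAT_VECTOR. A minimal IR sketch of the kind of pattern
affected; the function and value names are made up for illustration:

  ; %splat lowers to ISD::SPLAT_VECTOR of an i64 that is a sext from i8.
  ; With the reverted SPLAT_VECTOR handling, ComputeNumSignBits could
  ; report 64 - 8 + 1 = 57 sign bits for %splat, letting later combines
  ; drop redundant sign-extends; after this revert, any scalable-vector
  ; value conservatively reports 1 sign bit again.
  define <vscale x 2 x i64> @splat_sext_i8(i8 %x) {
    %ext = sext i8 %x to i64
    %ins = insertelement <vscale x 2 x i64> poison, i64 %ext, i32 0
    %splat = shufflevector <vscale x 2 x i64> %ins, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
    ret <vscale x 2 x i64> %splat
  }

The extra sign-extension instructions in the AArch64 and RISC-V test diffs
above are the codegen cost of losing exactly this kind of reasoning.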