[SelectionDAG] Use KnownBits::countMinSignBits() to simplify the end of ComputeNumSignBits
authorCraig Topper <craig.topper@sifive.com>
Sat, 1 Jan 2022 01:29:57 +0000 (17:29 -0800)
committerCraig Topper <craig.topper@sifive.com>
Sat, 1 Jan 2022 01:29:57 +0000 (17:29 -0800)
This matches what is done in ValueTracking.cpp

Reviewed By: RKSimon, foad

Differential Revision: https://reviews.llvm.org/D116423

llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp

index 2ae0d4d..d14647d 100644 (file)
@@ -4294,21 +4294,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
   // Finally, if we can prove that the top bits of the result are 0's or 1's,
   // use this information.
   KnownBits Known = computeKnownBits(Op, DemandedElts, Depth);
-
-  APInt Mask;
-  if (Known.isNonNegative()) {        // sign bit is 0
-    Mask = Known.Zero;
-  } else if (Known.isNegative()) {  // sign bit is 1;
-    Mask = Known.One;
-  } else {
-    // Nothing known.
-    return FirstAnswer;
-  }
-
-  // Okay, we know that the sign bit in Mask is set.  Use CLO to determine
-  // the number of identical bits in the top of the input value.
-  Mask <<= Mask.getBitWidth()-VTBits;
-  return std::max(FirstAnswer, Mask.countLeadingOnes());
+  return std::max(FirstAnswer, Known.countMinSignBits());
 }
 
 unsigned SelectionDAG::ComputeMinSignedBits(SDValue Op, unsigned Depth) const {