return false;
}
+// Returns true if predicate N is known to have no active lanes, i.e. it is an
+// all-zeros constant splat, possibly wrapped in REINTERPRET_CAST nodes.
+static bool isAllInactivePredicate(SDValue N) {
+ // Look through cast.
+ while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST)
+ N = N.getOperand(0);
+
+ return ISD::isConstantSplatVectorAllZeros(N.getNode());
+}
+
+// Returns true if predicate N is known to be all active for the element count
+// implied by its type: an all-ones constant splat, a "ptrue p.<ty>, all" whose
+// element count is >= N's, or — when compiling for an exact SVE vector length
+// (min == max) — a ptrue pattern whose element count equals the runtime lane
+// count. Looks through REINTERPRET_CAST, bailing out when the cast widens the
+// element count (the extra lanes would be inactive).
+static bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) {
+ unsigned NumElts = N.getValueType().getVectorMinNumElements();
+
+ // Look through cast.
+ while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST) {
+ N = N.getOperand(0);
+ // When reinterpreting from a type with fewer elements the "new" elements
+ // are not active, so bail if they're likely to be used.
+ if (N.getValueType().getVectorMinNumElements() < NumElts)
+ return false;
+ }
+
+ if (ISD::isConstantSplatVectorAllOnes(N.getNode()))
+ return true;
+
+ // "ptrue p.<ty>, all" can be considered all active when <ty> is the same size
+ // or smaller than the implicit element type represented by N.
+ // NOTE: A larger element count implies a smaller element type.
+ if (N.getOpcode() == AArch64ISD::PTRUE &&
+ N.getConstantOperandVal(0) == AArch64SVEPredPattern::all)
+ return N.getValueType().getVectorMinNumElements() >= NumElts;
+
+ // If we're compiling for a specific vector-length, we can check if the
+ // pattern's VL equals that of the scalable vector at runtime.
+ if (N.getOpcode() == AArch64ISD::PTRUE) {
+ const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
+ unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
+ unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
+ // Only with an exact VL (min == max) is vscale a compile-time constant,
+ // letting us compare the pattern's element count to the real lane count.
+ if (MaxSVESize && MinSVESize == MaxSVESize) {
+ unsigned VScale = MaxSVESize / AArch64::SVEBitsPerBlock;
+ unsigned PatNumElts =
+ getNumElementsFromSVEPredPattern(N.getConstantOperandVal(0));
+ return PatNumElts == (NumElts * VScale);
+ }
+ }
+
+ return false;
+}
+
static SDValue performSVEAndCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) {
if (DCI.isBeforeLegalizeOps())
Zero);
}
-static bool isAllInactivePredicate(SDValue N) {
- // Look through cast.
- while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST)
- N = N.getOperand(0);
-
- return ISD::isConstantSplatVectorAllZeros(N.getNode());
-}
-
-static bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) {
- unsigned NumElts = N.getValueType().getVectorMinNumElements();
-
- // Look through cast.
- while (N.getOpcode() == AArch64ISD::REINTERPRET_CAST) {
- N = N.getOperand(0);
- // When reinterpreting from a type with fewer elements the "new" elements
- // are not active, so bail if they're likely to be used.
- if (N.getValueType().getVectorMinNumElements() < NumElts)
- return false;
- }
-
- if (ISD::isConstantSplatVectorAllOnes(N.getNode()))
- return true;
-
- // "ptrue p.<ty>, all" can be considered all active when <ty> is the same size
- // or smaller than the implicit element type represented by N.
- // NOTE: A larger element count implies a smaller element type.
- if (N.getOpcode() == AArch64ISD::PTRUE &&
- N.getConstantOperandVal(0) == AArch64SVEPredPattern::all)
- return N.getValueType().getVectorMinNumElements() >= NumElts;
-
- // If we're compiling for a specific vector-length, we can check if the
- // pattern's VL equals that of the scalable vector at runtime.
- if (N.getOpcode() == AArch64ISD::PTRUE) {
- const auto &Subtarget = DAG.getSubtarget<AArch64Subtarget>();
- unsigned MinSVESize = Subtarget.getMinSVEVectorSizeInBits();
- unsigned MaxSVESize = Subtarget.getMaxSVEVectorSizeInBits();
- if (MaxSVESize && MinSVESize == MaxSVESize) {
- unsigned VScale = MaxSVESize / AArch64::SVEBitsPerBlock;
- unsigned PatNumElts =
- getNumElementsFromSVEPredPattern(N.getConstantOperandVal(0));
- return PatNumElts == (NumElts * VScale);
- }
- }
-
- return false;
-}
-
// If a merged operation has no inactive lanes we can relax it to a predicated
// or unpredicated operation, which potentially allows better isel (perhaps
// using immediate forms) or relaxing register reuse requirements.