for (auto VT : {MVT::v4f16, MVT::v8f16, MVT::v2f32, MVT::v4f32, MVT::v2f64})
setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
- if (Subtarget->forceStreamingCompatibleSVE()) {
+ if (!Subtarget->isNeonAvailable()) {
setTruncStoreAction(MVT::v2f32, MVT::v2f16, Custom);
setTruncStoreAction(MVT::v4f32, MVT::v4f16, Custom);
setTruncStoreAction(MVT::v8f32, MVT::v8f16, Custom);
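// Throughout this patch the explicit forceStreamingCompatibleSVE() override
// is replaced by !isNeonAvailable(), which additionally covers functions
// compiled for streaming or streaming-compatible SVE rather than only the
// -force-streaming-compatible-sve testing flag. A minimal sketch of the
// query, assuming the AArch64Subtarget definition has roughly this shape
// (the upstream version may differ in detail):
bool isNeonAvailable() const {
  // NEON (AdvSIMD) instructions are illegal in SME streaming mode, so having
  // the feature is not enough; the function must also be known never to run
  // in streaming or streaming-compatible mode.
  return hasNEON() && !isStreaming() && !isStreamingCompatible();
}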
SDValue AArch64TargetLowering::LowerXOR(SDValue Op, SelectionDAG &DAG) const {
if (useSVEForFixedLengthVectorVT(Op.getValueType(),
- Subtarget->forceStreamingCompatibleSVE()))
+ !Subtarget->isNeonAvailable()))
return LowerToScalableOp(Op, DAG);
SDValue Sel = Op.getOperand(0);
if (VT.isScalableVector())
return LowerToPredicatedOp(Op, DAG, AArch64ISD::FP_EXTEND_MERGE_PASSTHRU);
- if (useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE()))
+ if (useSVEForFixedLengthVectorVT(VT, !Subtarget->isNeonAvailable()))
return LowerFixedLengthFPExtendToSVE(Op, DAG);
assert(Op.getValueType() == MVT::f128 && "Unexpected lowering");
SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
EVT SrcVT = SrcVal.getValueType();
- if (useSVEForFixedLengthVectorVT(SrcVT,
- Subtarget->forceStreamingCompatibleSVE()))
+ if (useSVEForFixedLengthVectorVT(SrcVT, !Subtarget->isNeonAvailable()))
return LowerFixedLengthFPRoundToSVE(Op, DAG);
if (SrcVT != MVT::f128) {
return LowerToPredicatedOp(Op, DAG, Opcode);
}
- if (useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE()) ||
- useSVEForFixedLengthVectorVT(InVT,
- Subtarget->forceStreamingCompatibleSVE()))
+ if (useSVEForFixedLengthVectorVT(VT, !Subtarget->isNeonAvailable()) ||
+ useSVEForFixedLengthVectorVT(InVT, !Subtarget->isNeonAvailable()))
return LowerFixedLengthFPToIntToSVE(Op, DAG);
unsigned NumElts = InVT.getVectorNumElements();
return LowerToPredicatedOp(Op, DAG, Opcode);
}
- if (useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE()) ||
- useSVEForFixedLengthVectorVT(InVT,
- Subtarget->forceStreamingCompatibleSVE()))
+ if (useSVEForFixedLengthVectorVT(VT, !Subtarget->isNeonAvailable()) ||
+ useSVEForFixedLengthVectorVT(InVT, !Subtarget->isNeonAvailable()))
return LowerFixedLengthIntToFPToSVE(Op, DAG);
uint64_t VTSize = VT.getFixedSizeInBits();
SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
- // If SVE is available then i64 vector multiplications can also be made legal.
- bool OverrideNEON = Subtarget->forceStreamingCompatibleSVE();
-
+ bool OverrideNEON = !Subtarget->isNeonAvailable();
if (VT.isScalableVector() || useSVEForFixedLengthVectorVT(VT, OverrideNEON))
return LowerToPredicatedOp(Op, DAG, AArch64ISD::MUL_PRED);
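// How the OverrideNEON argument is consumed (a simplified sketch; the real
// useSVEForFixedLengthVectorVT also checks element-type legality and the
// -aarch64-sve-vector-bits-min/-max bounds): when OverrideNEON is true,
// even 64/128-bit NEON-sized fixed vectors are routed to the SVE lowering.
bool AArch64TargetLowering::useSVEForFixedLengthVectorVT(
    EVT VT, bool OverrideNEON) const {
  if (!VT.isFixedLengthVector() || !Subtarget->hasSVE())
    return false;
  // NEON is unusable here, so use SVE for any SVE-representable fixed VT.
  if (OverrideNEON)
    return true;
  // Otherwise only vectors wider than the 128-bit NEON registers qualify,
  // and only when fixed-length SVE codegen was requested.
  return VT.getFixedSizeInBits() > 128 &&
         Subtarget->useSVEForFixedLengthVectors();
}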
return LowerMLOAD(Op, DAG);
case ISD::LOAD:
if (useSVEForFixedLengthVectorVT(Op.getValueType(),
- Subtarget->forceStreamingCompatibleSVE()))
+ !Subtarget->isNeonAvailable()))
return LowerFixedLengthVectorLoadToSVE(Op, DAG);
return LowerLOAD(Op, DAG);
case ISD::ADD:
getPackedSVEVectorVT(VT.getVectorElementType().changeTypeToInteger());
if (VT.isFixedLengthVector() &&
- useSVEForFixedLengthVectorVT(VT, Subtarget->forceStreamingCompatibleSVE())) {
+ useSVEForFixedLengthVectorVT(VT, !Subtarget->isNeonAvailable())) {
EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
In1 = convertToScalableVector(DAG, ContainerVT, In1);
assert(!IsParity && "ISD::PARITY of vector types not supported");
if (VT.isScalableVector() ||
- useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE()))
+ useSVEForFixedLengthVectorVT(VT, !Subtarget->isNeonAvailable()))
return LowerToPredicatedOp(Op, DAG, AArch64ISD::CTPOP_MERGE_PASSTHRU);
assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 ||
return DAG.getNode(ISD::VSELECT, DL, Ty, SplatPred, TVal, FVal);
}
- if (useSVEForFixedLengthVectorVT(Ty,
- Subtarget->forceStreamingCompatibleSVE())) {
+ if (useSVEForFixedLengthVectorVT(Ty, !Subtarget->isNeonAvailable())) {
// FIXME: Ideally this would be the same as above using i1 types, however
// for the moment we can't deal with fixed i1 vector types properly, so
// instead extend the predicate to a result type sized integer vector.
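// A sketch of the workaround the FIXME above describes (assumed shape, not
// the exact upstream code; LHS, RHS and CC stand in for the compared
// operands and condition): build the predicate as an integer vector as wide
// as the result instead of as a fixed i1 vector, then select with it.
EVT PredVT = Ty.changeVectorElementTypeToInteger(); // e.g. v4f32 -> v4i32
SDValue Pred = DAG.getSetCC(DL, PredVT, LHS, RHS, CC);
return DAG.getNode(ISD::VSELECT, DL, Ty, Pred, TVal, FVal);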
ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
- if (useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE()))
+ if (useSVEForFixedLengthVectorVT(VT, !Subtarget->isNeonAvailable()))
return LowerFixedLengthVECTOR_SHUFFLEToSVE(Op, DAG);
// Convert shuffles that are directly supported on NEON to target-specific
SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
- if (useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE()))
+ if (useSVEForFixedLengthVectorVT(VT, !Subtarget->isNeonAvailable()))
return LowerToScalableOp(Op, DAG);
assert(VT.isScalableVector() && VT.getVectorElementType() == MVT::i1 &&
const SDValue *LHS = nullptr) {
EVT VT = Op.getValueType();
if (VT.isFixedLengthVector() &&
- DAG.getSubtarget<AArch64Subtarget>().forceStreamingCompatibleSVE())
+ !DAG.getSubtarget<AArch64Subtarget>().isNeonAvailable())
return SDValue();
if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
const SDValue *LHS = nullptr) {
EVT VT = Op.getValueType();
if (VT.isFixedLengthVector() &&
- DAG.getSubtarget<AArch64Subtarget>().forceStreamingCompatibleSVE())
+ !DAG.getSubtarget<AArch64Subtarget>().isNeonAvailable())
return SDValue();
if (Bits.getHiBits(64) == Bits.getLoBits(64)) {
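// Rationale for the early bail in these AdvSIMD modified-immediate helpers
// (sketch): they materialize constant splats with NEON MOVI/MVNI-style
// nodes, e.g. something like
//   DAG.getNode(AArch64ISD::MOVIshift, DL, MVT::v4i32,
//               DAG.getConstant(Value, DL, MVT::i32),
//               DAG.getConstant(Shift, DL, MVT::i32));
// which has no streaming-mode encoding, so when NEON is unavailable they
// return SDValue() and constant splats fall through to the SVE DUP/DUPM path.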
SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
SelectionDAG &DAG) const {
if (useSVEForFixedLengthVectorVT(Op.getValueType(),
- Subtarget->forceStreamingCompatibleSVE()))
+ !Subtarget->isNeonAvailable()))
return LowerToScalableOp(Op, DAG);
// Attempt to form a vector S[LR]I from (or (and X, C1), (lsl Y, C2))
SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
- if (useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE())) {
+ if (useSVEForFixedLengthVectorVT(VT, !Subtarget->isNeonAvailable())) {
if (auto SeqInfo = cast<BuildVectorSDNode>(Op)->isConstantSequence()) {
SDLoc DL(Op);
EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
SDValue AArch64TargetLowering::LowerCONCAT_VECTORS(SDValue Op,
SelectionDAG &DAG) const {
if (useSVEForFixedLengthVectorVT(Op.getValueType(),
- Subtarget->forceStreamingCompatibleSVE()))
+ !Subtarget->isNeonAvailable()))
return LowerFixedLengthConcatVectorsToSVE(Op, DAG);
assert(Op.getValueType().isScalableVector() &&
assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && "Unknown opcode!");
if (useSVEForFixedLengthVectorVT(Op.getValueType(),
- Subtarget->forceStreamingCompatibleSVE()))
+ !Subtarget->isNeonAvailable()))
return LowerFixedLengthInsertVectorElt(Op, DAG);
EVT VT = Op.getOperand(0).getValueType();
return DAG.getAnyExtOrTrunc(Extract, DL, Op.getValueType());
}
- if (useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE()))
+ if (useSVEForFixedLengthVectorVT(VT, !Subtarget->isNeonAvailable()))
return LowerFixedLengthExtractVectorElt(Op, DAG);
// Check for non-constant or out of range lane.
// If this is extracting the upper 64-bits of a 128-bit vector, we match
// that directly.
if (Size == 64 && Idx * InVT.getScalarSizeInBits() == 64 &&
- InVT.getSizeInBits() == 128 && !Subtarget->forceStreamingCompatibleSVE())
+ InVT.getSizeInBits() == 128 && Subtarget->isNeonAvailable())
return Op;
- if (useSVEForFixedLengthVectorVT(InVT,
- Subtarget->forceStreamingCompatibleSVE())) {
+ if (useSVEForFixedLengthVectorVT(InVT, !Subtarget->isNeonAvailable())) {
SDLoc DL(Op);
EVT ContainerVT = getContainerForFixedLengthVector(DAG, InVT);
bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
// Currently no fixed length shuffles that require SVE are legal.
- if (useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE()))
+ if (useSVEForFixedLengthVectorVT(VT, !Subtarget->isNeonAvailable()))
return false;
if (VT.getVectorNumElements() == 4 &&
return SDValue();
if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType(),
- Subtarget->forceStreamingCompatibleSVE()))
+ !Subtarget->isNeonAvailable()))
return LowerFixedLengthVectorTruncateToSVE(Op, DAG);
return SDValue();
switch (Op.getOpcode()) {
case ISD::SHL:
if (VT.isScalableVector() ||
- useSVEForFixedLengthVectorVT(VT,
- Subtarget->forceStreamingCompatibleSVE()))
+ useSVEForFixedLengthVectorVT(VT, !Subtarget->isNeonAvailable()))
return LowerToPredicatedOp(Op, DAG, AArch64ISD::SHL_PRED);
if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize)
case ISD::SRA:
case ISD::SRL:
if (VT.isScalableVector() ||
- useSVEForFixedLengthVectorVT(
- VT, Subtarget->forceStreamingCompatibleSVE())) {
+ useSVEForFixedLengthVectorVT(VT, !Subtarget->isNeonAvailable())) {
unsigned Opc = Op.getOpcode() == ISD::SRA ? AArch64ISD::SRA_PRED
: AArch64ISD::SRL_PRED;
return LowerToPredicatedOp(Op, DAG, Opc);
return LowerToPredicatedOp(Op, DAG, AArch64ISD::SETCC_MERGE_ZERO);
if (useSVEForFixedLengthVectorVT(Op.getOperand(0).getValueType(),
- Subtarget->forceStreamingCompatibleSVE()))
+ !Subtarget->isNeonAvailable()))
return LowerFixedLengthVectorSetccToSVE(Op, DAG);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
// Try to lower fixed length reductions to SVE.
EVT SrcVT = Src.getValueType();
- bool OverrideNEON = Subtarget->forceStreamingCompatibleSVE() ||
+ bool OverrideNEON = !Subtarget->isNeonAvailable() ||
Op.getOpcode() == ISD::VECREDUCE_AND ||
Op.getOpcode() == ISD::VECREDUCE_OR ||
Op.getOpcode() == ISD::VECREDUCE_XOR ||
}
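// Why the opcode clauses above force OverrideNEON (sketch of the rationale):
// NEON has no horizontal AND/OR/XOR reductions, while SVE provides them
// directly. A hypothetical helper showing the predicated nodes involved
// (the AArch64ISD names are real; the helper itself is illustrative only):
static unsigned getLogicalReductionOpc(unsigned Opcode) {
  switch (Opcode) {
  case ISD::VECREDUCE_AND: return AArch64ISD::ANDV_PRED; // SVE ANDV
  case ISD::VECREDUCE_OR:  return AArch64ISD::ORV_PRED;  // SVE ORV
  case ISD::VECREDUCE_XOR: return AArch64ISD::EORV_PRED; // SVE EORV
  default: llvm_unreachable("not a logical reduction");
  }
}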
unsigned VecSize = DL.getTypeSizeInBits(VecTy);
- if (Subtarget->forceStreamingCompatibleSVE() ||
+ if (!Subtarget->isNeonAvailable() ||
(Subtarget->useSVEForFixedLengthVectors() &&
(VecSize % Subtarget->getMinSVEVectorSizeInBits() == 0 ||
(VecSize < Subtarget->getMinSVEVectorSizeInBits() &&
static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) {
- if (!Subtarget->hasNEON() || Subtarget->forceStreamingCompatibleSVE())
+ if (!Subtarget->isNeonAvailable())
return SDValue();
if (!N->getValueType(0).isSimple())
// It also doesn't work in streaming mode because it would generate BSL
// instructions, which are invalid in streaming mode.
if (TLI.useSVEForFixedLengthVectorVT(
- VT,
- DAG.getSubtarget<AArch64Subtarget>().forceStreamingCompatibleSVE()))
+ VT, !DAG.getSubtarget<AArch64Subtarget>().isNeonAvailable()))
return SDValue();
SDValue N0 = N->getOperand(0);
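// Shape of the combine that this guard disables (sketch; X, Y and MaskC are
// stand-ins): a masked merge is normally folded into a single NEON
// bitwise-select node, but the resulting BSL/BIT/BIF encodings are
// AdvSIMD-only and hence unavailable in streaming mode.
SDValue And0 = DAG.getNode(ISD::AND, DL, VT, X, MaskC);
SDValue And1 = DAG.getNode(ISD::AND, DL, VT, Y, DAG.getNOT(DL, MaskC, VT));
// (or And0, And1) would then become AArch64ISD::BSP and select to BSL.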