return CI->isTailCall();
}
-bool AArch64TargetLowering::getIndexedAddressParts(
- SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset,
- ISD::MemIndexedMode &AM, bool &IsInc, SelectionDAG &DAG) const {
+bool AArch64TargetLowering::getIndexedAddressParts(SDNode *N, SDNode *Op,
+ SDValue &Base,
+ SDValue &Offset,
+ SelectionDAG &DAG) const {
if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB)
return false;
RHSC = -(uint64_t)RHSC;
if (!isInt<9>(RHSC))
return false;
- IsInc = (Op->getOpcode() == ISD::ADD);
- Offset = Op->getOperand(1);
+ // Always emit pre-inc/post-inc addressing mode. Use negated constant offset
+ // when dealing with subtraction.
+ Offset = DAG.getConstant(RHSC, SDLoc(N), RHS->getValueType(0));
return true;
}
return false;
} else
return false;
- bool IsInc;
- if (!getIndexedAddressParts(N, Ptr.getNode(), Base, Offset, AM, IsInc, DAG))
+ if (!getIndexedAddressParts(N, Ptr.getNode(), Base, Offset, DAG))
return false;
- AM = IsInc ? ISD::PRE_INC : ISD::PRE_DEC;
+ AM = ISD::PRE_INC;
return true;
}
} else
return false;
- bool IsInc;
- if (!getIndexedAddressParts(N, Op, Base, Offset, AM, IsInc, DAG))
+ if (!getIndexedAddressParts(N, Op, Base, Offset, DAG))
return false;
// Post-indexing updates the base, so it's not a valid transform
// if that's not the same as the load's pointer.
if (Ptr != Base)
return false;
- AM = IsInc ? ISD::POST_INC : ISD::POST_DEC;
+ AM = ISD::POST_INC;
return true;
}
bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
bool getIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
- SDValue &Offset, ISD::MemIndexedMode &AM,
- bool &IsInc, SelectionDAG &DAG) const;
+ SDValue &Offset, SelectionDAG &DAG) const;
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
ISD::MemIndexedMode &AM,
SelectionDAG &DAG) const override;
--- /dev/null
+; RUN: llc -mtriple=aarch64-linux-gnu < %s | FileCheck %s
+
+; Reduced test from https://github.com/llvm/llvm-project/issues/60645.
+; Check that we generate -32 as the pre-indexed offset for the first store.
+
+define i8* @pr60645(i8* %ptr, i64 %t0) {
+; CHECK-LABEL: pr60645:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub x8, x0, x1, lsl #2
+; CHECK-NEXT: str wzr, [x8, #-32]!
+; CHECK-NEXT: stur wzr, [x8, #-8]
+; CHECK-NEXT: ret
+ ; First store: address = ptr + (-4 * (t0 + 8)) = ptr - 4*t0 - 32.
+ ; The CHECK lines expect this to fold into a pre-indexed store
+ ; (`str ... [x8, #-32]!`) carrying the negative -32 offset.
+ %t1 = add nuw nsw i64 %t0, 8
+ %t2 = mul i64 %t1, -4
+ %t3 = getelementptr i8, i8* %ptr, i64 %t2
+ %t4 = bitcast i8* %t3 to i32*
+ store i32 0, i32* %t4, align 4
+ ; Second store: address = ptr + (-8 - 4*(t0 + 8)) = ptr - 4*t0 - 40,
+ ; i.e. 8 bytes below the first store's updated base, hence the
+ ; expected `stur wzr, [x8, #-8]` after the pre-index writeback.
+ %t5 = shl i64 %t1, 2
+ %t6 = sub nuw nsw i64 -8, %t5
+ %t7 = getelementptr i8, i8* %ptr, i64 %t6
+ %t8 = bitcast i8* %t7 to i32*
+ store i32 0, i32* %t8, align 4
+ ret i8* %ptr
+}