// arguments have ABI-prescribed offsets).
bool isAliased;
+ /// If true, the object has been zero-extended.
+ bool isZExt;
+
+ /// If true, the object has been sign-extended.
+ bool isSExt;
+
StackObject(uint64_t Sz, unsigned Al, int64_t SP, bool IM,
bool isSS, const AllocaInst *Val, bool A)
: SPOffset(SP), Size(Sz), Alignment(Al), isImmutable(IM),
isSpillSlot(isSS), isStatepointSpillSlot(false), Alloca(Val),
- PreAllocated(false), isAliased(A) {}
+ PreAllocated(false), isAliased(A), isZExt(false), isSExt(false) {}
};
/// The alignment of the stack.
return Objects[ObjectIdx+NumFixedObjects].SPOffset;
}
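+ /// Returns true if the object has been zero-extended.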
+ bool isObjectZExt(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].isZExt;
+ }
+
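+ /// Set whether the object has been zero-extended.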
+ void setObjectZExt(int ObjectIdx, bool IsZExt) {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ Objects[ObjectIdx+NumFixedObjects].isZExt = IsZExt;
+ }
+
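+ /// Returns true if the object has been sign-extended.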
+ bool isObjectSExt(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].isSExt;
+ }
+
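+ /// Set whether the object has been sign-extended.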
+ void setObjectSExt(int ObjectIdx, bool IsSExt) {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ Objects[ObjectIdx+NumFixedObjects].isSExt = IsSExt;
+ }
+
/// Set the stack frame offset of the specified object. The
/// offset is relative to the stack pointer on entry to the function.
void setObjectOffset(int ObjectIdx, int64_t SPOffset) {
} else {
int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
VA.getLocMemOffset(), isImmutable);
+
+ // Record whether the argument was sign- or zero-extended into its stack
+ // slot, so that MatchingStackOffset can check the flags for tail calls.
+ if (VA.getLocInfo() == CCValAssign::ZExt) {
+ MFI->setObjectZExt(FI, true);
+ } else if (VA.getLocInfo() == CCValAssign::SExt) {
+ MFI->setObjectSExt(FI, true);
+ }
+
// Adjust SP offset of interrupt parameter.
if (CallConv == CallingConv::X86_INTR) {
MFI->setObjectOffset(FI, Offset);
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
- const X86InstrInfo *TII) {
+ const X86InstrInfo *TII, const CCValAssign &VA) {
unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
int FI = INT_MAX;
if (Arg.getOpcode() == ISD::CopyFromReg) {
assert(FI != INT_MAX);
if (!MFI->isFixedObjectIndex(FI))
return false;
- return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
+
+ if (Offset != MFI->getObjectOffset(FI))
+ return false;
+
+ if (VA.getLocVT().getSizeInBits() > Arg.getValueType().getSizeInBits()) {
+ // If the argument location is wider than the argument type, check that any
+ // extension flags match.
+ if (Flags.isZExt() != MFI->isObjectZExt(FI) ||
+ Flags.isSExt() != MFI->isObjectSExt(FI)) {
+ return false;
+ }
+ }
+
+ return Bytes == MFI->getObjectSize(FI);
}
/// Check whether the call is eligible for tail call optimization. Targets
return false;
if (!VA.isRegLoc()) {
if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
- MFI, MRI, TII))
+ MFI, MRI, TII, VA))
return false;
}
}
--- /dev/null
+; RUN: llc -mtriple=i686-unknown-linux-gnu -o - %s | FileCheck %s
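+; Test that a mismatch between the sign/zero-extension attributes of a
+; forwarded stack argument and the callee's expectation blocks the tail call.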
+
+declare void @f(i16 signext)
+declare void @g(i32 signext)
+
+
+define void @flags_match(i16 signext %x) {
+entry:
+ tail call void @f(i16 signext %x)
+ ret void
+
+; The parameter flags match; do the tail call.
+; CHECK-LABEL: flags_match:
+; CHECK: jmp f
+}
+
+define void @flags_mismatch(i16 zeroext %x) {
+entry:
+ tail call void @f(i16 signext %x)
+ ret void
+
+; The parameter flags mismatch: %x has not been sign-extended,
+; so the tail call is not possible.
+; CHECK-LABEL: flags_mismatch:
+; CHECK: movswl
+; CHECK: calll f
+}
+
+define void @mismatch_doesnt_matter(i32 zeroext %x) {
+entry:
+ tail call void @g(i32 signext %x)
+ ret void
+
+; The parameter flags mismatch, but the type is wide enough that
+; no extension takes place in practice, so do the tail call.
+; CHECK-LABEL: mismatch_doesnt_matter:
+; CHECK: jmp g
+}