// Calculate required stack adjustment.
uint64_t FrameSize = StackSize - SlotSize;
- // If required, include space for extra hidden slot for stashing base pointer.
- if (X86FI->getRestoreBasePointer())
- FrameSize += SlotSize;
-
NumBytes = FrameSize -
(X86FI->getCalleeSavedFrameSize() + TailCallArgReserveSize);
I.setFrameIdx(SlotIndex);
}
+ // Adjust the spill slot's offset now that the exact callee-saved frame
+ // size is known.
+ if (X86FI->getRestoreBasePointer()) {
+ SpillSlotOffset -= SlotSize;
+ CalleeSavedFrameSize += SlotSize;
+
+ MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
+ // TODO: would it be better to save the slot index instead?
+ X86FI->setRestoreBasePointer(CalleeSavedFrameSize);
+ }
X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);
MFI.setCVBytesOfCalleeSavedRegisters(CalleeSavedFrameSize);
.setMIFlag(MachineInstr::FrameSetup);
}
+ const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ if (X86FI->getRestoreBasePointer()) {
+ unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
+ Register BaseReg = this->TRI->getBaseRegister();
+ BuildMI(MBB, MI, DL, TII.get(Opc))
+ .addReg(BaseReg, getKillRegState(true))
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
// Make XMM regs spilled. X86 does not have ability of push/pop XMM.
// It can be done by spilling XMMs to stack frame.
for (const CalleeSavedInfo &I : llvm::reverse(CSI)) {
Register());
}
+ // Restore the spilled base pointer register from its stack slot.
+ MachineFunction &MF = *MBB.getParent();
+ const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ if (X86FI->getRestoreBasePointer()) {
+ unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
+ Register BaseReg = this->TRI->getBaseRegister();
+ BuildMI(MBB, MI, DL, TII.get(Opc), BaseReg)
+ .setMIFlag(MachineInstr::FrameDestroy);
+ }
+
// POP GPRs.
unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
for (const CalleeSavedInfo &I : CSI) {
%t = call i32 @llvm.eh.sjlj.setjmp(ptr %s)
call void @whatever(i64 %n, ptr %f, ptr %p, ptr %q, ptr %s, i32 %t) #1
ret i32 0
+; X86: pushl %esi
+; X86-NEXT: pushl %esi
; X86: movl %esp, %esi
; X86: movl %esp, -16(%ebp)
; X86: {{.LBB.*:}}
; X86: movl -16(%ebp), %esi
; X86: {{.LBB.*:}}
+; X86: popl %esi
+; X86-NEXT: popl %esi
+; X64: pushq %rbx
+; X64-NEXT: pushq %rbx
; X64: movq %rsp, %rbx
; X64: movq %rsp, -48(%rbp)
; X64: {{.LBB.*:}}
; X64: movq -48(%rbp), %rbx
; X64: {{.LBB.*:}}
+; X64: popq %rbx
+; X64-NEXT: popq %rbx
}