/// past it.
ANNOTATION_LABEL,
- /// CATCHPAD - Represents a catchpad instruction.
- CATCHPAD,
-
/// CATCHRET - Represents a return from a catch block funclet. Used for
/// MSVC compatible exception handling. Takes a chain operand and a
/// destination basic block operand.
def catchret : SDNode<"ISD::CATCHRET" , SDTCatchret,
[SDNPHasChain, SDNPSideEffect]>;
def cleanupret : SDNode<"ISD::CLEANUPRET" , SDTNone, [SDNPHasChain]>;
-def catchpad : SDNode<"ISD::CATCHPAD" , SDTNone,
- [SDNPHasChain, SDNPSideEffect]>;
def trap : SDNode<"ISD::TRAP" , SDTNone,
[SDNPHasChain, SDNPSideEffect]>;
bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
bool IsSEH = isAsynchronousEHPersonality(Pers);
- bool IsWasmCXX = Pers == EHPersonality::Wasm_CXX;
MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
if (!IsSEH)
CatchPadMBB->setIsEHScopeEntry();
  // In MSVC C++ and CoreCLR, catch blocks are funclets and need prologues.
if (IsMSVCCXX || IsCoreCLR)
CatchPadMBB->setIsEHFuncletEntry();
- // Wasm does not need catchpads anymore
- if (!IsWasmCXX)
- DAG.setRoot(DAG.getNode(ISD::CATCHPAD, getCurSDLoc(), MVT::Other,
- getControlRoot()));
}
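(For orientation, the IR shape being lowered here is a catchswitch/catchpad pair
followed by a catchret back into the parent function. The sketch below uses
illustrative names and an SEH-style filter argument; a full, compilable version
is the test added at the end of this patch.)

declare void @may_throw()
declare i32 @my_filter()
declare i32 @_except_handler3(...)

define void @sketch() personality i32 (...)* @_except_handler3 {
entry:
  invoke void @may_throw()
          to label %cont unwind label %catch.dispatch

catch.dispatch:
  %cs = catchswitch within none [label %handler] unwind to caller

handler:                                ; catchpad block handled by visitCatchPad
  %cp = catchpad within %cs [i8* bitcast (i32 ()* @my_filter to i8*)]
  catchret from %cp to label %cont      ; handled by visitCatchRet

cont:
  ret void
}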
void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
return BB;
}
-MachineBasicBlock *AArch64TargetLowering::EmitLoweredCatchPad(
- MachineInstr &MI, MachineBasicBlock *BB) const {
- MI.eraseFromParent();
- return BB;
-}
-
MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter(
MachineInstr &MI, MachineBasicBlock *BB) const {
switch (MI.getOpcode()) {
case AArch64::CATCHRET:
return EmitLoweredCatchRet(MI, BB);
- case AArch64::CATCHPAD:
- return EmitLoweredCatchPad(MI, BB);
}
}
MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
- MachineBasicBlock *BB) const;
-
MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *MBB) const override;
Sched<[]>;
}
-let hasSideEffects = 1, hasCtrlDep = 1, isCodeGenOnly = 1,
- usesCustomInserter = 1 in
-def CATCHPAD : Pseudo<(outs), (ins), [(catchpad)]>, Sched<[]>;
-
//===----------------------------------------------------------------------===//
// Floating point immediate move.
//===----------------------------------------------------------------------===//
MBB.erase(MBBI);
return true;
}
- case X86::EH_RESTORE: {
- // Restore ESP and EBP, and optionally ESI if required.
- bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(
- MBB.getParent()->getFunction().getPersonalityFn()));
- X86FL->restoreWin32EHStackPointers(MBB, MBBI, DL, /*RestoreSP=*/IsSEH);
- MBBI->eraseFromParent();
- return true;
- }
case X86::LCMPXCHG8B_SAVE_EBX:
case X86::LCMPXCHG16B_SAVE_RBX: {
// Perform the following transformation.
UnwindHelpFI)
.addImm(-2);
}
+
+void X86FrameLowering::processFunctionBeforeFrameIndicesReplaced(
+ MachineFunction &MF, RegScavenger *RS) const {
+ if (STI.is32Bit() && MF.hasEHFunclets())
+ restoreWinEHStackPointersInParent(MF);
+}
+
+void X86FrameLowering::restoreWinEHStackPointersInParent(
+ MachineFunction &MF) const {
+ // 32-bit functions have to restore stack pointers when control is transferred
+ // back to the parent function. These blocks are identified as EH pads that
+ // are not funclet entries.
+ bool IsSEH = isAsynchronousEHPersonality(
+ classifyEHPersonality(MF.getFunction().getPersonalityFn()));
+ for (MachineBasicBlock &MBB : MF) {
+ bool NeedsRestore = MBB.isEHPad() && !MBB.isEHFuncletEntry();
+ if (NeedsRestore)
+ restoreWin32EHStackPointers(MBB, MBB.begin(), DebugLoc(),
+ /*RestoreSP=*/IsSEH);
+ }
+}
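(Concretely, in the 32-bit SEH test added at the end of this patch, the block
for %__except.ret is an EH pad but not a funclet entry, so PEI now emits the
restore sequence at the very top of that block. The frame offsets depend on the
final frame layout; the values shown here are simply what the new test expects.)

LBB0_1:                          # %__except.ret
        movl    -24(%ebp), %esp  # restore ESP
        addl    $12, %ebp        # restore EBP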
void processFunctionBeforeFrameFinalized(MachineFunction &MF,
RegScavenger *RS) const override;
+ void
+ processFunctionBeforeFrameIndicesReplaced(MachineFunction &MF,
+ RegScavenger *RS) const override;
+
  /// Check the instruction before/after the passed instruction. If
  /// it is an ADD/SUB/LEA instruction, it is deleted and the
  /// stack adjustment is returned as a positive value for ADD/LEA and
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, bool RestoreSP = false) const;
+ void restoreWinEHStackPointersInParent(MachineFunction &MF) const;
+
int getInitialCFAOffset(const MachineFunction &MF) const override;
unsigned getInitialCFARegister(const MachineFunction &MF) const override;
BB->addSuccessor(RestoreMBB);
MI.getOperand(0).setMBB(RestoreMBB);
+ // Marking this as an EH pad but not a funclet entry block causes PEI to
+ // restore stack pointers in the block.
+ RestoreMBB->setIsEHPad(true);
+
auto RestoreMBBI = RestoreMBB->begin();
- BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::EH_RESTORE));
BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
return BB;
}
MachineBasicBlock *
-X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
- MachineBasicBlock *BB) const {
- MachineFunction *MF = BB->getParent();
- const Constant *PerFn = MF->getFunction().getPersonalityFn();
- bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
- // Only 32-bit SEH requires special handling for catchpad.
- if (IsSEH && Subtarget.is32Bit()) {
- const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
- DebugLoc DL = MI.getDebugLoc();
- BuildMI(*BB, MI, DL, TII.get(X86::EH_RESTORE));
- }
- MI.eraseFromParent();
- return BB;
-}
-
-MachineBasicBlock *
X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
MachineBasicBlock *BB) const {
// So, here we replace TLSADDR with the sequence:
return EmitLoweredRetpoline(MI, BB);
case X86::CATCHRET:
return EmitLoweredCatchRet(MI, BB);
- case X86::CATCHPAD:
- return EmitLoweredCatchPad(MI, BB);
case X86::SEG_ALLOCA_32:
case X86::SEG_ALLOCA_64:
return EmitLoweredSegAlloca(MI, BB);
MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
MachineBasicBlock *BB) const;
- MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
- MachineBasicBlock *BB) const;
-
MachineBasicBlock *EmitLoweredSegAlloca(MachineInstr &MI,
MachineBasicBlock *BB) const;
[(catchret bb:$dst, bb:$from)]>;
}
-let hasSideEffects = 1, hasCtrlDep = 1, isCodeGenOnly = 1,
- usesCustomInserter = 1 in
-def CATCHPAD : I<0, Pseudo, (outs), (ins), "# CATCHPAD", [(catchpad)]>;
-
-// This instruction is responsible for re-establishing stack pointers after an
-// exception has been caught and we are rejoining normal control flow in the
-// parent function or funclet. It generally sets ESP and EBP, and optionally
-// ESI. It is only needed for 32-bit WinEH, as the runtime restores CSRs for us
-// elsewhere.
-let hasSideEffects = 1, hasCtrlDep = 1, isCodeGenOnly = 1 in
-def EH_RESTORE : I<0, Pseudo, (outs), (ins), "# EH_RESTORE", []>;
-
let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
usesCustomInserter = 1 in {
def EH_SjLj_SetJmp32 : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
--- /dev/null
+; RUN: llc < %s | FileCheck %s
+
+; In PR44697, the register allocator inserted loads into the __except block
+; before the instructions that restore EBP and ESP back to what they should be.
+; Make sure those restore instructions come first in the __except block.
+
+; ModuleID = 't.cpp'
+source_filename = "t.cpp"
+target datalayout = "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:32-n8:16:32-a:0:32-S32"
+target triple = "i386-pc-windows-msvc19.24.28315"
+
+declare i8* @llvm.frameaddress.p0i8(i32 immarg)
+declare i8* @llvm.eh.recoverfp(i8*, i8*)
+declare i8* @llvm.localrecover(i8*, i8*, i32 immarg)
+declare dso_local i32 @_except_handler3(...)
+declare void @llvm.localescape(...)
+
+define dso_local zeroext i1 @invokewrapper(
+ void ()* nocapture %Fn,
+ i1 zeroext %DumpStackAndCleanup,
+ i32* nocapture dereferenceable(4) %RetCode)
+ personality i32 (...)* @_except_handler3 {
+entry:
+ %__exception_code = alloca i32, align 4
+ call void (...) @llvm.localescape(i32* nonnull %__exception_code)
+ invoke void %Fn()
+ to label %return unwind label %catch.dispatch
+
+catch.dispatch: ; preds = %entry
+ %0 = catchswitch within none [label %__except.ret] unwind to caller
+
+__except.ret: ; preds = %catch.dispatch
+ %1 = catchpad within %0 [i8* bitcast (i32 ()* @filter to i8*)]
+ catchret from %1 to label %__except
+
+__except: ; preds = %__except.ret
+ %2 = load i32, i32* %__exception_code, align 4
+ store i32 %2, i32* %RetCode, align 4
+ br label %return
+
+return: ; preds = %entry, %__except
+ %retval.0 = phi i1 [ false, %__except ], [ true, %entry ]
+ ret i1 %retval.0
+}
+
+; CHECK-LABEL: _invokewrapper: # @invokewrapper
+; CHECK: calll *8(%ebp)
+; CHECK: LBB0_2: # %return
+
+; CHECK: LBB0_1: # %__except.ret
+; CHECK-NEXT: movl -24(%ebp), %esp
+; CHECK-NEXT: addl $12, %ebp
+
+; Function Attrs: nofree nounwind
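+; The filter recovers the escaped __exception_code slot in @invokewrapper's
+; frame via llvm.eh.recoverfp and llvm.localrecover, copies the exception code
+; out of the in-flight exception record into that slot, and returns 1
+; (EXCEPTION_EXECUTE_HANDLER) so the __except block runs.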
+define internal i32 @filter() {
+entry:
+ %0 = tail call i8* @llvm.frameaddress.p0i8(i32 1)
+ %1 = tail call i8* @llvm.eh.recoverfp(i8* bitcast (i1 (void ()*, i1, i32*)* @invokewrapper to i8*), i8* %0)
+ %2 = tail call i8* @llvm.localrecover(i8* bitcast (i1 (void ()*, i1, i32*)* @invokewrapper to i8*), i8* %1, i32 0)
+ %__exception_code = bitcast i8* %2 to i32*
+ %3 = getelementptr inbounds i8, i8* %0, i32 -20
+ %4 = bitcast i8* %3 to { i32*, i8* }**
+ %5 = load { i32*, i8* }*, { i32*, i8* }** %4, align 4
+ %6 = getelementptr inbounds { i32*, i8* }, { i32*, i8* }* %5, i32 0, i32 0
+ %7 = load i32*, i32** %6, align 4
+ %8 = load i32, i32* %7, align 4
+ store i32 %8, i32* %__exception_code, align 4
+ ret i32 1
+}