/// This file implements a CFG sorting pass.
///
/// This pass reorders the blocks in a function to put them into topological
-/// order, ignoring loop backedges, and without any loop being interrupted
-/// by a block not dominated by the loop header, with special care to keep the
-/// order as similar as possible to the original order.
+/// order, ignoring loop backedges, and without any loop or exception being
+/// interrupted by a block not dominated by its header, with special care
+/// to keep the order as similar as possible to the original order.
///
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "WebAssembly.h"
+#include "WebAssemblyExceptionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyUtilities.h"
#include "llvm/ADT/PriorityQueue.h"
#define DEBUG_TYPE "wasm-cfg-sort"
namespace {
+
+// Wrapper for loops and exceptions, so the sorting algorithm below can treat
+// both kinds of single-entry regions uniformly.
+class Region {
+public:
+ virtual ~Region() = default;
+ // The region's entry block.
+ virtual MachineBasicBlock *getHeader() const = 0;
+ // Whether MBB belongs to this region.
+ virtual bool contains(const MachineBasicBlock *MBB) const = 0;
+ virtual unsigned getNumBlocks() const = 0;
+ using block_iterator = typename ArrayRef<MachineBasicBlock *>::const_iterator;
+ virtual iterator_range<block_iterator> blocks() const = 0;
+ // True if this region wraps a MachineLoop; false if it wraps an exception.
+ virtual bool isLoop() const = 0;
+};
+
+// Adapter implementing the Region interface on top of a concrete region type
+// (MachineLoop or WebAssemblyException), forwarding all queries to it.
+template <typename T> class ConcreteRegion : public Region {
+ const T *Region;
+
+public:
+ ConcreteRegion(const T *Region) : Region(Region) {}
+ MachineBasicBlock *getHeader() const override { return Region->getHeader(); }
+ bool contains(const MachineBasicBlock *MBB) const override {
+ return Region->contains(MBB);
+ }
+ unsigned getNumBlocks() const override { return Region->getNumBlocks(); }
+ iterator_range<block_iterator> blocks() const override {
+ return Region->blocks();
+ }
+ bool isLoop() const override { return false; }
+};
+
+// Specialization: only loops report isLoop() == true.
+template <> bool ConcreteRegion<MachineLoop>::isLoop() const { return true; }
+
+// This class has information about nested regions; this is analogous to what
+// LoopInfo is for loops.
+class RegionInfo {
+ const MachineLoopInfo &MLI;
+ const WebAssemblyExceptionInfo &WEI;
+ // NOTE(review): 'Regions' is never used in this patch -- confirm whether it
+ // can be removed.
+ std::vector<const Region *> Regions;
+ // Lazily-created wrappers, one Region object per loop/exception, owned here.
+ DenseMap<const MachineLoop *, std::unique_ptr<Region>> LoopMap;
+ DenseMap<const WebAssemblyException *, std::unique_ptr<Region>> ExceptionMap;
+
+public:
+ RegionInfo(const MachineLoopInfo &MLI, const WebAssemblyExceptionInfo &WEI)
+ : MLI(MLI), WEI(WEI) {}
+
+ // Returns the smallest loop or exception that contains MBB, or nullptr if
+ // MBB is in neither. When both contain MBB and have the same number of
+ // blocks, the exception is chosen (the comparison below is strict).
+ const Region *getRegionFor(const MachineBasicBlock *MBB) {
+ const auto *ML = MLI.getLoopFor(MBB);
+ const auto *WE = WEI.getExceptionFor(MBB);
+ if (!ML && !WE)
+ return nullptr;
+ if ((ML && !WE) || (ML && WE && ML->getNumBlocks() < WE->getNumBlocks())) {
+ // If the smallest region containing MBB is a loop
+ if (LoopMap.count(ML))
+ return LoopMap[ML].get();
+ LoopMap[ML] = llvm::make_unique<ConcreteRegion<MachineLoop>>(ML);
+ return LoopMap[ML].get();
+ } else {
+ // If the smallest region containing MBB is an exception
+ if (ExceptionMap.count(WE))
+ return ExceptionMap[WE].get();
+ ExceptionMap[WE] =
+ llvm::make_unique<ConcreteRegion<WebAssemblyException>>(WE);
+ return ExceptionMap[WE].get();
+ }
+ }
+};
+
class WebAssemblyCFGSort final : public MachineFunctionPass {
StringRef getPassName() const override { return "WebAssembly CFG Sort"; }
AU.addPreserved<MachineDominatorTree>();
AU.addRequired<MachineLoopInfo>();
AU.addPreserved<MachineLoopInfo>();
+ AU.addRequired<WebAssemblyExceptionInfo>();
+ AU.addPreserved<WebAssemblyExceptionInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
}
namespace {
+// EH pads are selected first regardless of the block comparison order.
+// When only one of the BBs is an EH pad, we give a higher priority to it, to
+// prevent common mismatches between possibly throwing calls and ehpads they
+// unwind to, as in the example below:
+//
+// bb0:
+// call @foo // If this throws, unwind to bb2
+// bb1:
+// call @bar // If this throws, unwind to bb3
+// bb2 (ehpad):
+// handler_bb2
+// bb3 (ehpad):
+// handler_bb3
+// continuing code
+//
+// Because this pass tries to preserve the original BB order, this order will
+// not change. But this will result in this try-catch structure in CFGStackify,
+// resulting in a mismatch:
+// try
+// try
+// call @foo
+// call @bar // This should unwind to bb3, not bb2!
+// catch
+// handler_bb2
+// end
+// catch
+// handler_bb3
+// end
+// continuing code
+//
+// If we give a higher priority to an EH pad whenever it is ready in this
+// example, when both bb1 and bb2 are ready, we would pick up bb2 first.
+
/// Sort blocks by their number.
struct CompareBlockNumbers {
bool operator()(const MachineBasicBlock *A,
const MachineBasicBlock *B) const {
+ // We give a higher priority to an EH pad
+ if (A->isEHPad() && !B->isEHPad())
+ return false;
+ if (!A->isEHPad() && B->isEHPad())
+ return true;
+
return A->getNumber() > B->getNumber();
}
};
+/// Sort blocks by their number, in reverse.
struct CompareBlockNumbersBackwards {
bool operator()(const MachineBasicBlock *A,
const MachineBasicBlock *B) const {
+ // We give a higher priority to an EH pad
+ if (A->isEHPad() && !B->isEHPad())
+ return false;
+ if (!A->isEHPad() && B->isEHPad())
+ return true;
+
return A->getNumber() < B->getNumber();
}
};
-/// Bookkeeping for a loop to help ensure that we don't mix blocks not dominated
-/// by the loop header among the loop's blocks.
+/// Bookkeeping for a region to help ensure that we don't mix blocks not
+/// dominated by its header among its blocks.
struct Entry {
- const MachineLoop *Loop;
+ const Region *Region;
unsigned NumBlocksLeft;
- /// List of blocks not dominated by Loop's header that are deferred until
- /// after all of Loop's blocks have been seen.
+ /// List of blocks not dominated by Region's header that are deferred until
+ /// after all of Region's blocks have been seen.
std::vector<MachineBasicBlock *> Deferred;
- explicit Entry(const MachineLoop *L)
- : Loop(L), NumBlocksLeft(L->getNumBlocks()) {}
+ explicit Entry(const class Region *R)
+ : Region(R), NumBlocksLeft(R->getNumBlocks()) {}
};
} // end anonymous namespace
-/// Sort the blocks, taking special care to make sure that loops are not
+/// Sort the blocks, taking special care to make sure that regions are not
/// interrupted by blocks not dominated by their header.
/// TODO: There are many opportunities for improving the heuristics here.
/// Explore them.
static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI,
+ const WebAssemblyExceptionInfo &WEI,
const MachineDominatorTree &MDT) {
// Prepare for a topological sort: Record the number of predecessors each
// block has, ignoring loop backedges.
}
// Topological sort the CFG, with additional constraints:
- // - Between a loop header and the last block in the loop, there can be
- // no blocks not dominated by the loop header.
+ // - Between a region header and the last block in the region, there can be
+ // no blocks not dominated by its header.
// - It's desirable to preserve the original block order when possible.
// We use two ready lists; Preferred and Ready. Preferred has recently
// processed successors, to help preserve block sequences from the original
- // order. Ready has the remaining ready blocks.
+ // order. Ready has the remaining ready blocks. EH blocks are picked first
+ // from both queues.
PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
CompareBlockNumbers>
Preferred;
PriorityQueue<MachineBasicBlock *, std::vector<MachineBasicBlock *>,
CompareBlockNumbersBackwards>
Ready;
- SmallVector<Entry, 4> Loops;
+
+ RegionInfo SUI(MLI, WEI);
+ SmallVector<Entry, 4> Entries;
for (MachineBasicBlock *MBB = &MF.front();;) {
- const MachineLoop *L = MLI.getLoopFor(MBB);
- if (L) {
- // If MBB is a loop header, add it to the active loop list. We can't put
- // any blocks that it doesn't dominate until we see the end of the loop.
- if (L->getHeader() == MBB)
- Loops.push_back(Entry(L));
- // For each active loop the block is in, decrement the count. If MBB is
- // the last block in an active loop, take it off the list and pick up any
- // blocks deferred because the header didn't dominate them.
- for (Entry &E : Loops)
- if (E.Loop->contains(MBB) && --E.NumBlocksLeft == 0)
+ const Region *R = SUI.getRegionFor(MBB);
+ if (R) {
+ // If MBB is a region header, add it to the active region list. We can't
+ // put any blocks that it doesn't dominate until we see the end of the
+ // region.
+ if (R->getHeader() == MBB)
+ Entries.push_back(Entry(R));
+ // For each active region the block is in, decrement the count. If MBB is
+ // the last block in an active region, take it off the list and pick up
+ // any blocks deferred because the header didn't dominate them.
+ for (Entry &E : Entries)
+ if (E.Region->contains(MBB) && --E.NumBlocksLeft == 0)
for (auto DeferredBlock : E.Deferred)
Ready.push(DeferredBlock);
- while (!Loops.empty() && Loops.back().NumBlocksLeft == 0)
- Loops.pop_back();
+ while (!Entries.empty() && Entries.back().NumBlocksLeft == 0)
+ Entries.pop_back();
}
// The main topological sort logic.
for (MachineBasicBlock *Succ : MBB->successors()) {
while (!Preferred.empty()) {
Next = Preferred.top();
Preferred.pop();
- // If X isn't dominated by the top active loop header, defer it until that
- // loop is done.
- if (!Loops.empty() &&
- !MDT.dominates(Loops.back().Loop->getHeader(), Next)) {
- Loops.back().Deferred.push_back(Next);
+ // If X isn't dominated by the top active region header, defer it until
+ // that region is done.
+ if (!Entries.empty() &&
+ !MDT.dominates(Entries.back().Region->getHeader(), Next)) {
+ Entries.back().Deferred.push_back(Next);
Next = nullptr;
continue;
}
// If Next was originally ordered before MBB, and it isn't because it was
// loop-rotated above the header, it's not preferred.
if (Next->getNumber() < MBB->getNumber() &&
- (!L || !L->contains(Next) ||
- L->getHeader()->getNumber() < Next->getNumber())) {
+ (!R || !R->contains(Next) ||
+ R->getHeader()->getNumber() < Next->getNumber())) {
Ready.push(Next);
Next = nullptr;
continue;
for (;;) {
Next = Ready.top();
Ready.pop();
- // If Next isn't dominated by the top active loop header, defer it until
- // that loop is done.
- if (!Loops.empty() &&
- !MDT.dominates(Loops.back().Loop->getHeader(), Next)) {
- Loops.back().Deferred.push_back(Next);
+ // If Next isn't dominated by the top active region header, defer it
+ // until that region is done.
+ if (!Entries.empty() &&
+ !MDT.dominates(Entries.back().Region->getHeader(), Next)) {
+ Entries.back().Deferred.push_back(Next);
continue;
}
break;
MaybeUpdateTerminator(MBB);
MBB = Next;
}
- assert(Loops.empty() && "Active loop list not finished");
+ assert(Entries.empty() && "Active sort region list not finished");
MF.RenumberBlocks();
#ifndef NDEBUG
- SmallSetVector<MachineLoop *, 8> OnStack;
+ SmallSetVector<const Region *, 8> OnStack;
// Insert a sentinel representing the degenerate loop that starts at the
// function entry block and includes the entire function as a "loop" that
for (auto &MBB : MF) {
assert(MBB.getNumber() >= 0 && "Renumbered blocks should be non-negative.");
+ const Region *Region = SUI.getRegionFor(&MBB);
+
+ if (Region && &MBB == Region->getHeader()) {
+ if (Region->isLoop()) {
+ // Loop header. The loop predecessor should be sorted above, and the
+ // other predecessors should be backedges below.
+ for (auto Pred : MBB.predecessors())
+ assert(
+ (Pred->getNumber() < MBB.getNumber() || Region->contains(Pred)) &&
+ "Loop header predecessors must be loop predecessors or "
+ "backedges");
+ } else {
+ // Not a loop header. All predecessors should be sorted above.
+ for (auto Pred : MBB.predecessors())
+ assert(Pred->getNumber() < MBB.getNumber() &&
+ "Non-loop-header predecessors should be topologically sorted");
+ }
+ assert(OnStack.insert(Region) &&
+ "Regions should be declared at most once.");
- MachineLoop *Loop = MLI.getLoopFor(&MBB);
- if (Loop && &MBB == Loop->getHeader()) {
- // Loop header. The loop predecessor should be sorted above, and the other
- // predecessors should be backedges below.
- for (auto Pred : MBB.predecessors())
- assert(
- (Pred->getNumber() < MBB.getNumber() || Loop->contains(Pred)) &&
- "Loop header predecessors must be loop predecessors or backedges");
- assert(OnStack.insert(Loop) && "Loops should be declared at most once.");
} else {
// Not a loop header. All predecessors should be sorted above.
for (auto Pred : MBB.predecessors())
assert(Pred->getNumber() < MBB.getNumber() &&
"Non-loop-header predecessors should be topologically sorted");
- assert(OnStack.count(MLI.getLoopFor(&MBB)) &&
- "Blocks must be nested in their loops");
+ assert(OnStack.count(SUI.getRegionFor(&MBB)) &&
+ "Blocks must be nested in their regions");
}
while (OnStack.size() > 1 && &MBB == WebAssembly::getBottom(OnStack.back()))
OnStack.pop_back();
}
assert(OnStack.pop_back_val() == nullptr &&
- "The function entry block shouldn't actually be a loop header");
+ "The function entry block shouldn't actually be a region header");
assert(OnStack.empty() &&
"Control flow stack pushes and pops should be balanced.");
#endif
<< MF.getName() << '\n');
const auto &MLI = getAnalysis<MachineLoopInfo>();
+ const auto &WEI = getAnalysis<WebAssemblyExceptionInfo>();
auto &MDT = getAnalysis<MachineDominatorTree>();
// Liveness is not tracked for VALUE_STACK physreg.
MF.getRegInfo().invalidateLiveness();
- // Sort the blocks, with contiguous loops.
- SortBlocks(MF, MLI, MDT);
+ // Sort the blocks, with contiguous sort regions.
+ SortBlocks(MF, MLI, WEI, MDT);
return true;
}
--- /dev/null
+; RUN: llc < %s -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals -disable-block-placement -verify-machineinstrs -fast-isel=false -machine-sink-split-probability-threshold=0 -cgp-freq-ratio-to-skip-merge=1000 -exception-model=wasm -mattr=+exception-handling | FileCheck %s
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+@_ZTIi = external constant i8*
+@_ZTId = external constant i8*
+
+; Simple test case with two catch clauses
+
+; CHECK-LABEL: test0
+; CHECK: call foo@FUNCTION
+; CHECK: .LBB0_1:
+; CHECK: i32.catch
+; CHECK: i32.call $drop=, _Unwind_CallPersonality@FUNCTION
+; CHECK: i32.call $drop=, __cxa_begin_catch@FUNCTION
+; CHECK: call bar@FUNCTION
+; CHECK: call __cxa_end_catch@FUNCTION
+; CHECK: .LBB0_3:
+; CHECK: i32.call $drop=, __cxa_begin_catch@FUNCTION
+; CHECK: call __cxa_end_catch@FUNCTION
+; CHECK: .LBB0_5:
+; CHECK: call __cxa_rethrow@FUNCTION
+; CHECK: .LBB0_6:
+; CHECK: return
+define void @test0() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+entry:
+ invoke void @foo()
+ to label %try.cont unwind label %catch.dispatch
+
+catch.dispatch: ; preds = %entry
+ %0 = catchswitch within none [label %catch.start] unwind to caller
+
+catch.start: ; preds = %catch.dispatch
+ %1 = catchpad within %0 [i8* bitcast (i8** @_ZTIi to i8*), i8* bitcast (i8** @_ZTId to i8*)]
+ %2 = call i8* @llvm.wasm.get.exception(token %1)
+ %3 = call i32 @llvm.wasm.get.ehselector(token %1)
+ %4 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
+ %matches = icmp eq i32 %3, %4
+ br i1 %matches, label %catch2, label %catch.fallthrough
+
+catch2: ; preds = %catch.start
+ %5 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
+ %6 = bitcast i8* %5 to i32*
+ %7 = load i32, i32* %6, align 4
+ call void @bar() [ "funclet"(token %1) ]
+ call void @__cxa_end_catch() [ "funclet"(token %1) ]
+ catchret from %1 to label %try.cont
+
+catch.fallthrough: ; preds = %catch.start
+ %8 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTId to i8*))
+ %matches1 = icmp eq i32 %3, %8
+ br i1 %matches1, label %catch, label %rethrow
+
+catch: ; preds = %catch.fallthrough
+ %9 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
+ %10 = bitcast i8* %9 to double*
+ %11 = load double, double* %10, align 8
+ call void @__cxa_end_catch() [ "funclet"(token %1) ]
+ catchret from %1 to label %try.cont
+
+rethrow: ; preds = %catch.fallthrough
+ call void @__cxa_rethrow() [ "funclet"(token %1) ]
+ unreachable
+
+try.cont: ; preds = %entry, %catch, %catch2
+ ret void
+}
+
+; Nested try-catches within a catch
+
+; CHECK-LABEL: test1
+; CHECK: call foo@FUNCTION
+; CHECK: .LBB1_1:
+; CHECK: i32.catch $0=, 0
+; CHECK: i32.call $drop=, _Unwind_CallPersonality@FUNCTION, $0
+; CHECK: i32.call $drop=, __cxa_begin_catch@FUNCTION, $0
+; CHECK: call foo@FUNCTION
+; CHECK: .LBB1_3:
+; CHECK: i32.catch $0=, 0
+; CHECK: i32.call $drop=, _Unwind_CallPersonality@FUNCTION, $0
+; CHECK: i32.call $drop=, __cxa_begin_catch@FUNCTION, $0
+; CHECK: call foo@FUNCTION
+; CHECK: .LBB1_5:
+; CHECK: catch_all
+; CHECK: call __cxa_end_catch@FUNCTION
+; CHECK: rethrow
+; CHECK: .LBB1_6:
+; CHECK: call __cxa_rethrow@FUNCTION
+; CHECK: rethrow
+; CHECK: .LBB1_7:
+; CHECK: call __cxa_end_catch@FUNCTION
+; CHECK: .LBB1_8:
+; CHECK: catch_all
+; CHECK: call __cxa_end_catch@FUNCTION
+; CHECK: .LBB1_9:
+; CHECK: call __cxa_rethrow@FUNCTION
+; CHECK: rethrow
+; CHECK: .LBB1_10:
+; CHECK: call __cxa_end_catch@FUNCTION
+; CHECK: .LBB1_11:
+; CHECK: return
+define hidden void @test1() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+entry:
+ invoke void @foo()
+ to label %try.cont11 unwind label %catch.dispatch
+
+catch.dispatch: ; preds = %entry
+ %0 = catchswitch within none [label %catch.start] unwind to caller
+
+catch.start: ; preds = %catch.dispatch
+ %1 = catchpad within %0 [i8* bitcast (i8** @_ZTIi to i8*)]
+ %2 = call i8* @llvm.wasm.get.exception(token %1)
+ %3 = call i32 @llvm.wasm.get.ehselector(token %1)
+ %4 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
+ %matches = icmp eq i32 %3, %4
+ br i1 %matches, label %catch, label %rethrow
+
+catch: ; preds = %catch.start
+ %5 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
+ %6 = bitcast i8* %5 to i32*
+ %7 = load i32, i32* %6, align 4
+ invoke void @foo() [ "funclet"(token %1) ]
+ to label %try.cont unwind label %catch.dispatch2
+
+catch.dispatch2: ; preds = %catch
+ %8 = catchswitch within %1 [label %catch.start3] unwind label %ehcleanup9
+
+catch.start3: ; preds = %catch.dispatch2
+ %9 = catchpad within %8 [i8* bitcast (i8** @_ZTIi to i8*)]
+ %10 = call i8* @llvm.wasm.get.exception(token %9)
+ %11 = call i32 @llvm.wasm.get.ehselector(token %9)
+ %12 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
+ %matches4 = icmp eq i32 %11, %12
+ br i1 %matches4, label %catch6, label %rethrow5
+
+catch6: ; preds = %catch.start3
+ %13 = call i8* @__cxa_begin_catch(i8* %10) [ "funclet"(token %9) ]
+ %14 = bitcast i8* %13 to i32*
+ %15 = load i32, i32* %14, align 4
+ invoke void @foo() [ "funclet"(token %9) ]
+ to label %invoke.cont8 unwind label %ehcleanup
+
+invoke.cont8: ; preds = %catch6
+ call void @__cxa_end_catch() [ "funclet"(token %9) ]
+ catchret from %9 to label %try.cont
+
+rethrow5: ; preds = %catch.start3
+ invoke void @__cxa_rethrow() [ "funclet"(token %9) ]
+ to label %unreachable unwind label %ehcleanup9
+
+try.cont: ; preds = %catch, %invoke.cont8
+ call void @__cxa_end_catch() [ "funclet"(token %1) ]
+ catchret from %1 to label %try.cont11
+
+rethrow: ; preds = %catch.start
+ call void @__cxa_rethrow() [ "funclet"(token %1) ]
+ unreachable
+
+try.cont11: ; preds = %entry, %try.cont
+ ret void
+
+ehcleanup: ; preds = %catch6
+ %16 = cleanuppad within %9 []
+ call void @__cxa_end_catch() [ "funclet"(token %16) ]
+ cleanupret from %16 unwind label %ehcleanup9
+
+ehcleanup9: ; preds = %ehcleanup, %rethrow5, %catch.dispatch2
+ %17 = cleanuppad within %1 []
+ call void @__cxa_end_catch() [ "funclet"(token %17) ]
+ cleanupret from %17 unwind to caller
+
+unreachable: ; preds = %rethrow5
+ unreachable
+}
+
+; Nested loop within a catch clause
+
+; CHECK-LABEL: test2
+; CHECK: call foo@FUNCTION
+; CHECK: .LBB2_1:
+; CHECK: i32.catch
+; CHECK: i32.call $drop=, __cxa_begin_catch@FUNCTION
+; CHECK: .LBB2_2:
+; CHECK: call foo@FUNCTION
+; CHECK: .LBB2_4:
+; CHECK: catch_all
+; CHECK: call __cxa_end_catch@FUNCTION
+; CHECK: .LBB2_5:
+; CHECK: i32.catch
+; CHECK: call __clang_call_terminate@FUNCTION
+; CHECK: unreachable
+; CHECK: .LBB2_6:
+; CHECK: catch_all
+; CHECK: call _ZSt9terminatev@FUNCTION
+; CHECK: unreachable
+; CHECK: .LBB2_7:
+; CHECK: rethrow
+; CHECK: .LBB2_8:
+; CHECK: call __cxa_end_catch@FUNCTION
+; CHECK: .LBB2_10:
+; CHECK: return
+define void @test2() personality i8* bitcast (i32 (...)* @__gxx_wasm_personality_v0 to i8*) {
+entry:
+ invoke void @foo()
+ to label %try.cont unwind label %catch.dispatch
+
+catch.dispatch: ; preds = %entry
+ %0 = catchswitch within none [label %catch.start] unwind to caller
+
+catch.start: ; preds = %catch.dispatch
+ %1 = catchpad within %0 [i8* null]
+ %2 = call i8* @llvm.wasm.get.exception(token %1)
+ %3 = call i32 @llvm.wasm.get.ehselector(token %1)
+ %4 = call i8* @__cxa_begin_catch(i8* %2) [ "funclet"(token %1) ]
+ br label %for.cond
+
+for.cond: ; preds = %for.inc, %catch.start
+ %i.0 = phi i32 [ 0, %catch.start ], [ %inc, %for.inc ]
+ %cmp = icmp slt i32 %i.0, 50
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ invoke void @foo() [ "funclet"(token %1) ]
+ to label %for.inc unwind label %ehcleanup
+
+for.inc: ; preds = %for.body
+ %inc = add nsw i32 %i.0, 1
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ call void @__cxa_end_catch() [ "funclet"(token %1) ]
+ catchret from %1 to label %try.cont
+
+try.cont: ; preds = %for.end, %entry
+ ret void
+
+ehcleanup: ; preds = %for.body
+ %5 = cleanuppad within %1 []
+ invoke void @__cxa_end_catch() [ "funclet"(token %5) ]
+ to label %invoke.cont2 unwind label %terminate
+
+invoke.cont2: ; preds = %ehcleanup
+ cleanupret from %5 unwind to caller
+
+terminate: ; preds = %ehcleanup
+ %6 = cleanuppad within %5 []
+ %7 = call i8* @llvm.wasm.get.exception(token %6)
+ call void @__clang_call_terminate(i8* %7) [ "funclet"(token %6) ]
+ unreachable
+}
+
+declare void @foo()
+declare void @bar()
+declare i32 @__gxx_wasm_personality_v0(...)
+declare i8* @llvm.wasm.get.exception(token)
+declare i32 @llvm.wasm.get.ehselector(token)
+declare i32 @llvm.eh.typeid.for(i8*)
+declare i8* @__cxa_begin_catch(i8*)
+declare void @__cxa_end_catch()
+declare void @__cxa_rethrow()
+declare void @__clang_call_terminate(i8*)
+declare void @_ZSt9terminatev()