From: cynecx
Date: Thu, 13 May 2021 18:05:11 +0000 (+0100)
Subject: Support unwinding from inline assembly
X-Git-Tag: llvmorg-14-init~6769
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=8ec9fd483949ca3b23053effcac226dcc56e7a95;p=platform%2Fupstream%2Fllvm.git

Support unwinding from inline assembly

I've taken the following steps to add unwinding support from inline assembly:

1) Add a new `unwind` "attribute" (like `sideeffect`) to the asm syntax:

```
invoke void asm sideeffect unwind "call thrower", "~{dirflag},~{fpsr},~{flags}"()
    to label %exit unwind label %uexit
```

2) Add bitcode writing/reading support plus LLVM IR parsing.

3) Emit EH labels around inline-assembly lowering (SelectionDAGBuilder + GlobalISel) when `InlineAsm::canThrow` is enabled.

4) Tweak the InstCombineCalls/InlineFunction passes so they no longer mark inline-assembly "calls" as nounwind.

5) Add clang support by introducing a new clobber, "unwind", which lowers to `canThrow` being enabled (a usage sketch follows the patch).

6) Don't allow unwinding from callbr.

Reviewed By: Amanieu

Differential Revision: https://reviews.llvm.org/D95745

---

diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td index fef8baf..0683469 100644 --- a/clang/include/clang/Basic/DiagnosticSemaKinds.td +++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td @@ -8607,6 +8607,7 @@ let CategoryName = "Inline Assembly Issue" in { "asm constraint has an unexpected number of alternatives: %0 vs %1">; def err_asm_incomplete_type : Error<"asm operand has incomplete type %0">; def err_asm_unknown_register_name : Error<"unknown register name '%0' in asm">; + def err_asm_unwind_and_goto : Error<"unwind clobber can't be used with asm goto">; def err_asm_invalid_global_var_reg : Error<"register '%0' unsuitable for " "global register variables on this target">; def err_asm_register_size_mismatch : Error<"size of register '%0' does not " diff --git a/clang/lib/Basic/TargetInfo.cpp b/clang/lib/Basic/TargetInfo.cpp index a431ec1..2f200a9 100644 --- a/clang/lib/Basic/TargetInfo.cpp +++ b/clang/lib/Basic/TargetInfo.cpp @@ -479,8 +479,8 @@ static StringRef removeGCCRegisterPrefix(StringRef Name) { /// a valid clobber in an inline asm statement. This is used by /// Sema.
bool TargetInfo::isValidClobber(StringRef Name) const { - return (isValidGCCRegisterName(Name) || - Name == "memory" || Name == "cc"); + return (isValidGCCRegisterName(Name) || Name == "memory" || Name == "cc" || + Name == "unwind"); } /// isValidGCCRegisterName - Returns whether the passed in string diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp index 0fc0ba1..cc9b1ae 100644 --- a/clang/lib/CodeGen/CGStmt.cpp +++ b/clang/lib/CodeGen/CGStmt.cpp @@ -2138,13 +2138,15 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str, } static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect, - bool ReadOnly, bool ReadNone, bool NoMerge, - const AsmStmt &S, + bool HasUnwindClobber, bool ReadOnly, + bool ReadNone, bool NoMerge, const AsmStmt &S, const std::vector &ResultRegTypes, CodeGenFunction &CGF, std::vector &RegResults) { - Result.addAttribute(llvm::AttributeList::FunctionIndex, - llvm::Attribute::NoUnwind); + if (!HasUnwindClobber) + Result.addAttribute(llvm::AttributeList::FunctionIndex, + llvm::Attribute::NoUnwind); + if (NoMerge) Result.addAttribute(llvm::AttributeList::FunctionIndex, llvm::Attribute::NoMerge); @@ -2491,13 +2493,18 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { } Constraints += InOutConstraints; + bool HasUnwindClobber = false; + // Clobbers for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) { StringRef Clobber = S.getClobber(i); if (Clobber == "memory") ReadOnly = ReadNone = false; - else if (Clobber != "cc") { + else if (Clobber == "unwind") { + HasUnwindClobber = true; + continue; + } else if (Clobber != "cc") { Clobber = getTarget().getNormalizedGCCRegisterName(Clobber); if (CGM.getCodeGenOpts().StackClashProtector && getTarget().isSPRegName(Clobber)) { @@ -2531,6 +2538,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { Constraints += '}'; } + assert(!(HasUnwindClobber && IsGCCAsmGoto) && + "unwind clobber can't be used with asm goto"); + // Add machine specific clobbers std::string MachineClobbers = getTarget().getClobbers(); if (!MachineClobbers.empty()) { @@ -2553,23 +2563,28 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0; llvm::InlineAsm::AsmDialect AsmDialect = isa(&S) ? 
llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT; - llvm::InlineAsm *IA = - llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect, - /* IsAlignStack */ false, AsmDialect); + llvm::InlineAsm *IA = llvm::InlineAsm::get( + FTy, AsmString, Constraints, HasSideEffect, + /* IsAlignStack */ false, AsmDialect, HasUnwindClobber); std::vector RegResults; if (IsGCCAsmGoto) { llvm::CallBrInst *Result = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args); EmitBlock(Fallthrough); - UpdateAsmCallInst(cast(*Result), HasSideEffect, ReadOnly, - ReadNone, InNoMergeAttributedStmt, S, ResultRegTypes, - *this, RegResults); + UpdateAsmCallInst(cast(*Result), HasSideEffect, false, + ReadOnly, ReadNone, InNoMergeAttributedStmt, S, + ResultRegTypes, *this, RegResults); + } else if (HasUnwindClobber) { + llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, ""); + UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone, + InNoMergeAttributedStmt, S, ResultRegTypes, *this, + RegResults); } else { llvm::CallInst *Result = Builder.CreateCall(IA, Args, getBundlesForFunclet(IA)); - UpdateAsmCallInst(cast(*Result), HasSideEffect, ReadOnly, - ReadNone, InNoMergeAttributedStmt, S, ResultRegTypes, - *this, RegResults); + UpdateAsmCallInst(cast(*Result), HasSideEffect, false, + ReadOnly, ReadNone, InNoMergeAttributedStmt, S, + ResultRegTypes, *this, RegResults); } assert(RegResults.size() == ResultRegTypes.size()); diff --git a/clang/lib/Sema/SemaStmtAsm.cpp b/clang/lib/Sema/SemaStmtAsm.cpp index 3b631bf..a50ea7ee 100644 --- a/clang/lib/Sema/SemaStmtAsm.cpp +++ b/clang/lib/Sema/SemaStmtAsm.cpp @@ -228,7 +228,7 @@ getClobberConflictLocation(MultiExprArg Exprs, StringLiteral **Constraints, StringRef Clobber = Clobbers[i]->getString(); // We only check registers, therefore we don't check cc and memory // clobbers - if (Clobber == "cc" || Clobber == "memory") + if (Clobber == "cc" || Clobber == "memory" || Clobber == "unwind") continue; Clobber = Target.getNormalizedGCCRegisterName(Clobber, true); // Go over the output's registers we collected @@ -453,6 +453,8 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, << Info.getConstraintStr(); } + Optional UnwindClobberLoc; + // Check that the clobbers are valid. for (unsigned i = 0; i != NumClobbers; i++) { StringLiteral *Literal = Clobbers[i]; @@ -468,6 +470,19 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, NumInputs, Names, Constraints, Exprs.data(), AsmString, NumClobbers, Clobbers, NumLabels, RParenLoc); } + + if (Clobber == "unwind") { + UnwindClobberLoc = Literal->getBeginLoc(); + } + } + + // Using unwind clobber and asm-goto together is not supported right now. 
+ if (UnwindClobberLoc && NumLabels > 0) { + targetDiag(*UnwindClobberLoc, diag::err_asm_unwind_and_goto); + return new (Context) + GCCAsmStmt(Context, AsmLoc, IsSimple, IsVolatile, NumOutputs, NumInputs, + Names, Constraints, Exprs.data(), AsmString, NumClobbers, + Clobbers, NumLabels, RParenLoc); } GCCAsmStmt *NS = diff --git a/clang/test/CodeGenCXX/unwind-inline-asm.cpp b/clang/test/CodeGenCXX/unwind-inline-asm.cpp new file mode 100644 index 0000000..725b043 --- /dev/null +++ b/clang/test/CodeGenCXX/unwind-inline-asm.cpp @@ -0,0 +1,34 @@ +// RUN: %clang_cc1 -triple x86_64-unknown-linux -emit-llvm -DUNWIND -fcxx-exceptions -fexceptions -o - %s | FileCheck -check-prefixes CHECK,CHECK-UNWIND %s +// RUN: %clang_cc1 -triple x86_64-unknown-linux -emit-llvm -fcxx-exceptions -fexceptions -o - %s | FileCheck -check-prefixes CHECK,CHECK-NO-UNWIND %s + +extern "C" void printf(const char *fmt, ...); + +struct DropBomb { + bool defused = false; + + ~DropBomb() { + if (defused) { + return; + } + printf("Boom!\n"); + } +}; + +extern "C" void trap() { + throw "Trap"; +} + +// CHECK: define dso_local void @test() +extern "C" void test() { + DropBomb bomb; +// CHECK-UNWIND: invoke void asm sideeffect unwind "call trap" +// CHECK-NO-UNWIND: call void asm sideeffect "call trap" +#ifdef UNWIND + asm volatile("call trap" :: + : "unwind"); +#else + asm volatile("call trap" :: + :); +#endif + bomb.defused = true; +} diff --git a/llvm/bindings/go/llvm/ir.go b/llvm/bindings/go/llvm/ir.go index bb8896e..77213ee 100644 --- a/llvm/bindings/go/llvm/ir.go +++ b/llvm/bindings/go/llvm/ir.go @@ -1304,12 +1304,12 @@ func (v Value) IncomingBlock(i int) (bb BasicBlock) { } // Operations on inline assembly -func InlineAsm(t Type, asmString, constraints string, hasSideEffects, isAlignStack bool, dialect InlineAsmDialect) (rv Value) { +func InlineAsm(t Type, asmString, constraints string, hasSideEffects, isAlignStack bool, dialect InlineAsmDialect, canThrow bool) (rv Value) { casm := C.CString(asmString) defer C.free(unsafe.Pointer(casm)) cconstraints := C.CString(constraints) defer C.free(unsafe.Pointer(cconstraints)) - rv.C = C.LLVMGetInlineAsm(t.C, casm, C.size_t(len(asmString)), cconstraints, C.size_t(len(constraints)), boolToLLVMBool(hasSideEffects), boolToLLVMBool(isAlignStack), C.LLVMInlineAsmDialect(dialect)) + rv.C = C.LLVMGetInlineAsm(t.C, casm, C.size_t(len(asmString)), cconstraints, C.size_t(len(constraints)), boolToLLVMBool(hasSideEffects), boolToLLVMBool(isAlignStack), C.LLVMInlineAsmDialect(dialect), boolToLLVMBool(canThrow)) return } diff --git a/llvm/include/llvm-c/Core.h b/llvm/include/llvm-c/Core.h index b320052..7a17b69 100644 --- a/llvm/include/llvm-c/Core.h +++ b/llvm/include/llvm-c/Core.h @@ -872,11 +872,11 @@ void LLVMAppendModuleInlineAsm(LLVMModuleRef M, const char *Asm, size_t Len); * * @see InlineAsm::get() */ -LLVMValueRef LLVMGetInlineAsm(LLVMTypeRef Ty, - char *AsmString, size_t AsmStringSize, - char *Constraints, size_t ConstraintsSize, - LLVMBool HasSideEffects, LLVMBool IsAlignStack, - LLVMInlineAsmDialect Dialect); +LLVMValueRef LLVMGetInlineAsm(LLVMTypeRef Ty, char *AsmString, + size_t AsmStringSize, char *Constraints, + size_t ConstraintsSize, LLVMBool HasSideEffects, + LLVMBool IsAlignStack, + LLVMInlineAsmDialect Dialect, LLVMBool CanThrow); /** * Obtain the context to which this module is associated. 
diff --git a/llvm/include/llvm/Bitcode/LLVMBitCodes.h b/llvm/include/llvm/Bitcode/LLVMBitCodes.h index bc1c5d4..36c0354 100644 --- a/llvm/include/llvm/Bitcode/LLVMBitCodes.h +++ b/llvm/include/llvm/Bitcode/LLVMBitCodes.h @@ -373,12 +373,15 @@ enum ConstantsCodes { CST_CODE_CE_INBOUNDS_GEP = 20, // INBOUNDS_GEP: [n x operands] CST_CODE_BLOCKADDRESS = 21, // CST_CODE_BLOCKADDRESS [fnty, fnval, bb#] CST_CODE_DATA = 22, // DATA: [n x elements] - CST_CODE_INLINEASM = 23, // INLINEASM: [sideeffect|alignstack| + CST_CODE_INLINEASM_OLD2 = 23, // INLINEASM: [sideeffect|alignstack| // asmdialect,asmstr,conststr] CST_CODE_CE_GEP_WITH_INRANGE_INDEX = 24, // [opty, flags, n x operands] CST_CODE_CE_UNOP = 25, // CE_UNOP: [opcode, opval] CST_CODE_POISON = 26, // POISON CST_CODE_DSO_LOCAL_EQUIVALENT = 27, // DSO_LOCAL_EQUIVALENT [gvty, gv] + CST_CODE_INLINEASM = 28, // INLINEASM: [sideeffect|alignstack| + // asmdialect|unwind, + // asmstr,conststr] }; /// CastOpcodes - These are values used in the bitcode files to encode which diff --git a/llvm/include/llvm/IR/InlineAsm.h b/llvm/include/llvm/IR/InlineAsm.h index b6f3770..1a0767a 100644 --- a/llvm/include/llvm/IR/InlineAsm.h +++ b/llvm/include/llvm/IR/InlineAsm.h @@ -44,10 +44,11 @@ private: bool HasSideEffects; bool IsAlignStack; AsmDialect Dialect; + bool CanThrow; InlineAsm(FunctionType *Ty, const std::string &AsmString, const std::string &Constraints, bool hasSideEffects, - bool isAlignStack, AsmDialect asmDialect); + bool isAlignStack, AsmDialect asmDialect, bool canThrow); /// When the ConstantUniqueMap merges two types and makes two InlineAsms /// identical, it destroys one of them with this method. @@ -62,11 +63,12 @@ public: static InlineAsm *get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack = false, - AsmDialect asmDialect = AD_ATT); + AsmDialect asmDialect = AD_ATT, bool canThrow = false); bool hasSideEffects() const { return HasSideEffects; } bool isAlignStack() const { return IsAlignStack; } AsmDialect getDialect() const { return Dialect; } + bool canThrow() const { return CanThrow; } /// getType - InlineAsm's are always pointers. /// diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp index df2bea0..55cbaac 100644 --- a/llvm/lib/AsmParser/LLParser.cpp +++ b/llvm/lib/AsmParser/LLParser.cpp @@ -3458,18 +3458,19 @@ bool LLParser::parseValID(ValID &ID, PerFunctionState *PFS) { case lltok::kw_asm: { // ValID ::= 'asm' SideEffect? AlignStack? IntelDialect? 
STRINGCONSTANT ',' // STRINGCONSTANT - bool HasSideEffect, AlignStack, AsmDialect; + bool HasSideEffect, AlignStack, AsmDialect, CanThrow; Lex.Lex(); if (parseOptionalToken(lltok::kw_sideeffect, HasSideEffect) || parseOptionalToken(lltok::kw_alignstack, AlignStack) || parseOptionalToken(lltok::kw_inteldialect, AsmDialect) || + parseOptionalToken(lltok::kw_unwind, CanThrow) || parseStringConstant(ID.StrVal) || parseToken(lltok::comma, "expected comma in inline asm expression") || parseToken(lltok::StringConstant, "expected constraint string")) return true; ID.StrVal2 = Lex.getStrVal(); - ID.UIntVal = unsigned(HasSideEffect) | (unsigned(AlignStack)<<1) | - (unsigned(AsmDialect)<<2); + ID.UIntVal = unsigned(HasSideEffect) | (unsigned(AlignStack) << 1) | + (unsigned(AsmDialect) << 2) | (unsigned(CanThrow) << 3); ID.Kind = ValID::t_InlineAsm; return false; } @@ -5569,9 +5570,9 @@ bool LLParser::convertValIDToValue(Type *Ty, ValID &ID, Value *&V, case ValID::t_InlineAsm: { if (!ID.FTy || !InlineAsm::Verify(ID.FTy, ID.StrVal2)) return error(ID.Loc, "invalid type for inline asm constraint string"); - V = InlineAsm::get(ID.FTy, ID.StrVal, ID.StrVal2, ID.UIntVal & 1, - (ID.UIntVal >> 1) & 1, - (InlineAsm::AsmDialect(ID.UIntVal >> 2))); + V = InlineAsm::get( + ID.FTy, ID.StrVal, ID.StrVal2, ID.UIntVal & 1, (ID.UIntVal >> 1) & 1, + InlineAsm::AsmDialect((ID.UIntVal >> 2) & 1), (ID.UIntVal >> 3) & 1); return false; } case ValID::t_GlobalName: diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp index 38d7b77..91a65c7 100644 --- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp @@ -2830,7 +2830,7 @@ Error BitcodeReader::parseConstants() { } // This version adds support for the asm dialect keywords (e.g., // inteldialect). - case bitc::CST_CODE_INLINEASM: { + case bitc::CST_CODE_INLINEASM_OLD2: { if (Record.size() < 2) return error("Invalid record"); std::string AsmStr, ConstrStr; @@ -2855,6 +2855,33 @@ Error BitcodeReader::parseConstants() { InlineAsm::AsmDialect(AsmDialect)); break; } + // This version adds support for the unwind keyword. 
+ case bitc::CST_CODE_INLINEASM: { + if (Record.size() < 2) + return error("Invalid record"); + std::string AsmStr, ConstrStr; + bool HasSideEffects = Record[0] & 1; + bool IsAlignStack = (Record[0] >> 1) & 1; + unsigned AsmDialect = (Record[0] >> 2) & 1; + bool CanThrow = (Record[0] >> 3) & 1; + unsigned AsmStrSize = Record[1]; + if (2 + AsmStrSize >= Record.size()) + return error("Invalid record"); + unsigned ConstStrSize = Record[2 + AsmStrSize]; + if (3 + AsmStrSize + ConstStrSize > Record.size()) + return error("Invalid record"); + + for (unsigned i = 0; i != AsmStrSize; ++i) + AsmStr += (char)Record[2 + i]; + for (unsigned i = 0; i != ConstStrSize; ++i) + ConstrStr += (char)Record[3 + AsmStrSize + i]; + UpgradeInlineAsmString(&AsmStr); + V = InlineAsm::get( + cast(getPointerElementFlatType(CurFullTy)), AsmStr, + ConstrStr, HasSideEffects, IsAlignStack, + InlineAsm::AsmDialect(AsmDialect), CanThrow); + break; + } case bitc::CST_CODE_BLOCKADDRESS:{ if (Record.size() < 3) return error("Invalid record"); diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp index 3a7edd4..7261e30 100644 --- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp +++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -2429,9 +2429,9 @@ void ModuleBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal, } if (const InlineAsm *IA = dyn_cast(V)) { - Record.push_back(unsigned(IA->hasSideEffects()) | - unsigned(IA->isAlignStack()) << 1 | - unsigned(IA->getDialect()&1) << 2); + Record.push_back( + unsigned(IA->hasSideEffects()) | unsigned(IA->isAlignStack()) << 1 | + unsigned(IA->getDialect() & 1) << 2 | unsigned(IA->canThrow()) << 3); // Add the asm string. const std::string &AsmStr = IA->getAsmString(); diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp index f9a5548..be7a7641 100644 --- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -2412,8 +2412,6 @@ bool IRTranslator::translateInvoke(const User &U, const BasicBlock *EHPadBB = I.getSuccessor(1); const Function *Fn = I.getCalledFunction(); - if (I.isInlineAsm()) - return false; // FIXME: support invoking patchpoint and statepoint intrinsics. if (Fn && Fn->isIntrinsic()) @@ -2431,12 +2429,37 @@ bool IRTranslator::translateInvoke(const User &U, if (!isa(EHPadBB->getFirstNonPHI())) return false; + bool LowerInlineAsm = false; + if (I.isInlineAsm()) { + const InlineAsm *IA = cast(I.getCalledOperand()); + if (!IA->canThrow()) { + // Fast path without emitting EH_LABELs. + + if (!translateInlineAsm(I, MIRBuilder)) + return false; + + MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB(), + *ReturnMBB = &getMBB(*ReturnBB); + + // Update successor info. + addSuccessorWithProb(InvokeMBB, ReturnMBB, BranchProbability::getOne()); + + MIRBuilder.buildBr(*ReturnMBB); + return true; + } else { + LowerInlineAsm = true; + } + } + // Emit the actual call, bracketed by EH_LABELs so that the MF knows about // the region covered by the try. 
MCSymbol *BeginSymbol = Context.createTempSymbol(); MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol); - if (!translateCallBase(I, MIRBuilder)) + if (LowerInlineAsm) { + if (!translateInlineAsm(I, MIRBuilder)) + return false; + } else if (!translateCallBase(I, MIRBuilder)) return false; MCSymbol *EndSymbol = Context.createTempSymbol(); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 4b2d811..e9eea5b 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -2873,7 +2873,7 @@ void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) { const Value *Callee(I.getCalledOperand()); const Function *Fn = dyn_cast(Callee); if (isa(Callee)) - visitInlineAsm(I); + visitInlineAsm(I, EHPadBB); else if (Fn && Fn->isIntrinsic()) { switch (Fn->getIntrinsicID()) { default: @@ -7281,36 +7281,72 @@ void SelectionDAGBuilder::visitVectorPredicationIntrinsic( setValue(&VPIntrin, Result); } -std::pair -SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI, - const BasicBlock *EHPadBB) { +SDValue SelectionDAGBuilder::lowerStartEH(SDValue Chain, + const BasicBlock *EHPadBB, + MCSymbol *&BeginLabel) { MachineFunction &MF = DAG.getMachineFunction(); MachineModuleInfo &MMI = MF.getMMI(); - MCSymbol *BeginLabel = nullptr; - if (EHPadBB) { - // Insert a label before the invoke call to mark the try range. This can be - // used to detect deletion of the invoke via the MachineModuleInfo. - BeginLabel = MMI.getContext().createTempSymbol(); + // Insert a label before the invoke call to mark the try range. This can be + // used to detect deletion of the invoke via the MachineModuleInfo. + BeginLabel = MMI.getContext().createTempSymbol(); - // For SjLj, keep track of which landing pads go with which invokes - // so as to maintain the ordering of pads in the LSDA. - unsigned CallSiteIndex = MMI.getCurrentCallSite(); - if (CallSiteIndex) { - MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex); - LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex); + // For SjLj, keep track of which landing pads go with which invokes + // so as to maintain the ordering of pads in the LSDA. + unsigned CallSiteIndex = MMI.getCurrentCallSite(); + if (CallSiteIndex) { + MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex); + LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex); - // Now that the call site is handled, stop tracking it. - MMI.setCurrentCallSite(0); - } + // Now that the call site is handled, stop tracking it. + MMI.setCurrentCallSite(0); + } + + return DAG.getEHLabel(getCurSDLoc(), Chain, BeginLabel); +} +SDValue SelectionDAGBuilder::lowerEndEH(SDValue Chain, const InvokeInst *II, + const BasicBlock *EHPadBB, + MCSymbol *BeginLabel) { + assert(BeginLabel && "BeginLabel should've been set"); + + MachineFunction &MF = DAG.getMachineFunction(); + MachineModuleInfo &MMI = MF.getMMI(); + + // Insert a label at the end of the invoke call to mark the try range. This + // can be used to detect deletion of the invoke via the MachineModuleInfo. + MCSymbol *EndLabel = MMI.getContext().createTempSymbol(); + Chain = DAG.getEHLabel(getCurSDLoc(), Chain, EndLabel); + + // Inform MachineModuleInfo of range. + auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn()); + // There is a platform (e.g. wasm) that uses funclet style IR but does not + // actually use outlined funclets and their LSDA info style. 
+ if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) { + assert(II && "II should've been set"); + WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo(); + EHInfo->addIPToStateRange(II, BeginLabel, EndLabel); + } else if (!isScopedEHPersonality(Pers)) { + assert(EHPadBB); + MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel); + } + + return Chain; +} + +std::pair +SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI, + const BasicBlock *EHPadBB) { + MCSymbol *BeginLabel = nullptr; + + if (EHPadBB) { // Both PendingLoads and PendingExports must be flushed here; // this call might not return. (void)getRoot(); - DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel)); - + DAG.setRoot(lowerStartEH(getControlRoot(), EHPadBB, BeginLabel)); CLI.setChain(getRoot()); } + const TargetLowering &TLI = DAG.getTargetLoweringInfo(); std::pair Result = TLI.LowerCallTo(CLI); @@ -7332,22 +7368,8 @@ SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI, } if (EHPadBB) { - // Insert a label at the end of the invoke call to mark the try range. This - // can be used to detect deletion of the invoke via the MachineModuleInfo. - MCSymbol *EndLabel = MMI.getContext().createTempSymbol(); - DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel)); - - // Inform MachineModuleInfo of range. - auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn()); - // There is a platform (e.g. wasm) that uses funclet style IR but does not - // actually use outlined funclets and their LSDA info style. - if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) { - assert(CLI.CB); - WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo(); - EHInfo->addIPToStateRange(cast(CLI.CB), BeginLabel, EndLabel); - } else if (!isScopedEHPersonality(Pers)) { - MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel); - } + DAG.setRoot(lowerEndEH(getRoot(), cast_or_null(CLI.CB), EHPadBB, + BeginLabel)); } return Result; @@ -8317,7 +8339,8 @@ public: } // end anonymous namespace /// visitInlineAsm - Handle a call to an InlineAsm object. -void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call) { +void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call, + const BasicBlock *EHPadBB) { const InlineAsm *IA = cast(Call.getCalledOperand()); /// ConstraintOperands - Information about all of the constraints. @@ -8405,19 +8428,28 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call) { ExtraInfo.update(T); } - // We won't need to flush pending loads if this asm doesn't touch // memory and is nonvolatile. SDValue Flag, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot(); + bool EmitEHLabels = isa(Call) && IA->canThrow(); + if (EmitEHLabels) { + assert(EHPadBB && "InvokeInst must have an EHPadBB"); + } bool IsCallBr = isa(Call); - if (IsCallBr) { - // If this is a callbr we need to flush pending exports since inlineasm_br - // is a terminator. We need to do this before nodes are glued to - // the inlineasm_br node. + + if (IsCallBr || EmitEHLabels) { + // If this is a callbr or invoke we need to flush pending exports since + // inlineasm_br and invoke are terminators. + // We need to do this before nodes are glued to the inlineasm_br node. Chain = getControlRoot(); } + MCSymbol *BeginLabel = nullptr; + if (EmitEHLabels) { + Chain = lowerStartEH(Chain, EHPadBB, BeginLabel); + } + // Second pass over the constraints: compute which constraint option to use. 
for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) { // If this is an output operand with a matching input operand, look up the @@ -8808,8 +8840,13 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call) { if (!OutChains.empty()) Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains); + if (EmitEHLabels) { + Chain = lowerEndEH(Chain, cast(&Call), EHPadBB, BeginLabel); + } + // Only Update Root if inline assembly has a memory effect. - if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr) + if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr || + EmitEHLabels) DAG.setRoot(Chain); } diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h index 156f4c0..084ad5d 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h @@ -759,7 +759,8 @@ private: void visitStoreToSwiftError(const StoreInst &I); void visitFreeze(const FreezeInst &I); - void visitInlineAsm(const CallBase &Call); + void visitInlineAsm(const CallBase &Call, + const BasicBlock *EHPadBB = nullptr); void visitIntrinsicCall(const CallInst &I, unsigned Intrinsic); void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic); void visitConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI); @@ -816,6 +817,11 @@ private: /// Lowers CallInst to an external symbol. void lowerCallToExternalSymbol(const CallInst &I, const char *FunctionName); + + SDValue lowerStartEH(SDValue Chain, const BasicBlock *EHPadBB, + MCSymbol *&BeginLabel); + SDValue lowerEndEH(SDValue Chain, const InvokeInst *II, + const BasicBlock *EHPadBB, MCSymbol *BeginLabel); }; /// This struct represents the registers (physical or virtual) diff --git a/llvm/lib/IR/AsmWriter.cpp b/llvm/lib/IR/AsmWriter.cpp index 14a2f42..e33ac67 100644 --- a/llvm/lib/IR/AsmWriter.cpp +++ b/llvm/lib/IR/AsmWriter.cpp @@ -2461,6 +2461,8 @@ static void WriteAsOperandInternal(raw_ostream &Out, const Value *V, // We don't emit the AD_ATT dialect as it's the assumed default. 
if (IA->getDialect() == InlineAsm::AD_Intel) Out << "inteldialect "; + if (IA->canThrow()) + Out << "unwind "; Out << '"'; printEscapedString(IA->getAsmString(), Out); Out << "\", \""; diff --git a/llvm/lib/IR/ConstantsContext.h b/llvm/lib/IR/ConstantsContext.h index 95c5ab6..7fc25a8 100644 --- a/llvm/lib/IR/ConstantsContext.h +++ b/llvm/lib/IR/ConstantsContext.h @@ -463,24 +463,26 @@ struct InlineAsmKeyType { bool HasSideEffects; bool IsAlignStack; InlineAsm::AsmDialect AsmDialect; + bool CanThrow; InlineAsmKeyType(StringRef AsmString, StringRef Constraints, FunctionType *FTy, bool HasSideEffects, bool IsAlignStack, - InlineAsm::AsmDialect AsmDialect) + InlineAsm::AsmDialect AsmDialect, bool canThrow) : AsmString(AsmString), Constraints(Constraints), FTy(FTy), HasSideEffects(HasSideEffects), IsAlignStack(IsAlignStack), - AsmDialect(AsmDialect) {} + AsmDialect(AsmDialect), CanThrow(canThrow) {} InlineAsmKeyType(const InlineAsm *Asm, SmallVectorImpl &) : AsmString(Asm->getAsmString()), Constraints(Asm->getConstraintString()), FTy(Asm->getFunctionType()), HasSideEffects(Asm->hasSideEffects()), - IsAlignStack(Asm->isAlignStack()), AsmDialect(Asm->getDialect()) {} + IsAlignStack(Asm->isAlignStack()), AsmDialect(Asm->getDialect()), + CanThrow(Asm->canThrow()) {} bool operator==(const InlineAsmKeyType &X) const { return HasSideEffects == X.HasSideEffects && IsAlignStack == X.IsAlignStack && AsmDialect == X.AsmDialect && AsmString == X.AsmString && Constraints == X.Constraints && - FTy == X.FTy; + FTy == X.FTy && CanThrow == X.CanThrow; } bool operator==(const InlineAsm *Asm) const { @@ -489,12 +491,12 @@ struct InlineAsmKeyType { AsmDialect == Asm->getDialect() && AsmString == Asm->getAsmString() && Constraints == Asm->getConstraintString() && - FTy == Asm->getFunctionType(); + FTy == Asm->getFunctionType() && CanThrow == Asm->canThrow(); } unsigned getHash() const { return hash_combine(AsmString, Constraints, HasSideEffects, IsAlignStack, - AsmDialect, FTy); + AsmDialect, FTy, CanThrow); } using TypeClass = ConstantInfo::TypeClass; @@ -502,7 +504,7 @@ struct InlineAsmKeyType { InlineAsm *create(TypeClass *Ty) const { assert(PointerType::getUnqual(FTy) == Ty); return new InlineAsm(FTy, std::string(AsmString), std::string(Constraints), - HasSideEffects, IsAlignStack, AsmDialect); + HasSideEffects, IsAlignStack, AsmDialect, CanThrow); } }; diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp index d597a69..e46bef1 100644 --- a/llvm/lib/IR/Core.cpp +++ b/llvm/lib/IR/Core.cpp @@ -460,11 +460,11 @@ const char *LLVMGetModuleInlineAsm(LLVMModuleRef M, size_t *Len) { return Str.c_str(); } -LLVMValueRef LLVMGetInlineAsm(LLVMTypeRef Ty, - char *AsmString, size_t AsmStringSize, - char *Constraints, size_t ConstraintsSize, - LLVMBool HasSideEffects, LLVMBool IsAlignStack, - LLVMInlineAsmDialect Dialect) { +LLVMValueRef LLVMGetInlineAsm(LLVMTypeRef Ty, char *AsmString, + size_t AsmStringSize, char *Constraints, + size_t ConstraintsSize, LLVMBool HasSideEffects, + LLVMBool IsAlignStack, + LLVMInlineAsmDialect Dialect, LLVMBool CanThrow) { InlineAsm::AsmDialect AD; switch (Dialect) { case LLVMInlineAsmDialectATT: @@ -477,10 +477,9 @@ LLVMValueRef LLVMGetInlineAsm(LLVMTypeRef Ty, return wrap(InlineAsm::get(unwrap(Ty), StringRef(AsmString, AsmStringSize), StringRef(Constraints, ConstraintsSize), - HasSideEffects, IsAlignStack, AD)); + HasSideEffects, IsAlignStack, AD, CanThrow)); } - /*--.. 
Operations on module contexts ......................................--*/ LLVMContextRef LLVMGetModuleContext(LLVMModuleRef M) { return wrap(&unwrap(M)->getContext()); diff --git a/llvm/lib/IR/InlineAsm.cpp b/llvm/lib/IR/InlineAsm.cpp index ee30b92..56932b4 100644 --- a/llvm/lib/IR/InlineAsm.cpp +++ b/llvm/lib/IR/InlineAsm.cpp @@ -29,11 +29,11 @@ using namespace llvm; InlineAsm::InlineAsm(FunctionType *FTy, const std::string &asmString, const std::string &constraints, bool hasSideEffects, - bool isAlignStack, AsmDialect asmDialect) + bool isAlignStack, AsmDialect asmDialect, bool canThrow) : Value(PointerType::getUnqual(FTy), Value::InlineAsmVal), AsmString(asmString), Constraints(constraints), FTy(FTy), HasSideEffects(hasSideEffects), IsAlignStack(isAlignStack), - Dialect(asmDialect) { + Dialect(asmDialect), CanThrow(canThrow) { // Do various checks on the constraint string and type. assert(Verify(getFunctionType(), constraints) && "Function type not legal for constraints!"); @@ -41,9 +41,10 @@ InlineAsm::InlineAsm(FunctionType *FTy, const std::string &asmString, InlineAsm *InlineAsm::get(FunctionType *FTy, StringRef AsmString, StringRef Constraints, bool hasSideEffects, - bool isAlignStack, AsmDialect asmDialect) { + bool isAlignStack, AsmDialect asmDialect, + bool canThrow) { InlineAsmKeyType Key(AsmString, Constraints, FTy, hasSideEffects, - isAlignStack, asmDialect); + isAlignStack, asmDialect, canThrow); LLVMContextImpl *pImpl = FTy->getContext().pImpl; return pImpl->InlineAsms.getOrCreate(PointerType::getUnqual(FTy), Key); } diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp index 117c707..1fb2efb 100644 --- a/llvm/lib/IR/Verifier.cpp +++ b/llvm/lib/IR/Verifier.cpp @@ -2779,6 +2779,8 @@ void Verifier::visitIndirectBrInst(IndirectBrInst &BI) { void Verifier::visitCallBrInst(CallBrInst &CBI) { Assert(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!", &CBI); + const InlineAsm *IA = cast(CBI.getCalledOperand()); + Assert(!IA->canThrow(), "Unwinding from Callbr is not allowed"); for (unsigned i = 0, e = CBI.getNumSuccessors(); i != e; ++i) Assert(CBI.getSuccessor(i)->getType()->isLabelTy(), "Callbr successors must all have pointer type!", &CBI); diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp index c8f8a75..583a6c6 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -39,6 +39,7 @@ #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Function.h" #include "llvm/IR/GlobalVariable.h" +#include "llvm/IR/InlineAsm.h" #include "llvm/IR/InstrTypes.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" @@ -2323,9 +2324,13 @@ Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) { } if (isa(Callee) && !Call.doesNotThrow()) { - // Inline asm calls cannot throw - mark them 'nounwind'. - Call.setDoesNotThrow(); - Changed = true; + InlineAsm *IA = cast(Callee); + if (!IA->canThrow()) { + // Normal inline asm calls cannot throw - mark them + // 'nounwind'. 
+ Call.setDoesNotThrow(); + Changed = true; + } } // Try to optimize the call if possible, we require DataLayout for most of diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp index be4c828..49be8bb 100644 --- a/llvm/lib/Transforms/Utils/InlineFunction.cpp +++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp @@ -45,6 +45,7 @@ #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" +#include "llvm/IR/InlineAsm.h" #include "llvm/IR/InstrTypes.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" @@ -545,9 +546,16 @@ static BasicBlock *HandleCallsInBlockInlinedThroughInvoke( // instructions require no special handling. CallInst *CI = dyn_cast(I); - if (!CI || CI->doesNotThrow() || CI->isInlineAsm()) + if (!CI || CI->doesNotThrow()) continue; + if (CI->isInlineAsm()) { + InlineAsm *IA = cast(CI->getCalledOperand()); + if (!IA->canThrow()) { + continue; + } + } + // We do not need to (and in fact, cannot) convert possibly throwing calls // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into // invokes. The caller's "segment" of the deoptimization continuation diff --git a/llvm/lib/Transforms/Utils/ValueMapper.cpp b/llvm/lib/Transforms/Utils/ValueMapper.cpp index d875188..a89c923 100644 --- a/llvm/lib/Transforms/Utils/ValueMapper.cpp +++ b/llvm/lib/Transforms/Utils/ValueMapper.cpp @@ -369,7 +369,7 @@ Value *Mapper::mapValue(const Value *V) { if (NewTy != IA->getFunctionType()) V = InlineAsm::get(NewTy, IA->getAsmString(), IA->getConstraintString(), IA->hasSideEffects(), IA->isAlignStack(), - IA->getDialect()); + IA->getDialect(), IA->canThrow()); } return getVM()[V] = const_cast(V); diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-no-unwind-inline-asm.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-no-unwind-inline-asm.ll new file mode 100644 index 0000000..113ad30 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-no-unwind-inline-asm.ll @@ -0,0 +1,42 @@ +; RUN: llc -O0 -global-isel -stop-after=irtranslator < %s | FileCheck %s + +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64-unknown-linux-gnu" + +@.str.2 = private unnamed_addr constant [7 x i8] c"Boom!\0A\00", align 1 + +define dso_local void @trap() { +entry: + unreachable +} + +define dso_local void @test() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +entry: + +; CHECK-LABEL: name: test +; CHECK: body: +; CHECK-NEXT: bb.1.entry +; CHECK-NOT: EH_LABEL +; CHECK: INLINEASM +; CHECK-NOT: EH_LABEL + + invoke void asm sideeffect "bl trap", ""() + to label %invoke.cont unwind label %lpad + +invoke.cont: + ret void + +lpad: +; CHECK: bb.3.lpad +; CHECK: EH_LABEL + + %0 = landingpad { i8*, i32 } + cleanup + call void (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.2, i64 0, i64 0)) + resume { i8*, i32 } %0 + +} + +declare dso_local i32 @__gxx_personality_v0(...) + +declare dso_local void @printf(i8*, ...) 
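For reference, the llvm-c and Go-binding changes above append a trailing CanThrow parameter to LLVMGetInlineAsm. Below is a minimal sketch (not part of this patch) of constructing a potentially-unwinding inline asm value through that entry point; `makeUnwindingAsm` is an illustrative helper name, and only the signature shown in the Core.h hunk plus standard LLVM-C type constructors are assumed.

```
// Sketch only: build an inline asm value that is allowed to unwind by
// passing the new CanThrow flag as true.
#include <llvm-c/Core.h>
#include <cstring>

LLVMValueRef makeUnwindingAsm(LLVMContextRef Ctx) {
  LLVMTypeRef VoidTy = LLVMVoidTypeInContext(Ctx);
  LLVMTypeRef FnTy = LLVMFunctionType(VoidTy, /*ParamTypes=*/nullptr,
                                      /*ParamCount=*/0, /*IsVarArg=*/0);
  char AsmStr[] = "call trap";
  char Constraints[] = "~{dirflag},~{fpsr},~{flags}";
  return LLVMGetInlineAsm(FnTy, AsmStr, std::strlen(AsmStr), Constraints,
                          std::strlen(Constraints), /*HasSideEffects=*/1,
                          /*IsAlignStack=*/0, LLVMInlineAsmDialectATT,
                          /*CanThrow=*/1);
}
```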
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unwind-inline-asm.ll b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unwind-inline-asm.ll new file mode 100644 index 0000000..fdb0543 --- /dev/null +++ b/llvm/test/CodeGen/AArch64/GlobalISel/irtranslator-unwind-inline-asm.ll @@ -0,0 +1,42 @@ +; RUN: llc -O0 -global-isel -stop-after=irtranslator < %s | FileCheck %s + +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64-unknown-linux-gnu" + +@.str.2 = private unnamed_addr constant [7 x i8] c"Boom!\0A\00", align 1 + +define dso_local void @trap() { +entry: + unreachable +} + +define dso_local void @test() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +entry: + +; CHECK-LABEL: name: test +; CHECK: body: +; CHECK-NEXT: bb.1.entry +; CHECK: EH_LABEL +; CHECK-NEXT: INLINEASM +; CHECK-NEXT: EH_LABEL + + invoke void asm sideeffect unwind "bl trap", ""() + to label %invoke.cont unwind label %lpad + +invoke.cont: + ret void + +lpad: +; CHECK: bb.3.lpad +; CHECK: EH_LABEL + + %0 = landingpad { i8*, i32 } + cleanup + call void (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.2, i64 0, i64 0)) + resume { i8*, i32 } %0 + +} + +declare dso_local i32 @__gxx_personality_v0(...) + +declare dso_local void @printf(i8*, ...) diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/unwind-inline-asm.ll b/llvm/test/CodeGen/AArch64/GlobalISel/unwind-inline-asm.ll new file mode 100644 index 0000000..f32c34a --- /dev/null +++ b/llvm/test/CodeGen/AArch64/GlobalISel/unwind-inline-asm.ll @@ -0,0 +1,57 @@ +; RUN: llc -global-isel < %s | FileCheck %s + +target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128" +target triple = "aarch64-unknown-linux-gnu" + +@.str.2 = private unnamed_addr constant [7 x i8] c"Boom!\0A\00", align 1 + +define dso_local void @trap() { +entry: + unreachable +} + +define dso_local void @test() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +entry: + +; CHECK-LABEL: test: +; CHECK: .Ltmp0: +; CHECK: bl trap +; CHECK: .Ltmp1: + + invoke void asm sideeffect unwind "bl trap", ""() + to label %invoke.cont unwind label %lpad + +invoke.cont: + ret void + +lpad: + %0 = landingpad { i8*, i32 } + cleanup +; CHECK: bl printf + call void (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.2, i64 0, i64 0)) + resume { i8*, i32 } %0 + +} + +declare dso_local i32 @__gxx_personality_v0(...) + +declare dso_local void @printf(i8*, ...) 
+ +; Exception table generation around the inline assembly + +; CHECK-LABEL: GCC_except_table1: +; CHECK-NEXT: .Lexception0: +; CHECK-NEXT: .byte 255 // @LPStart Encoding = omit +; CHECK-NEXT: .byte 255 // @TType Encoding = omit +; CHECK-NEXT: .byte 1 // Call site Encoding = uleb128 +; CHECK-NEXT: .uleb128 .Lcst_end0-.Lcst_begin0 +; CHECK-NEXT: .Lcst_begin0: +; CHECK-NEXT: .uleb128 .Ltmp0-.Lfunc_begin0 // >> Call Site 1 << +; CHECK-NEXT: .uleb128 .Ltmp1-.Ltmp0 // Call between .Ltmp0 and .Ltmp1 +; CHECK-NEXT: .uleb128 .Ltmp2-.Lfunc_begin0 // jumps to .Ltmp2 +; CHECK-NEXT: .byte 0 // On action: cleanup +; CHECK-NEXT: .uleb128 .Ltmp1-.Lfunc_begin0 // >> Call Site 2 << +; CHECK-NEXT: .uleb128 .Lfunc_end1-.Ltmp1 // Call between .Ltmp1 and .Lfunc_end1 +; CHECK-NEXT: .byte 0 // has no landing pad +; CHECK-NEXT: .byte 0 // On action: cleanup +; CHECK-NEXT: .Lcst_end0: \ No newline at end of file diff --git a/llvm/test/CodeGen/X86/no-seh-unwind-inline-asm-codegen.ll b/llvm/test/CodeGen/X86/no-seh-unwind-inline-asm-codegen.ll new file mode 100644 index 0000000..b42afe3 --- /dev/null +++ b/llvm/test/CodeGen/X86/no-seh-unwind-inline-asm-codegen.ll @@ -0,0 +1,58 @@ +; RUN: llc < %s | FileCheck %s + +target datalayout = "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-windows-msvc19.28.29914" + +@str = private unnamed_addr constant [6 x i8] c"Boom!\00", align 1 + +define dso_local void @trap() { +entry: + unreachable +} + +define dso_local void @test() personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) { +entry: + +; CHECK-NOT: .Ltmp0: +; CHECK: callq trap +; CHECK-NOT: .Ltmp1: + + invoke void asm sideeffect "call trap", "~{dirflag},~{fpsr},~{flags}"() + to label %exit unwind label %except + +exit: + ret void + +except: + +; CHECK-LABEL: "?dtor$2@?0?test@4HA": +; CHECK: callq printf + + %0 = cleanuppad within none [] + call void (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @str, i64 0, i64 0)) [ "funclet"(token %0) ] + cleanupret from %0 unwind to caller +} + +declare dso_local i32 @__CxxFrameHandler3(...) + +declare dso_local void @printf(i8*, ...) 
+ +; SEH Table + +; CHECK-LABEL: $cppxdata$test: +; CHECK-NEXT: .long 429065506 # MagicNumber +; CHECK-NEXT: .long 1 # MaxState +; CHECK-NEXT: .long ($stateUnwindMap$test)@IMGREL # UnwindMap +; CHECK-NEXT: .long 0 # NumTryBlocks +; CHECK-NEXT: .long 0 # TryBlockMap +; CHECK-NEXT: .long 1 # IPMapEntries +; CHECK-NEXT: .long ($ip2state$test)@IMGREL # IPToStateXData +; CHECK-NEXT: .long 40 # UnwindHelp +; CHECK-NEXT: .long 0 # ESTypeList +; CHECK-NEXT: .long 1 # EHFlags +; CHECK-NEXT:$stateUnwindMap$test: +; CHECK-NEXT: .long -1 # ToState +; CHECK-NEXT: .long "?dtor$2@?0?test@4HA"@IMGREL # Action +; CHECK-NEXT:$ip2state$test: +; CHECK-NEXT: .long .Lfunc_begin0@IMGREL # IP +; CHECK-NEXT: .long -1 # ToState diff --git a/llvm/test/CodeGen/X86/no-unwind-inline-asm-codegen.ll b/llvm/test/CodeGen/X86/no-unwind-inline-asm-codegen.ll new file mode 100644 index 0000000..3947a6e --- /dev/null +++ b/llvm/test/CodeGen/X86/no-unwind-inline-asm-codegen.ll @@ -0,0 +1,51 @@ +; RUN: llc < %s | FileCheck %s + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +@.str.2 = private unnamed_addr constant [7 x i8] c"Boom!\0A\00", align 1 + +define dso_local void @trap() { +entry: + unreachable +} + +define dso_local void @test() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +entry: + +; CHECK-NOT: .Ltmp0: +; CHECK: callq trap +; CHECK-NOT: .Ltmp1: + + invoke void asm sideeffect "call trap", "~{dirflag},~{fpsr},~{flags}"() + to label %invoke.cont unwind label %lpad + +invoke.cont: + ret void + +lpad: + %0 = landingpad { i8*, i32 } + cleanup + call void (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.2, i64 0, i64 0)) + resume { i8*, i32 } %0 + +} + +declare dso_local i32 @__gxx_personality_v0(...) + +declare dso_local void @printf(i8*, ...) 
+ +; Exception table generation around the inline assembly shouldn't exist + +; CHECK-LABEL: GCC_except_table1: +; CHECK-NEXT: .Lexception0: +; CHECK-NEXT: .byte 255 # @LPStart Encoding = omit +; CHECK-NEXT: .byte 255 # @TType Encoding = omit +; CHECK-NEXT: .byte 1 # Call site Encoding = uleb128 +; CHECK-NEXT: .uleb128 .Lcst_end0-.Lcst_begin0 +; CHECK-NEXT: .Lcst_begin0: +; CHECK-NEXT: .uleb128 .Lfunc_begin0-.Lfunc_begin0 # >> Call Site 1 << +; CHECK-NEXT: .uleb128 .Lfunc_end1-.Lfunc_begin0 # Call between .Lfunc_begin0 and .Lfunc_end1 +; CHECK-NEXT: .byte 0 # has no landing pad +; CHECK-NEXT: .byte 0 # On action: cleanup +; CHECK-NEXT: .Lcst_end0: diff --git a/llvm/test/CodeGen/X86/seh-unwind-inline-asm-codegen.ll b/llvm/test/CodeGen/X86/seh-unwind-inline-asm-codegen.ll new file mode 100644 index 0000000..a9859ba --- /dev/null +++ b/llvm/test/CodeGen/X86/seh-unwind-inline-asm-codegen.ll @@ -0,0 +1,63 @@ +; RUN: llc < %s | FileCheck %s + +target datalayout = "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-pc-windows-msvc19.28.29914" + +@str = private unnamed_addr constant [6 x i8] c"Boom!\00", align 1 + +define dso_local void @trap() { +entry: + unreachable +} + +define dso_local void @test() personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*) { +entry: + +; CHECK-LABEL: .Ltmp0: +; CHECK: callq trap +; CHECK-LABEL: .Ltmp1: + + invoke void asm sideeffect unwind "call trap", "~{dirflag},~{fpsr},~{flags}"() + to label %exit unwind label %except + +exit: + ret void + +except: + +; CHECK-LABEL: "?dtor$2@?0?test@4HA": +; CHECK: callq printf + + %0 = cleanuppad within none [] + call void (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @str, i64 0, i64 0)) [ "funclet"(token %0) ] + cleanupret from %0 unwind to caller +} + +declare dso_local i32 @__CxxFrameHandler3(...) + +declare dso_local void @printf(i8*, ...) 
+ +; SEH Table + +; CHECK-LABEL: $cppxdata$test: +; CHECK-NEXT: .long 429065506 # MagicNumber +; CHECK-NEXT: .long 1 # MaxState +; CHECK-NEXT: .long ($stateUnwindMap$test)@IMGREL # UnwindMap +; CHECK-NEXT: .long 0 # NumTryBlocks +; CHECK-NEXT: .long 0 # TryBlockMap +; CHECK-NEXT: .long 3 # IPMapEntries +; CHECK-NEXT: .long ($ip2state$test)@IMGREL # IPToStateXData +; CHECK-NEXT: .long 40 # UnwindHelp +; CHECK-NEXT: .long 0 # ESTypeList +; CHECK-NEXT: .long 1 # EHFlags +; CHECK-NEXT:$stateUnwindMap$test: +; CHECK-NEXT: .long -1 # ToState +; CHECK-NEXT: .long "?dtor$2@?0?test@4HA"@IMGREL # Action +; CHECK-NEXT:$ip2state$test: +; CHECK-NEXT: .long .Lfunc_begin0@IMGREL # IP +; CHECK-NEXT: .long -1 # ToState +; CHECK-NEXT: .long .Ltmp0@IMGREL+1 # IP +; CHECK-NEXT: .long 0 # ToState +; CHECK-NEXT: .long .Ltmp1@IMGREL+1 # IP +; CHECK-NEXT: .long -1 # ToState + diff --git a/llvm/test/CodeGen/X86/sjlj-unwind-inline-asm-codegen.ll b/llvm/test/CodeGen/X86/sjlj-unwind-inline-asm-codegen.ll new file mode 100644 index 0000000..432e68f --- /dev/null +++ b/llvm/test/CodeGen/X86/sjlj-unwind-inline-asm-codegen.ll @@ -0,0 +1,40 @@ +; RUN: llc --exception-model=sjlj < %s | FileCheck %s + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +@.str.2 = private unnamed_addr constant [7 x i8] c"Boom!\0A\00", align 1 + +define dso_local void @trap() { +entry: + unreachable +} + +define dso_local void @test() personality i8* bitcast (i32 (...)* @__gxx_personality_sj0 to i8*) { +entry: + +; CHECK: callq _Unwind_SjLj_Register@PLT +; CHECK-LABEL: .Ltmp0: +; CHECK: callq trap +; CHECK-LABEL: .Ltmp1: +; CHECK: callq _Unwind_SjLj_Unregister@PLT + + invoke void asm sideeffect unwind "call trap", "~{dirflag},~{fpsr},~{flags}"() + to label %invoke.cont unwind label %lpad + +invoke.cont: + ret void + +lpad: + %0 = landingpad { i8*, i32 } + cleanup +; CHECK: callq printf +; CHECK: callq _Unwind_SjLj_Resume@PLT + call void (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.2, i64 0, i64 0)) + resume { i8*, i32 } %0 + +} + +declare dso_local i32 @__gxx_personality_sj0(...) + +declare dso_local void @printf(i8*, ...) diff --git a/llvm/test/CodeGen/X86/unwind-inline-asm-codegen.ll b/llvm/test/CodeGen/X86/unwind-inline-asm-codegen.ll new file mode 100644 index 0000000..aa67d40 --- /dev/null +++ b/llvm/test/CodeGen/X86/unwind-inline-asm-codegen.ll @@ -0,0 +1,56 @@ +; RUN: llc < %s | FileCheck %s + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +@.str.2 = private unnamed_addr constant [7 x i8] c"Boom!\0A\00", align 1 + +define dso_local void @trap() { +entry: + unreachable +} + +define dso_local void @test() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +entry: + +; CHECK-LABEL: .Ltmp0: +; CHECK: callq trap +; CHECK-LABEL: .Ltmp1: + + invoke void asm sideeffect unwind "call trap", "~{dirflag},~{fpsr},~{flags}"() + to label %invoke.cont unwind label %lpad + +invoke.cont: + ret void + +lpad: + %0 = landingpad { i8*, i32 } + cleanup +; CHECK: callq printf + call void (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.2, i64 0, i64 0)) + resume { i8*, i32 } %0 + +} + +declare dso_local i32 @__gxx_personality_v0(...) + +declare dso_local void @printf(i8*, ...) 
+ +; Exception table generation around the inline assembly + +; CHECK-LABEL: GCC_except_table1: +; CHECK-NEXT: .Lexception0: +; CHECK-NEXT: .byte 255 # @LPStart Encoding = omit +; CHECK-NEXT: .byte 255 # @TType Encoding = omit +; CHECK-NEXT: .byte 1 # Call site Encoding = uleb128 +; CHECK-NEXT: .uleb128 .Lcst_end0-.Lcst_begin0 +; CHECK-NEXT: .Lcst_begin0: +; CHECK-NEXT: .uleb128 .Ltmp0-.Lfunc_begin0 # >> Call Site 1 << +; CHECK-NEXT: .uleb128 .Ltmp1-.Ltmp0 # Call between .Ltmp0 and .Ltmp1 +; CHECK-NEXT: .uleb128 .Ltmp2-.Lfunc_begin0 # jumps to .Ltmp2 +; CHECK-NEXT: .byte 0 # On action: cleanup +; CHECK-NEXT: .uleb128 .Ltmp1-.Lfunc_begin0 # >> Call Site 2 << +; CHECK-NEXT: .uleb128 .Lfunc_end1-.Ltmp1 # Call between .Ltmp1 and .Lfunc_end1 +; CHECK-NEXT: .byte 0 # has no landing pad +; CHECK-NEXT: .byte 0 # On action: cleanup +; CHECK-NEXT: .Lcst_end0: diff --git a/llvm/test/Transforms/Inline/no-unwind-inline-asm.ll b/llvm/test/Transforms/Inline/no-unwind-inline-asm.ll new file mode 100644 index 0000000..4e93990 --- /dev/null +++ b/llvm/test/Transforms/Inline/no-unwind-inline-asm.ll @@ -0,0 +1,46 @@ +; RUN: opt < %s -inline -S | FileCheck %s + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +@.str.2 = private unnamed_addr constant [7 x i8] c"Boom!\0A\00", align 1 + +define dso_local void @trap() { +entry: + unreachable +} + +define dso_local void @proxy() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +entry: + call void asm sideeffect "call trap", "~{dirflag},~{fpsr},~{flags}"() + call void asm sideeffect "call trap", "~{dirflag},~{fpsr},~{flags}"() + ret void +} + +define dso_local void @test() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +entry: +; CHECK: define dso_local void @test +; CHECK-NOT: invoke void @proxy() +; CHECK: call void asm sideeffect +; CHECK-NEXT: call void asm sideeffect + + invoke void @proxy() + to label %invoke.cont unwind label %lpad + +invoke.cont: + ret void + +lpad: +; CHECK: %0 = landingpad { i8*, i32 } +; CHECK: resume { i8*, i32 } %0 + + %0 = landingpad { i8*, i32 } + cleanup + call void (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.2, i64 0, i64 0)) + resume { i8*, i32 } %0 + +} + +declare dso_local i32 @__gxx_personality_v0(...) + +declare dso_local void @printf(i8*, ...) 
diff --git a/llvm/test/Transforms/Inline/unwind-inline-asm.ll b/llvm/test/Transforms/Inline/unwind-inline-asm.ll new file mode 100644 index 0000000..5aad052 --- /dev/null +++ b/llvm/test/Transforms/Inline/unwind-inline-asm.ll @@ -0,0 +1,46 @@ +; RUN: opt < %s -inline -S | FileCheck %s + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +@.str.2 = private unnamed_addr constant [7 x i8] c"Boom!\0A\00", align 1 + +define dso_local void @trap() { +entry: + unreachable +} + +define dso_local void @proxy() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +entry: + call void asm sideeffect unwind "call trap", "~{dirflag},~{fpsr},~{flags}"() + call void asm sideeffect unwind "call trap", "~{dirflag},~{fpsr},~{flags}"() + ret void +} + +define dso_local void @test() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +entry: +; CHECK: define dso_local void @test +; CHECK-NOT: invoke void @proxy() +; CHECK: invoke void asm sideeffect unwind +; CHECK: invoke void asm sideeffect unwind + + invoke void @proxy() + to label %invoke.cont unwind label %lpad + +invoke.cont: + ret void + +lpad: +; CHECK: %0 = landingpad { i8*, i32 } +; CHECK: resume { i8*, i32 } %0 + + %0 = landingpad { i8*, i32 } + cleanup + call void (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.2, i64 0, i64 0)) + resume { i8*, i32 } %0 + +} + +declare dso_local i32 @__gxx_personality_v0(...) + +declare dso_local void @printf(i8*, ...) diff --git a/llvm/test/Transforms/InstCombine/no-unwind-inline-asm.ll b/llvm/test/Transforms/InstCombine/no-unwind-inline-asm.ll new file mode 100644 index 0000000..71e1406 --- /dev/null +++ b/llvm/test/Transforms/InstCombine/no-unwind-inline-asm.ll @@ -0,0 +1,36 @@ +; RUN: opt < %s -O2 -S | FileCheck %s + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +@.str.2 = private unnamed_addr constant [7 x i8] c"Boom!\0A\00", align 1 + +define dso_local void @trap() { +entry: + unreachable +} + +define dso_local void @test() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +entry: +; CHECK: define dso_local void @test() +; CHECK-NEXT: entry: +; CHECK-NEXT: tail call void asm sideeffect +; CHECK-NEXT: ret void + + invoke void asm sideeffect "call trap", "~{dirflag},~{fpsr},~{flags}"() + to label %invoke.cont unwind label %lpad + +invoke.cont: + ret void + +lpad: + %0 = landingpad { i8*, i32 } + cleanup + call void (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.2, i64 0, i64 0)) + resume { i8*, i32 } %0 + +} + +declare dso_local i32 @__gxx_personality_v0(...) + +declare dso_local void @printf(i8*, ...) 
diff --git a/llvm/test/Transforms/InstCombine/unwind-inline-asm.ll b/llvm/test/Transforms/InstCombine/unwind-inline-asm.ll new file mode 100644 index 0000000..8aa598b --- /dev/null +++ b/llvm/test/Transforms/InstCombine/unwind-inline-asm.ll @@ -0,0 +1,38 @@ +; RUN: opt < %s -O2 -S | FileCheck %s + +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +@.str.2 = private unnamed_addr constant [7 x i8] c"Boom!\0A\00", align 1 + +define dso_local void @trap() { +entry: + unreachable +} + +define dso_local void @test() personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) { +entry: +; CHECK: define dso_local void @test() +; CHECK-NEXT: entry: +; CHECK-NEXT: invoke void asm sideeffect unwind + + invoke void asm sideeffect unwind "call trap", "~{dirflag},~{fpsr},~{flags}"() + to label %invoke.cont unwind label %lpad + +invoke.cont: + ret void + +lpad: +; CHECK: %0 = landingpad { i8*, i32 } +; CHECK: resume { i8*, i32 } %0 + + %0 = landingpad { i8*, i32 } + cleanup + call void (i8*, ...) @printf(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.2, i64 0, i64 0)) + resume { i8*, i32 } %0 + +} + +declare dso_local i32 @__gxx_personality_v0(...) + +declare dso_local void @printf(i8*, ...)
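For reference, a minimal clang-level usage sketch, modeled on the unwind-inline-asm.cpp test added above (not part of this patch; `Guard`, `may_throw`, and `demo` are illustrative names, and the snippet assumes an x86-64 target built with -fexceptions). The "unwind" clobber drops the implicit nounwind, so clang may emit the asm as `invoke void asm sideeffect unwind ...` and the destructor below still runs if the called code throws:

```
#include <cstdio>

extern "C" void may_throw();  // illustrative: e.g. a C++ function that throws

struct Guard {
  bool armed = true;
  ~Guard() {
    if (armed)
      std::printf("leaked!\n");  // cleanup that must run if the asm unwinds
  }
};

void demo() {
  Guard g;
  // Without the "unwind" clobber this asm would be marked nounwind and the
  // cleanup for 'g' would be skipped if control unwinds out of the call.
  asm volatile("call may_throw" ::: "unwind");
  g.armed = false;
}
```

Note that, per the Sema and Verifier changes above, the "unwind" clobber cannot be combined with asm goto (err_asm_unwind_and_goto), and a callbr whose asm has canThrow set is rejected.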