From 2f63cbcc0c508c502e1f59cd911a36f73d1c460a Mon Sep 17 00:00:00 2001
From: Sanjoy Das
Date: Tue, 7 Feb 2017 19:19:49 +0000
Subject: [PATCH] [ImplicitNullCheck] Extend Implicit Null Check scope by using stores

Summary:
This change allows using store instructions for implicit null checks.
Memory aliasing analysis is not used, so the change conservatively
assumes that any store and any load may access the same memory. As a
result, re-ordering of store-store, store-load and load-store pairs is
prohibited.

Patch by Serguei Katkov!

Reviewers: reames, sanjoy

Reviewed By: sanjoy

Subscribers: atrick, llvm-commits

Differential Revision: https://reviews.llvm.org/D29400

llvm-svn: 294338
---
 llvm/docs/FaultMaps.rst                        |   7 +-
 llvm/include/llvm/CodeGen/FaultMaps.h          |   7 +-
 llvm/include/llvm/Target/Target.td             |   3 +-
 llvm/include/llvm/Target/TargetOpcodes.def     |   6 +-
 llvm/lib/CodeGen/FaultMaps.cpp                 |   4 +
 llvm/lib/CodeGen/ImplicitNullChecks.cpp        | 121 +++--
 llvm/lib/Target/X86/X86AsmPrinter.h            |   2 +-
 llvm/lib/Target/X86/X86MCInstLower.cpp         |  42 +-
 llvm/test/CodeGen/X86/block-placement.mir      |   4 +-
 llvm/test/CodeGen/X86/implicit-null-check.ll   |  41 +-
 llvm/test/CodeGen/X86/implicit-null-checks.mir | 658 ++++++++++++++++++++++++-
 11 files changed, 809 insertions(+), 86 deletions(-)

diff --git a/llvm/docs/FaultMaps.rst b/llvm/docs/FaultMaps.rst
index 4ecdd86..3a360a2 100644
--- a/llvm/docs/FaultMaps.rst
+++ b/llvm/docs/FaultMaps.rst
@@ -47,12 +47,17 @@ The format of this section is
     uint32 : NumFaultingPCs
     uint32 : Reserved (expected to be 0)
     FunctionFaultInfo[NumFaultingPCs] {
-      uint32  : FaultKind = FaultMaps::FaultingLoad (only legal value currently)
+      uint32  : FaultKind
       uint32  : FaultingPCOffset
       uint32  : HandlerPCOffset
     }
   }

+FaultKind describes the reason for the expected fault.
+Currently three kinds of faults are supported:
+  1. FaultingLoad - fault due to a load from memory.
+  2. FaultingLoadStore - fault due to an instruction that both loads and stores.
+  3. FaultingStore - fault due to a store to memory.

 The ``ImplicitNullChecks`` pass
 ===============================
diff --git a/llvm/include/llvm/CodeGen/FaultMaps.h b/llvm/include/llvm/CodeGen/FaultMaps.h
index 9b5a3e1..a278e7c 100644
--- a/llvm/include/llvm/CodeGen/FaultMaps.h
+++ b/llvm/include/llvm/CodeGen/FaultMaps.h
@@ -26,7 +26,12 @@ class MCStreamer;

 class FaultMaps {
 public:
-  enum FaultKind { FaultingLoad = 1, FaultKindMax };
+  enum FaultKind {
+    FaultingLoad = 1,
+    FaultingLoadStore,
+    FaultingStore,
+    FaultKindMax
+  };

   static const char *faultTypeToString(FaultKind);

diff --git a/llvm/include/llvm/Target/Target.td b/llvm/include/llvm/Target/Target.td
index bf0bdae..76de664 100644
--- a/llvm/include/llvm/Target/Target.td
+++ b/llvm/include/llvm/Target/Target.td
@@ -951,11 +951,12 @@ def LOCAL_ESCAPE : Instruction {
   let hasSideEffects = 0;
   let hasCtrlDep = 1;
 }
-def FAULTING_LOAD_OP : Instruction {
+def FAULTING_OP : Instruction {
   let OutOperandList = (outs unknown:$dst);
   let InOperandList = (ins variable_ops);
   let usesCustomInserter = 1;
   let mayLoad = 1;
+  let mayStore = 1;
   let isTerminator = 1;
   let isBranch = 1;
 }
diff --git a/llvm/include/llvm/Target/TargetOpcodes.def b/llvm/include/llvm/Target/TargetOpcodes.def
index 76ed060..290e3ff 100644
--- a/llvm/include/llvm/Target/TargetOpcodes.def
+++ b/llvm/include/llvm/Target/TargetOpcodes.def
@@ -134,11 +134,13 @@ HANDLE_TARGET_OPCODE(STATEPOINT)
 /// frame index of the local stack allocation.
 HANDLE_TARGET_OPCODE(LOCAL_ESCAPE)

-/// Loading instruction that may page fault, bundled with associated
+/// Wraps a machine instruction which can fault, bundled with associated
+/// information on how to handle such a fault.
+/// For example, a load instruction that may page fault, bundled with associated
 /// information on how to handle such a page fault. It is intended to support
 /// "zero cost" null checks in managed languages by allowing LLVM to fold
 /// comparisons into existing memory operations.
-HANDLE_TARGET_OPCODE(FAULTING_LOAD_OP)
+HANDLE_TARGET_OPCODE(FAULTING_OP)

 /// Wraps a machine instruction to add patchability constraints. An
 /// instruction wrapped in PATCHABLE_OP has to either have a minimum
diff --git a/llvm/lib/CodeGen/FaultMaps.cpp b/llvm/lib/CodeGen/FaultMaps.cpp
index 2acafaf..374d6c7 100644
--- a/llvm/lib/CodeGen/FaultMaps.cpp
+++ b/llvm/lib/CodeGen/FaultMaps.cpp
@@ -110,6 +110,10 @@ const char *FaultMaps::faultTypeToString(FaultMaps::FaultKind FT) {

   case FaultMaps::FaultingLoad:
     return "FaultingLoad";
+  case FaultMaps::FaultingLoadStore:
+    return "FaultingLoadStore";
+  case FaultMaps::FaultingStore:
+    return "FaultingStore";
   }
 }

diff --git a/llvm/lib/CodeGen/ImplicitNullChecks.cpp b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
index 0a9b9a9..fc80f8f 100644
--- a/llvm/lib/CodeGen/ImplicitNullChecks.cpp
+++ b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
@@ -22,6 +22,7 @@
 // With the help of a runtime that understands the .fault_maps section,
 // faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs
 // a page fault.
+// Store instructions are also supported.
 //
 //===----------------------------------------------------------------------===//

@@ -29,6 +30,7 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/CodeGen/FaultMaps.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineMemOperand.h"
@@ -154,8 +156,8 @@ class ImplicitNullChecks : public MachineFunctionPass {
   bool analyzeBlockForNullChecks(MachineBasicBlock &MBB,
                                  SmallVectorImpl<NullCheck> &NullCheckList);

-  MachineInstr *insertFaultingLoad(MachineInstr *LoadMI, MachineBasicBlock *MBB,
-                                   MachineBasicBlock *HandlerMBB);
+  MachineInstr *insertFaultingInstr(MachineInstr *MI, MachineBasicBlock *MBB,
+                                    MachineBasicBlock *HandlerMBB);
   void rewriteNullChecks(ArrayRef<NullCheck> NullCheckList);

   enum SuitabilityResult { SR_Suitable, SR_Unsuitable, SR_Impossible };
@@ -165,16 +167,18 @@ class ImplicitNullChecks : public MachineFunctionPass {
   /// \p MI cannot be used to null check and SR_Impossible if there is
   /// no point in continuing the search, since no later instruction will
   /// be usable either. \p PrevInsts is the set of instructions seen since
-  /// the explicit null check on \p PointerReg.
+  /// the explicit null check on \p PointerReg. \p SeenLoad means that a load
+  /// instruction has been observed in the \p PrevInsts set.
   SuitabilityResult isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
-                                       ArrayRef<MachineInstr *> PrevInsts);
+                                       ArrayRef<MachineInstr *> PrevInsts,
+                                       bool &SeenLoad);

   /// Return true if \p FaultingMI can be hoisted from after the
   /// instructions in \p InstsSeenSoFar to before them. Set \p Dependence to a
   /// non-null value if we also need to (and legally can) hoist a dependency.
-  bool canHoistLoadInst(MachineInstr *FaultingMI, unsigned PointerReg,
-                        ArrayRef<MachineInstr *> InstsSeenSoFar,
-                        MachineBasicBlock *NullSucc, MachineInstr *&Dependence);
+  bool canHoistInst(MachineInstr *FaultingMI, unsigned PointerReg,
+                    ArrayRef<MachineInstr *> InstsSeenSoFar,
+                    MachineBasicBlock *NullSucc, MachineInstr *&Dependence);

 public:
   static char ID;
@@ -198,7 +202,7 @@ public:
 }

 bool ImplicitNullChecks::canHandle(const MachineInstr *MI) {
-  if (MI->isCall() || MI->mayStore() || MI->hasUnmodeledSideEffects())
+  if (MI->isCall() || MI->hasUnmodeledSideEffects())
     return false;
   auto IsRegMask = [](const MachineOperand &MO) { return MO.isRegMask(); };
   (void)IsRegMask;
@@ -290,22 +294,36 @@ static bool AnyAliasLiveIn(const TargetRegisterInfo *TRI,

 ImplicitNullChecks::SuitabilityResult
 ImplicitNullChecks::isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
-                                       ArrayRef<MachineInstr *> PrevInsts) {
+                                       ArrayRef<MachineInstr *> PrevInsts,
+                                       bool &SeenLoad) {
   int64_t Offset;
   unsigned BaseReg;

+  // First, if this is a store and we have already seen a load, bail out:
+  // we will not be able to re-order the load-store pair without using
+  // alias analysis.
+  if (SeenLoad && MI.mayStore())
+    return SR_Impossible;
+
+  SeenLoad = SeenLoad || MI.mayLoad();
+
+  // Without alias analysis we cannot re-order a store with anything,
+  // so if this instruction is not a candidate we should stop.
+  SuitabilityResult Unsuitable = MI.mayStore() ? SR_Impossible : SR_Unsuitable;
+
   if (!TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI) ||
       BaseReg != PointerReg)
-    return SR_Unsuitable;
+    return Unsuitable;

-  // We want the load to be issued at a sane offset from PointerReg, so that
-  // if PointerReg is null then the load reliably page faults.
-  if (!(MI.mayLoad() && !MI.isPredicable() && Offset < PageSize))
-    return SR_Unsuitable;
+  // We want the memory access to be issued at a sane offset from PointerReg,
+  // so that if PointerReg is null then the access reliably page faults.
+  if (!((MI.mayLoad() || MI.mayStore()) && !MI.isPredicable() &&
+        Offset < PageSize))
+    return Unsuitable;

-  // Finally, we need to make sure that the load instruction actually is
-  // loading from PointerReg, and there isn't some re-definition of PointerReg
-  // between the compare and the load.
+  // Finally, we need to make sure that the access instruction actually is
+  // accessing memory through PointerReg, and there isn't some re-definition
+  // of PointerReg between the compare and the memory access.
+  // If PointerReg has been redefined before then there is no point in
+  // continuing the search, since this condition will fail for any further
+  // instruction.
   for (auto *PrevMI : PrevInsts)
@@ -317,10 +335,11 @@ ImplicitNullChecks::isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
   return SR_Suitable;
 }

-bool ImplicitNullChecks::canHoistLoadInst(
-    MachineInstr *FaultingMI, unsigned PointerReg,
-    ArrayRef<MachineInstr *> InstsSeenSoFar, MachineBasicBlock *NullSucc,
-    MachineInstr *&Dependence) {
+bool ImplicitNullChecks::canHoistInst(MachineInstr *FaultingMI,
+                                      unsigned PointerReg,
+                                      ArrayRef<MachineInstr *> InstsSeenSoFar,
+                                      MachineBasicBlock *NullSucc,
+                                      MachineInstr *&Dependence) {
   auto DepResult = computeDependence(FaultingMI, InstsSeenSoFar);
   if (!DepResult.CanReorder)
     return false;
@@ -484,17 +503,19 @@ bool ImplicitNullChecks::analyzeBlockForNullChecks(
   const unsigned PointerReg = MBP.LHS.getReg();

   SmallVector<MachineInstr *, 8> InstsSeenSoFar;
+  bool SeenLoad = false;

   for (auto &MI : *NotNullSucc) {
     if (!canHandle(&MI) || InstsSeenSoFar.size() >= MaxInstsToConsider)
       return false;

     MachineInstr *Dependence;
-    SuitabilityResult SR = isSuitableMemoryOp(MI, PointerReg, InstsSeenSoFar);
+    SuitabilityResult SR =
+        isSuitableMemoryOp(MI, PointerReg, InstsSeenSoFar, SeenLoad);
     if (SR == SR_Impossible)
       return false;
-    if (SR == SR_Suitable && canHoistLoadInst(&MI, PointerReg, InstsSeenSoFar,
-                                              NullSucc, Dependence)) {
+    if (SR == SR_Suitable &&
+        canHoistInst(&MI, PointerReg, InstsSeenSoFar, NullSucc, Dependence)) {
       NullCheckList.emplace_back(&MI, MBP.ConditionDef, &MBB, NotNullSucc,
                                  NullSucc, Dependence);
       return true;
@@ -506,36 +527,42 @@ bool ImplicitNullChecks::analyzeBlockForNullChecks(
   return false;
 }

-/// Wrap a machine load instruction, LoadMI, into a FAULTING_LOAD_OP machine
-/// instruction. The FAULTING_LOAD_OP instruction does the same load as LoadMI
-/// (defining the same register), and branches to HandlerMBB if the load
-/// faults. The FAULTING_LOAD_OP instruction is inserted at the end of MBB.
-MachineInstr *
-ImplicitNullChecks::insertFaultingLoad(MachineInstr *LoadMI,
-                                       MachineBasicBlock *MBB,
-                                       MachineBasicBlock *HandlerMBB) {
+/// Wrap a machine instruction, MI, into a FAULTING_OP machine instruction.
+/// The FAULTING_OP instruction does the same load/store as MI
+/// (defining the same register), and branches to HandlerMBB if the memory
+/// access faults. The FAULTING_OP instruction is inserted at the end of MBB.
+MachineInstr *ImplicitNullChecks::insertFaultingInstr(
+    MachineInstr *MI, MachineBasicBlock *MBB, MachineBasicBlock *HandlerMBB) {
   const unsigned NoRegister = 0; // Guaranteed to be the NoRegister value for
                                  // all targets.

   DebugLoc DL;
-  unsigned NumDefs = LoadMI->getDesc().getNumDefs();
+  unsigned NumDefs = MI->getDesc().getNumDefs();
   assert(NumDefs <= 1 && "other cases unhandled!");

   unsigned DefReg = NoRegister;
   if (NumDefs != 0) {
-    DefReg = LoadMI->defs().begin()->getReg();
-    assert(std::distance(LoadMI->defs().begin(), LoadMI->defs().end()) == 1 &&
+    DefReg = MI->defs().begin()->getReg();
+    assert(std::distance(MI->defs().begin(), MI->defs().end()) == 1 &&
            "expected exactly one def!");
   }

-  auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_LOAD_OP), DefReg)
+  FaultMaps::FaultKind FK;
+  if (MI->mayLoad())
+    FK = MI->mayStore() ? FaultMaps::FaultingLoadStore
+                        : FaultMaps::FaultingLoad;
+  else
+    FK = FaultMaps::FaultingStore;
+
+  auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_OP), DefReg)
+                 .addImm(FK)
                  .addMBB(HandlerMBB)
-                 .addImm(LoadMI->getOpcode());
+                 .addImm(MI->getOpcode());

-  for (auto &MO : LoadMI->uses())
+  for (auto &MO : MI->uses())
     MIB.add(MO);

-  MIB.setMemRefs(LoadMI->memoperands_begin(), LoadMI->memoperands_end());
+  MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

   return MIB;
 }
@@ -556,18 +583,18 @@ void ImplicitNullChecks::rewriteNullChecks(
       NC.getCheckBlock()->insert(NC.getCheckBlock()->end(), DepMI);
     }

-    // Insert a faulting load where the conditional branch was originally. We
-    // check earlier ensures that this bit of code motion is legal. We do not
-    // touch the successors list for any basic block since we haven't changed
-    // control flow, we've just made it implicit.
-    MachineInstr *FaultingLoad = insertFaultingLoad(
+    // Insert a faulting instruction where the conditional branch was
+    // originally. The check we performed earlier ensures that this bit of
+    // code motion is legal. We do not touch the successors list for any
+    // basic block since we haven't changed control flow, we've just made it
+    // implicit.
+    MachineInstr *FaultingInstr = insertFaultingInstr(
        NC.getMemOperation(), NC.getCheckBlock(), NC.getNullSucc());
     // Now the values defined by MemOperation, if any, are live-in of
     // the block of MemOperation.
-    // The original load operation may define implicit-defs alongside
-    // the loaded value.
+    // The original operation may define implicit-defs alongside
+    // the value it produces.
     MachineBasicBlock *MBB = NC.getMemOperation()->getParent();
-    for (const MachineOperand &MO : FaultingLoad->operands()) {
+    for (const MachineOperand &MO : FaultingInstr->operands()) {
       if (!MO.isReg() || !MO.isDef())
         continue;
       unsigned Reg = MO.getReg();
diff --git a/llvm/lib/Target/X86/X86AsmPrinter.h b/llvm/lib/Target/X86/X86AsmPrinter.h
index bb15fd7..44bc373 100644
--- a/llvm/lib/Target/X86/X86AsmPrinter.h
+++ b/llvm/lib/Target/X86/X86AsmPrinter.h
@@ -81,7 +81,7 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
   void LowerSTACKMAP(const MachineInstr &MI);
   void LowerPATCHPOINT(const MachineInstr &MI, X86MCInstLower &MCIL);
   void LowerSTATEPOINT(const MachineInstr &MI, X86MCInstLower &MCIL);
-  void LowerFAULTING_LOAD_OP(const MachineInstr &MI, X86MCInstLower &MCIL);
+  void LowerFAULTING_OP(const MachineInstr &MI, X86MCInstLower &MCIL);
   void LowerPATCHABLE_OP(const MachineInstr &MI, X86MCInstLower &MCIL);

   void LowerTlsAddr(X86MCInstLower &MCInstLowering, const MachineInstr &MI);
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index fd4626c..6594953 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -894,30 +894,34 @@ void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
   SM.recordStatepoint(MI);
 }

-void X86AsmPrinter::LowerFAULTING_LOAD_OP(const MachineInstr &MI,
-                                          X86MCInstLower &MCIL) {
-  // FAULTING_LOAD_OP <def>, <handler label>, <load opcode>, <load operands>
+void X86AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI,
+                                     X86MCInstLower &MCIL) {
+  // FAULTING_OP <def>, <fault kind>, <handler label>, <opcode>,
+  //             <operands>

-  unsigned LoadDefRegister = MI.getOperand(0).getReg();
-  MCSymbol *HandlerLabel = MI.getOperand(1).getMBB()->getSymbol();
-  unsigned LoadOpcode = MI.getOperand(2).getImm();
-  unsigned LoadOperandsBeginIdx = 3;
+  unsigned DefRegister = FaultingMI.getOperand(0).getReg();
+  FaultMaps::FaultKind FK =
+      static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
+  MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
+  unsigned Opcode = FaultingMI.getOperand(3).getImm();
+  unsigned OperandsBeginIdx = 4;

-  FM.recordFaultingOp(FaultMaps::FaultingLoad, HandlerLabel);
+  assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
+  FM.recordFaultingOp(FK, HandlerLabel);

-  MCInst LoadMI;
-  LoadMI.setOpcode(LoadOpcode);
+  MCInst MI;
+  MI.setOpcode(Opcode);

-  if (LoadDefRegister != X86::NoRegister)
-    LoadMI.addOperand(MCOperand::createReg(LoadDefRegister));
+  if (DefRegister != X86::NoRegister)
+    MI.addOperand(MCOperand::createReg(DefRegister));

-  for (auto I = MI.operands_begin() + LoadOperandsBeginIdx,
-            E = MI.operands_end();
+  for (auto I = FaultingMI.operands_begin() + OperandsBeginIdx,
+            E = FaultingMI.operands_end();
        I != E; ++I)
-    if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, *I))
-      LoadMI.addOperand(MaybeOperand.getValue());
+    if (auto MaybeOperand = MCIL.LowerMachineOperand(&FaultingMI, *I))
+      MI.addOperand(MaybeOperand.getValue());

-  OutStreamer->EmitInstruction(LoadMI, getSubtargetInfo());
+  OutStreamer->EmitInstruction(MI, getSubtargetInfo());
 }

 void X86AsmPrinter::LowerFENTRY_CALL(const MachineInstr &MI,
@@ -1388,8 +1392,8 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
   case TargetOpcode::STATEPOINT:
     return LowerSTATEPOINT(*MI, MCInstLowering);

-  case TargetOpcode::FAULTING_LOAD_OP:
-    return LowerFAULTING_LOAD_OP(*MI, MCInstLowering);
+  case TargetOpcode::FAULTING_OP:
+    return LowerFAULTING_OP(*MI, MCInstLowering);

   case TargetOpcode::FENTRY_CALL:
     return LowerFENTRY_CALL(*MI, MCInstLowering);
diff --git a/llvm/test/CodeGen/X86/block-placement.mir b/llvm/test/CodeGen/X86/block-placement.mir
index 7d13c3e..c0cd705 100644
--- a/llvm/test/CodeGen/X86/block-placement.mir
+++ b/llvm/test/CodeGen/X86/block-placement.mir
@@ -46,7 +46,7 @@ liveins:
   - { reg: '%rdi' }
   - { reg: '%esi' }

-# CHECK: %eax = FAULTING_LOAD_OP %bb.3.null, 1684, killed %rdi, 1, _, 0, _ :: (load 4 from %ir.ptr)
+# CHECK: %eax = FAULTING_OP 1, %bb.3.null, 1684, killed %rdi, 1, _, 0, _ :: (load 4 from %ir.ptr)
 # CHECK-NEXT: JMP_1 %bb.2.not_null
 # CHECK: bb.3.null:
 # CHECK: bb.4.right:
@@ -66,7 +66,7 @@ body: |
     successors: %bb.2.null(0x7ffff800), %bb.4.not_null(0x00000800)
    liveins: %rdi

-    %eax = FAULTING_LOAD_OP %bb.2.null, 1684, killed %rdi, 1, _, 0, _ :: (load 4 from %ir.ptr)
+    %eax = FAULTING_OP 1, %bb.2.null, 1684, killed %rdi, 1, _, 0, _ :: (load 4 from %ir.ptr)
     JMP_1 %bb.4.not_null

   bb.4.not_null:
diff --git a/llvm/test/CodeGen/X86/implicit-null-check.ll b/llvm/test/CodeGen/X86/implicit-null-check.ll
index 286b4fa..ee79566 100644
--- a/llvm/test/CodeGen/X86/implicit-null-check.ll
+++ b/llvm/test/CodeGen/X86/implicit-null-check.ll
@@ -162,6 +162,26 @@ define i32 @imp_null_check_gep_load_with_use_dep(i32* %x, i32 %a) {
   ret i32 %z
 }

+define void @imp_null_check_store(i32* %x) {
+; CHECK-LABEL: _imp_null_check_store:
+; CHECK: [[BB0_imp_null_check_store:L[^:]+]]:
+; CHECK: movl $1, (%rdi)
+; CHECK: retq
+; CHECK: [[BB1_imp_null_check_store:LBB6_[0-9]+]]:
+; CHECK: retq
+
+ entry:
+  %c = icmp eq i32* %x, null
+  br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null:
+  ret void
+
+ not_null:
+  store i32 1, i32* %x
+  ret void
+}
+
 !0 = !{}

 ; CHECK-LABEL: __LLVM_FaultMaps:
@@ -174,7 +194,7 @@ define i32 @imp_null_check_gep_load_with_use_dep(i32* %x, i32 %a) {
 ; CHECK-NEXT: .short 0

 ; # functions:
-; CHECK-NEXT: .long 6
+; CHECK-NEXT: .long 7

 ; FunctionAddr:
 ; CHECK-NEXT: .quad _imp_null_check_add_result
@@ -242,6 +262,19 @@ define i32 @imp_null_check_gep_load_with_use_dep(i32* %x, i32 %a) {
 ; CHECK-NEXT: .long [[BB1_imp_null_check_load]]-_imp_null_check_load

 ; FunctionAddr:
+; CHECK-NEXT: .quad _imp_null_check_store
+; NumFaultingPCs
+; CHECK-NEXT: .long 1
+; Reserved:
+; CHECK-NEXT: .long 0
+; Fault[0].Type:
+; CHECK-NEXT: .long 3
+; Fault[0].FaultOffset:
+; CHECK-NEXT: .long [[BB0_imp_null_check_store]]-_imp_null_check_store
+; Fault[0].HandlerOffset:
+; CHECK-NEXT: .long [[BB1_imp_null_check_store]]-_imp_null_check_store
+
+; FunctionAddr:
 ; CHECK-NEXT: .quad _imp_null_check_via_mem_comparision
 ; NumFaultingPCs
 ; CHECK-NEXT: .long 1
@@ -256,7 +289,7 @@ define i32 @imp_null_check_gep_load_with_use_dep(i32* %x, i32 %a) {

 ; OBJDUMP: FaultMap table:
 ; OBJDUMP-NEXT: Version: 0x1
-; OBJDUMP-NEXT: NumFunctions: 6
+; OBJDUMP-NEXT: NumFunctions: 7
 ; OBJDUMP-NEXT: FunctionAddress: 0x000000, NumFaultingPCs: 1
 ; OBJDUMP-NEXT: Fault kind: FaultingLoad, faulting PC offset: 0, handling PC offset: 5
 ; OBJDUMP-NEXT: FunctionAddress: 0x000000, NumFaultingPCs: 1
@@ -267,3 +300,7 @@ define i32 @imp_null_check_gep_load_with_use_dep(i32* %x, i32 %a) {
 ; OBJDUMP-NEXT: Fault kind: FaultingLoad, faulting PC offset: 0, handling PC offset: 7
 ; OBJDUMP-NEXT: FunctionAddress: 0x000000, NumFaultingPCs: 1
 ; OBJDUMP-NEXT: Fault kind: FaultingLoad, faulting PC offset: 0, handling PC offset: 3
+; OBJDUMP-NEXT: FunctionAddress: 0x000000, NumFaultingPCs: 1
+; OBJDUMP-NEXT: Fault kind: FaultingStore, faulting PC offset: 0, handling PC offset: 7
+; OBJDUMP-NEXT: FunctionAddress: 0x000000, NumFaultingPCs: 1
+; OBJDUMP-NEXT: Fault kind: FaultingLoad, faulting PC offset: 0, handling PC offset: 11
diff --git a/llvm/test/CodeGen/X86/implicit-null-checks.mir b/llvm/test/CodeGen/X86/implicit-null-checks.mir
index 97a1d6f..e5cab15 100644
--- a/llvm/test/CodeGen/X86/implicit-null-checks.mir
+++ b/llvm/test/CodeGen/X86/implicit-null-checks.mir
@@ -143,8 +143,6 @@
     ret i32 0
   }

-  attributes #0 = { "target-features"="+bmi,+bmi2" }
-
   define i32 @imp_null_check_gep_load_with_use_dep(i32* %x, i32 %a) {
   entry:
     %c = icmp eq i32* %x, null
@@ -174,6 +172,177 @@
     ret i32 undef
   }

+  define void @inc_store(i32* %ptr, i32 %val) {
+  entry:
+    %ptr_is_null = icmp eq i32* %ptr, null
+    br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+  not_null:
+    ret void
+
+  is_null:
+    ret void
+  }
+
+  define void @inc_store_plus_offset(i32* %ptr, i32 %val) {
+  entry:
+    %ptr_is_null = icmp eq i32* %ptr, null
+    br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+  not_null:
+    ret void
+
+  is_null:
+    ret void
+  }
+
+  define void @inc_store_with_dep(i32* %ptr, i32 %val) {
+  entry:
+    %ptr_is_null = icmp eq i32* %ptr, null
+    br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+  not_null:
+    ret void
+
+  is_null:
+    ret void
+  }
+
+  define i32 @inc_store_with_dep_in_null(i32* %ptr, i32 %val) {
+  entry:
+    %ptr_is_null = icmp eq i32* %ptr, null
+    br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+  not_null:
+    ret i32 undef
+
+  is_null:
+    ret i32 undef
+  }
+
+  define void @inc_store_with_volatile(i32* %ptr, i32 %val) {
+  entry:
+    %ptr_is_null = icmp eq i32* %ptr, null
+    br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+  not_null:
+    ret void
+
+  is_null:
+    ret void
+  }
+
+  define void @inc_store_with_two_dep(i32* %ptr, i32 %val) {
+  entry:
+    %ptr_is_null = icmp eq i32* %ptr, null
+    br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+  not_null:
+    ret void
+
+  is_null:
+    ret void
+  }
+
+  define void @inc_store_with_redefined_base(i32* %ptr, i32 %val) {
+  entry:
+    %ptr_is_null = icmp eq i32* %ptr, null
+    br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+  not_null:
+    ret void
+
+  is_null:
+    ret void
+  }
+
+  define i32 @inc_store_with_reused_base(i32* %ptr, i32 %val) {
+  entry:
+    %ptr_is_null = icmp eq i32* %ptr, null
+    br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+  not_null:
+    ret i32 undef
+
+  is_null:
+    ret i32 undef
+  }
+
+  define i32 @inc_store_across_call(i32* %ptr) {
+  entry:
+    %ptr_is_null = icmp eq i32* %ptr, null
+    br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+  not_null:
+    call void @f()
+    ret i32 undef
+
+  is_null:
+    ret i32 undef
+  }
+
+  define i32 @inc_store_with_dep_in_dep(i32* %ptr, i32 %val) {
+  entry:
+    %ptr_is_null = icmp eq i32* %ptr, null
+    br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+  not_null:
+    ret i32 undef
+
+  is_null:
+    ret i32 undef
+  }
+
+  define i32 @inc_store_with_load_over_store(i32* %ptr, i32* %ptr2) {
+  entry:
+    %ptr_is_null = icmp eq i32* %ptr, null
+    br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+  not_null:
+    ret i32 undef
+
+  is_null:
+    ret i32 undef
+  }
+
+  define i32 @inc_store_with_store_over_load(i32* %ptr, i32* %ptr2) {
+  entry:
+    %ptr_is_null = icmp eq i32* %ptr, null
+    br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+  not_null:
+    ret i32 undef
+
+  is_null:
+    ret i32 undef
+  }
+
+  define void @inc_store_with_store_over_store(i32* %ptr, i32* %ptr2) {
+  entry:
+    %ptr_is_null = icmp eq i32* %ptr, null
+    br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+  not_null:
+    ret void
+
+  is_null:
+    ret void
+  }
+
+  define void @inc_store_with_load_and_store(i32* %ptr, i32* %ptr2) {
+  entry:
+    %ptr_is_null = icmp eq i32* %ptr, null
+    br i1 %ptr_is_null, label %is_null, label %not_null, !make.implicit !0
+
+  not_null:
+    ret void
+
+  is_null:
+    ret void
+  }
+
+  attributes #0 = { "target-features"="+bmi,+bmi2" }
+
   !0 = !{}

 ...
 ---
@@ -186,7 +355,7 @@ liveins:
   - { reg: '%esi' }
 # CHECK: bb.0.entry:
 # CHECK: %eax = MOV32ri 2200000
-# CHECK-NEXT: %eax = FAULTING_LOAD_OP %bb.3.is_null, {{[0-9]+}}, killed %eax, killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (load 4 from %ir.x)
+# CHECK-NEXT: %eax = FAULTING_OP 1, %bb.3.is_null, {{[0-9]+}}, killed %eax, killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (load 4 from %ir.x)
 # CHECK-NEXT: JMP_1 %bb.1.not_null

 body: |
@@ -360,7 +529,7 @@ liveins:
   - { reg: '%rsi' }
 # CHECK: bb.0.entry:
 # CHECK: %rbx = MOV64rr %rdx
-# CHECK-NEXT: %rdi = FAULTING_LOAD_OP %bb.3.is_null, {{[0-9]+}}, killed %rbx, killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (load 4 from %ir.x)
+# CHECK-NEXT: %rdi = FAULTING_OP 1, %bb.3.is_null, {{[0-9]+}}, killed %rbx, killed %rdi, 1, _, 0, _, implicit-def dead %eflags :: (load 4 from %ir.x)

 body: |
   bb.0.entry:
@@ -405,7 +574,7 @@ calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx',
                         '%r12b', '%r13b', '%r14b', '%r15b', '%r12d', '%r13d',
                         '%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ]
 # CHECK: body:
-# CHECK-NOT: FAULTING_LOAD_OP
+# CHECK-NOT: FAULTING_OP
 # CHECK: bb.1.stay:
 # CHECK: CALL64pcrel32
 body: |
@@ -438,7 +607,7 @@ body: |
 name: dependency_live_in_hazard
 # CHECK-LABEL: name: dependency_live_in_hazard
 # CHECK: bb.0.entry:
-# CHECK-NOT: FAULTING_LOAD_OP
+# CHECK-NOT: FAULTING_OP
 # CHECK: bb.1.not_null:

 # Make sure that the BEXTR32rm instruction below is not used to emit
@@ -474,9 +643,9 @@ body: |
 ...
 ---
 name: use_alternate_load_op
-# CHECK-LABEL: use_alternate_load_op
+# CHECK-LABEL: name: use_alternate_load_op
 # CHECK: bb.0.entry:
-# CHECK: %r10 = FAULTING_LOAD_OP %bb.2.is_null, {{[0-9]+}}, killed %rdi, 1, _, 0, _
+# CHECK: %r10 = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, killed %rdi, 1, _, 0, _
 # CHECK-NEXT: JMP_1 %bb.1.not_null
 # CHECK: bb.1.not_null

@@ -508,8 +677,9 @@ body: |
 ...
 ---
 name: imp_null_check_gep_load_with_use_dep
+# CHECK-LABEL: name: imp_null_check_gep_load_with_use_dep
 # CHECK: bb.0.entry:
-# CHECK: %eax = FAULTING_LOAD_OP %bb.2.is_null, {{[0-9]+}}, killed %rdi, 1, _, 0, _, implicit-def %rax :: (load 4 from %ir.x)
+# CHECK: %eax = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, killed %rdi, 1, _, 0, _, implicit-def %rax :: (load 4 from %ir.x)
 # CHECK-NEXT: JMP_1 %bb.1.not_null
 alignment: 4
 tracksRegLiveness: true
@@ -539,9 +709,10 @@ body: |
 ...
 ---
 name: imp_null_check_load_with_base_sep
+# CHECK-LABEL: name: imp_null_check_load_with_base_sep
 # CHECK: bb.0.entry:
 # CHECK: %rsi = ADD64rr %rsi, %rdi, implicit-def dead %eflags
-# CHECK-NEXT: %esi = FAULTING_LOAD_OP %bb.2.is_null, {{[0-9]+}}, killed %esi, %rdi, 1, _, 0, _, implicit-def dead %eflags
+# CHECK-NEXT: %esi = FAULTING_OP 1, %bb.2.is_null, {{[0-9]+}}, killed %esi, %rdi, 1, _, 0, _, implicit-def dead %eflags
 # CHECK-NEXT: JMP_1 %bb.1.not_null
 alignment: 4
 tracksRegLiveness: true
@@ -569,3 +740,470 @@ body: |
     RETQ %eax
 ...
+---
+name: inc_store
+# CHECK-LABEL: name: inc_store
+# CHECK: bb.0.entry:
+# CHECK: _ = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, killed %rdi, 1, _, 0, _, killed %rsi
+# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+  - { reg: '%rdi' }
+  - { reg: '%rsi' }
+body: |
+  bb.0.entry:
+    successors: %bb.2.is_null, %bb.1.not_null
+    liveins: %rdi, %rsi
+
+    TEST64rr %rdi, %rdi, implicit-def %eflags
+    JE_1 %bb.2.is_null, implicit killed %eflags
+
+  bb.1.not_null:
+    liveins: %rdi, %rsi
+
+    MOV64mr killed %rdi, 1, _, 0, _, killed %rsi
+    RETQ
+
+  bb.2.is_null:
+    RETQ
+
+...
+---
+name: inc_store_plus_offset
+# CHECK-LABEL: inc_store_plus_offset
+# CHECK: bb.0.entry:
+# CHECK: _ = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, killed %rdi, 1, _, 16, _, killed %rsi
+# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+  - { reg: '%rdi' }
+  - { reg: '%rsi' }
+body: |
+  bb.0.entry:
+    successors: %bb.2.is_null, %bb.1.not_null
+    liveins: %rdi, %rsi
+
+    TEST64rr %rdi, %rdi, implicit-def %eflags
+    JE_1 %bb.2.is_null, implicit killed %eflags
+
+  bb.1.not_null:
+    liveins: %rdi, %rsi
+
+    MOV64mr killed %rdi, 1, _, 16, _, killed %rsi
+    RETQ
+
+  bb.2.is_null:
+    RETQ
+
+...
+---
+name: inc_store_with_dep
+# CHECK-LABEL: inc_store_with_dep
+# CHECK: bb.0.entry:
+# CHECK: %esi = ADD32rr killed %esi, killed %esi, implicit-def dead %eflags
+# CHECK-NEXT: _ = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, killed %rdi, 1, _, 16, _, killed %esi
+# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+  - { reg: '%rdi' }
+  - { reg: '%rsi' }
+body: |
+  bb.0.entry:
+    successors: %bb.2.is_null, %bb.1.not_null
+    liveins: %rdi, %rsi
+
+    TEST64rr %rdi, %rdi, implicit-def %eflags
+    JE_1 %bb.2.is_null, implicit killed %eflags
+
+  bb.1.not_null:
+    liveins: %rdi, %rsi
+
+    %esi = ADD32rr killed %esi, killed %esi, implicit-def dead %eflags
+    MOV32mr killed %rdi, 1, _, 16, _, killed %esi
+    RETQ
+
+  bb.2.is_null:
+    RETQ
+
+...
+---
+name: inc_store_with_dep_in_null
+# CHECK-LABEL: inc_store_with_dep_in_null
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+  - { reg: '%rdi' }
+  - { reg: '%rsi' }
+body: |
+  bb.0.entry:
+    successors: %bb.2.is_null, %bb.1.not_null
+    liveins: %rdi, %rsi
+
+    TEST64rr %rdi, %rdi, implicit-def %eflags
+    JE_1 %bb.2.is_null, implicit killed %eflags
+
+  bb.1.not_null:
+    liveins: %rdi, %rsi
+
+    %esi = ADD32rr %esi, %esi, implicit-def dead %eflags
+    MOV32mr killed %rdi, 1, _, 0, _, %esi
+    %eax = MOV32rr killed %esi
+    RETQ %eax
+
+  bb.2.is_null:
+    liveins: %rsi
+
+    %eax = MOV32rr killed %esi
+    RETQ %eax
+
+...
+---
+name: inc_store_with_volatile
+# CHECK-LABEL: inc_store_with_volatile
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+  - { reg: '%rdi' }
+  - { reg: '%rsi' }
+body: |
+  bb.0.entry:
+    successors: %bb.2.is_null, %bb.1.not_null
+    liveins: %rdi, %rsi
+
+    TEST64rr %rdi, %rdi, implicit-def %eflags
+    JE_1 %bb.2.is_null, implicit killed %eflags
+
+  bb.1.not_null:
+    liveins: %rdi, %rsi
+
+    MOV32mr killed %rdi, 1, _, 0, _, killed %esi :: (volatile store 4 into %ir.ptr)
+    RETQ
+
+  bb.2.is_null:
+    RETQ
+
+...
+---
+name: inc_store_with_two_dep
+# CHECK-LABEL: inc_store_with_two_dep
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+  - { reg: '%rdi' }
+  - { reg: '%rsi' }
+body: |
+  bb.0.entry:
+    successors: %bb.2.is_null, %bb.1.not_null
+    liveins: %rdi, %rsi
+
+    TEST64rr %rdi, %rdi, implicit-def %eflags
+    JE_1 %bb.2.is_null, implicit killed %eflags
+
+  bb.1.not_null:
+    liveins: %rdi, %rsi
+
+    %esi = ADD32rr killed %esi, killed %esi, implicit-def dead %eflags
+    %esi = ADD32ri killed %esi, 15, implicit-def dead %eflags
+    MOV32mr killed %rdi, 1, _, 16, _, killed %esi
+    RETQ
+
+  bb.2.is_null:
+    RETQ
+
+...
+---
+name: inc_store_with_redefined_base
+# CHECK-LABEL: inc_store_with_redefined_base
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+  - { reg: '%rdi' }
+  - { reg: '%rsi' }
+body: |
+  bb.0.entry:
+    successors: %bb.2.is_null, %bb.1.not_null
+    liveins: %rdi, %rsi
+
+    TEST64rr %rdi, %rdi, implicit-def %eflags
+    JE_1 %bb.2.is_null, implicit killed %eflags
+
+  bb.1.not_null:
+    liveins: %rdi, %rsi
+
+    %rdi = ADD64rr killed %rdi, killed %rdi, implicit-def dead %eflags
+    MOV32mr killed %rdi, 1, _, 16, _, killed %esi
+    RETQ
+
+  bb.2.is_null:
+    RETQ
+
+...
+---
+name: inc_store_with_reused_base
+# CHECK-LABEL: inc_store_with_reused_base
+# CHECK: bb.0.entry:
+# CHECK: _ = FAULTING_OP 3, %bb.2.is_null, {{[0-9]+}}, killed %rdi, 1, _, 16, _, killed %esi
+# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+  - { reg: '%rdi' }
+  - { reg: '%rsi' }
+body: |
+  bb.0.entry:
+    successors: %bb.2.is_null, %bb.1.not_null
+    liveins: %rdi, %rsi
+
+    TEST64rr %rdi, %rdi, implicit-def %eflags
+    JE_1 %bb.2.is_null, implicit killed %eflags
+
+  bb.1.not_null:
+    liveins: %rdi, %rsi
+
+    %rax = MOV64rr %rdi
+    MOV32mr killed %rdi, 1, _, 16, _, killed %esi
+    RETQ %eax
+
+  bb.2.is_null:
+    %rax = XOR64rr undef %rax, undef %rax, implicit-def dead %eflags
+    RETQ %eax
+
+...
+---
+name: inc_store_across_call
+# CHECK-LABEL: inc_store_across_call
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rbx, %rbx, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+  - { reg: '%rdi' }
+calleeSavedRegisters: [ '%bh', '%bl', '%bp', '%bpl', '%bx', '%ebp', '%ebx',
+                        '%rbp', '%rbx', '%r12', '%r13', '%r14', '%r15',
+                        '%r12b', '%r13b', '%r14b', '%r15b', '%r12d', '%r13d',
+                        '%r14d', '%r15d', '%r12w', '%r13w', '%r14w', '%r15w' ]
+body: |
+  bb.0.entry:
+    successors: %bb.2.is_null, %bb.1.not_null
+    liveins: %rdi, %rbx
+
+    frame-setup PUSH64r killed %rbx, implicit-def %rsp, implicit %rsp
+    CFI_INSTRUCTION def_cfa_offset 16
+    CFI_INSTRUCTION offset %rbx, -16
+    %rbx = MOV64rr killed %rdi
+    TEST64rr %rbx, %rbx, implicit-def %eflags
+    JE_1 %bb.2.is_null, implicit killed %eflags
+
+  bb.1.not_null:
+    liveins: %rbx
+
+    CALL64pcrel32 @f, csr_64, implicit %rsp, implicit-def %rsp
+    MOV32mi %rbx, 1, _, 0, _, 20
+    %rax = MOV64rr killed %rbx
+    %rbx = POP64r implicit-def %rsp, implicit %rsp
+    RETQ %eax
+
+  bb.2.is_null:
+    %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
+    %rbx = POP64r implicit-def %rsp, implicit %rsp
+    RETQ %eax
+
+...
+---
+name: inc_store_with_dep_in_dep
+# CHECK-LABEL: inc_store_with_dep_in_dep
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+  - { reg: '%rdi' }
+  - { reg: '%rsi' }
+body: |
+  bb.0.entry:
+    successors: %bb.2.is_null, %bb.1.not_null
+    liveins: %rdi, %rsi
+
+    TEST64rr %rdi, %rdi, implicit-def %eflags
+    JE_1 %bb.2.is_null, implicit killed %eflags
+
+  bb.1.not_null:
+    liveins: %rdi, %rsi
+
+    %eax = MOV32rr %esi
+    %esi = ADD32ri killed %esi, 15, implicit-def dead %eflags
+    MOV32mr killed %rdi, 1, _, 0, _, killed %esi
+    RETQ %eax
+
+  bb.2.is_null:
+    %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
+    RETQ %eax
+
+...
+---
+name: inc_store_with_load_over_store
+# CHECK-LABEL: inc_store_with_load_over_store
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+  - { reg: '%rdi' }
+  - { reg: '%rsi' }
+body: |
+  bb.0.entry:
+    successors: %bb.2.is_null, %bb.1.not_null
+    liveins: %rdi, %rsi
+
+    TEST64rr %rdi, %rdi, implicit-def %eflags
+    JE_1 %bb.2.is_null, implicit killed %eflags
+
+  bb.1.not_null:
+    liveins: %rdi, %rsi
+
+    MOV32mi killed %rsi, 1, _, 0, _, 2
+    %eax = MOV32rm killed %rdi, 1, _, 0, _
+    RETQ %eax
+
+  bb.2.is_null:
+    %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
+    RETQ %eax
+
+...
+---
+name: inc_store_with_store_over_load
+# CHECK-LABEL: inc_store_with_store_over_load
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+  - { reg: '%rdi' }
+  - { reg: '%rsi' }
+body: |
+  bb.0.entry:
+    successors: %bb.2.is_null, %bb.1.not_null
+    liveins: %rdi, %rsi
+
+    TEST64rr %rdi, %rdi, implicit-def %eflags
+    JE_1 %bb.2.is_null, implicit killed %eflags
+
+  bb.1.not_null:
+    liveins: %rdi, %rsi
+
+    %eax = MOV32rm killed %rsi, 1, _, 0, _
+    MOV32mi killed %rdi, 1, _, 0, _, 2
+    RETQ %eax
+
+  bb.2.is_null:
+    %eax = XOR32rr undef %eax, undef %eax, implicit-def dead %eflags
+    RETQ %eax
+
+...
+---
+name: inc_store_with_store_over_store
+# CHECK-LABEL: inc_store_with_store_over_store
+# CHECK: bb.0.entry:
+# CHECK: TEST64rr %rdi, %rdi, implicit-def %eflags
+# CHECK-NEXT: JE_1 %bb.2.is_null, implicit killed %eflags
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+  - { reg: '%rdi' }
+  - { reg: '%rsi' }
+body: |
+  bb.0.entry:
+    successors: %bb.2.is_null, %bb.1.not_null
+    liveins: %rdi, %rsi
+
+    TEST64rr %rdi, %rdi, implicit-def %eflags
+    JE_1 %bb.2.is_null, implicit killed %eflags
+
+  bb.1.not_null:
+    liveins: %rdi, %rsi
+
+    MOV32mi killed %rsi, 1, _, 0, _, 3
+    MOV32mi killed %rdi, 1, _, 0, _, 2
+    RETQ
+
+  bb.2.is_null:
+    RETQ
+
+...
+---
+name: inc_store_with_load_and_store
+# CHECK-LABEL: inc_store_with_load_and_store
+# CHECK: bb.0.entry:
+# CHECK: _ = FAULTING_OP 2, %bb.2.is_null, {{[0-9]+}}, killed %rdi, 1, _, 0, _, killed %esi, implicit-def dead %eflags
+# CHECK-NEXT: JMP_1 %bb.1.not_null
+# CHECK: bb.1.not_null
+
+alignment: 4
+tracksRegLiveness: true
+liveins:
+  - { reg: '%rdi' }
+  - { reg: '%rsi' }
+body: |
+  bb.0.entry:
+    successors: %bb.2.is_null, %bb.1.not_null
+    liveins: %rdi, %rsi
+
+    TEST64rr %rdi, %rdi, implicit-def %eflags
+    JE_1 %bb.2.is_null, implicit killed %eflags
+
+  bb.1.not_null:
+    liveins: %rdi, %rsi
+
+    %esi = ADD32rr %esi, %esi, implicit-def dead %eflags
+    ADD32mr killed %rdi, 1, _, 0, _, killed %esi, implicit-def dead %eflags
+    RETQ
+
+  bb.2.is_null:
+    RETQ
+
+...
--
2.7.4
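
The conservative re-ordering rule stated in the summary (no alias analysis, so store-store, store-load and load-store pairs must keep their order) is concentrated in the SeenLoad logic added to isSuitableMemoryOp. The following standalone model restates that decision procedure; it is a simplified sketch for exposition only: MemOp, isSuitable, PageSize and the register constants are all invented here, whereas the real pass queries MachineInstr and TII->getMemOpBaseRegImmOfs.

#include <cstdint>
#include <cstdio>

// Simplified stand-in for illustration only; the real pass inspects
// MachineInstr, not this struct.
struct MemOp {
  bool MayLoad;
  bool MayStore;
  unsigned BaseReg; // register the address is based on
  int64_t Offset;   // immediate offset from BaseReg
};

enum Suitability { SR_Suitable, SR_Unsuitable, SR_Impossible };

// Mirrors the SeenLoad rule this patch adds to isSuitableMemoryOp: with no
// alias analysis, a store may never be re-ordered with an earlier load, and
// a store that is not itself the candidate cannot be stepped over either.
Suitability isSuitable(const MemOp &Op, unsigned PointerReg, int64_t PageSize,
                       bool &SeenLoad) {
  if (SeenLoad && Op.MayStore)
    return SR_Impossible; // would re-order a load-store pair

  SeenLoad = SeenLoad || Op.MayLoad;

  // If this store is rejected for any reason below, nothing later may be
  // re-ordered across it, so end the search instead of skipping it.
  Suitability Unsuitable = Op.MayStore ? SR_Impossible : SR_Unsuitable;

  if (Op.BaseReg != PointerReg)
    return Unsuitable;
  // The access must fault reliably when PointerReg is null, i.e. land in
  // the first (unmapped) page.
  if (!(Op.MayLoad || Op.MayStore) || Op.Offset >= PageSize)
    return Unsuitable;
  return SR_Suitable;
}

int main() {
  const unsigned RDI = 1, RSI = 2;
  bool SeenLoad = false;
  // Models inc_store_with_store_over_load: a load from %rsi, then a store
  // to the checked pointer %rdi.
  MemOp Load = {true, false, RSI, 0};
  MemOp Store = {false, true, RDI, 0};
  std::printf("%d\n", isSuitable(Load, RDI, 4096, SeenLoad));  // 1 (unsuitable)
  std::printf("%d\n", isSuitable(Store, RDI, 4096, SeenLoad)); // 2 (impossible)
}

Run over the new tests' access sequences, the model reproduces their outcomes: the lone store in inc_store is SR_Suitable and becomes a FAULTING_OP, while the store in inc_store_with_store_over_load follows a load and yields SR_Impossible, so that test keeps its explicit TEST64rr/JE_1 sequence.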
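
For readers implementing the runtime side of this scheme, the __llvm_faultmaps layout exercised above (a version byte, reserved byte and short, a function count, then per-function records of FunctionAddress, NumFaultingPCs and Reserved followed by FunctionFaultInfo triples) can be decoded with a short walker. The sketch below is illustrative and not part of the patch: readNext and dumpFaultMap are invented names, and the field widths are inferred from the FaultMaps.rst excerpt plus the .quad/.long/.short expectations in implicit-null-check.ll, so verify them against the full FaultMaps documentation before relying on them.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Mirrors FaultMaps::FaultKind after this patch.
enum FaultKind : uint32_t {
  FaultingLoad = 1,
  FaultingLoadStore = 2,
  FaultingStore = 3,
};

// Reads one little-endian value from an unaligned buffer position and
// advances the cursor.
template <typename T> static T readNext(const uint8_t *&P) {
  T V;
  std::memcpy(&V, P, sizeof(T));
  P += sizeof(T);
  return V;
}

// Walks a __llvm_faultmaps blob and prints one line per faulting PC.
void dumpFaultMap(const uint8_t *P) {
  uint8_t Version = readNext<uint8_t>(P); // "Version: 0x1" in the test
  readNext<uint8_t>(P);                   // reserved (.byte 0)
  readNext<uint16_t>(P);                  // reserved (.short 0)
  if (Version != 1)
    return;
  uint32_t NumFunctions = readNext<uint32_t>(P); // "# functions: .long 7"
  for (uint32_t F = 0; F != NumFunctions; ++F) {
    uint64_t FunctionAddr = readNext<uint64_t>(P);   // .quad
    uint32_t NumFaultingPCs = readNext<uint32_t>(P); // .long
    readNext<uint32_t>(P);                           // reserved (.long 0)
    for (uint32_t I = 0; I != NumFaultingPCs; ++I) {
      uint32_t Kind = readNext<uint32_t>(P);
      uint32_t FaultingPCOffset = readNext<uint32_t>(P);
      uint32_t HandlerPCOffset = readNext<uint32_t>(P);
      const char *Name = Kind == FaultingLoad        ? "FaultingLoad"
                         : Kind == FaultingLoadStore ? "FaultingLoadStore"
                         : Kind == FaultingStore     ? "FaultingStore"
                                                     : "<unknown>";
      std::printf("func %#llx: %s, faulting PC +%u, handler +%u\n",
                  (unsigned long long)FunctionAddr, Name, FaultingPCOffset,
                  HandlerPCOffset);
    }
  }
}

A runtime pairing such a table with a SIGSEGV/SIGBUS handler would look up the faulting PC and, on a hit, resume execution at FunctionAddr + HandlerPCOffset (the throw_npe path) instead of crashing; with this patch that now works for faulting stores and load-store instructions as well as loads.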