1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
4 // ****************************************************************************
5 // File: controller.cpp
9 // controller.cpp: Debugger execution control routines
11 // ****************************************************************************
12 // Putting code & #includes, #defines, etc, before the stdafx.h will
13 // cause the code,etc, to be silently ignored
18 #include "../inc/common.h"
21 #include "../../vm/methoditer.h"
22 #include "../../vm/tailcallhelp.h"
// Forward declaration: returns a printable name for a TraceType (defined elsewhere).
24 const char *GetTType( TraceType tt);
// True iff the hardware exception code is a single-step trap.
26 #define IsSingleStep(exception) ((exception) == EXCEPTION_SINGLE_STEP)
28 // -------------------------------------------------------------------------
29 // DebuggerController routines
30 // -------------------------------------------------------------------------
// DAC-visible statics: the global patch table and its validity flag.
32 SPTR_IMPL_INIT(DebuggerPatchTable, DebuggerController, g_patches, NULL);
33 SVAL_IMPL_INIT(BOOL, DebuggerController, g_patchTableValid, FALSE);
35 #if !defined(DACCESS_COMPILE)
// Head of the singly-linked list of live controllers (see ctor / DeleteAllControllers).
37 DebuggerController *DebuggerController::g_controllers = NULL;
// Page-protection bookkeeping list; lock guarding controller/patch state; MethodEnter counter.
38 DebuggerControllerPage *DebuggerController::g_protections = NULL;
39 CrstStatic DebuggerController::g_criticalSection;
40 int DebuggerController::g_cTotalMethodEnter = 0;
43 // Is this patch at a position at which it's safe to take a stack?
// Returns false only for the unbound (module == NULL) managed TRACE_MGR_PUSH case;
// everything else is considered safe.
44 bool DebuggerControllerPatch::IsSafeForStackTrace()
46     LIMITED_METHOD_CONTRACT;
48     TraceType tt = this->trace.GetTraceType();
49     Module *module = this->key.module;
50     BOOL managed = this->IsManagedPatch();
52     // Patches placed by MgrPush can come at lots of illegal spots. Can't take a stack trace here.
53     if ((module == NULL) && managed && (tt == TRACE_MGR_PUSH))
58     // Consider everything else legal.
59     // This is a little shady for TRACE_FRAME_PUSH. But TraceFrame() needs a stackInfo
60     // to get a RegDisplay (though almost nobody uses it, so perhaps it could be removed).
65 #ifndef FEATURE_EMULATE_SINGLESTEP
66 // returns a pointer to the shared buffer. each call will AddRef() the object
67 // before returning it so callers only need to Release() when they're finished with it.
// Lazily allocates the buffer from the interop-safe *executable* heap; on
// OSX/arm64 the RX mapping cannot be written directly, so construction goes
// through an ExecutableWriterHolder RW alias.
68 SharedPatchBypassBuffer* DebuggerControllerPatch::GetOrCreateSharedPatchBypassBuffer()
77     if (m_pSharedPatchBypassBuffer == NULL)
79         void *pSharedPatchBypassBufferRX = g_pDebugger->GetInteropSafeExecutableHeap()->Alloc(sizeof(SharedPatchBypassBuffer));
80 #if defined(HOST_OSX) && defined(HOST_ARM64)
81         ExecutableWriterHolder<SharedPatchBypassBuffer> sharedPatchBypassBufferWriterHolder((SharedPatchBypassBuffer*)pSharedPatchBypassBufferRX, sizeof(SharedPatchBypassBuffer));
82         void *pSharedPatchBypassBufferRW = sharedPatchBypassBufferWriterHolder.GetRW();
83 #else // HOST_OSX && HOST_ARM64
84         void *pSharedPatchBypassBufferRW = pSharedPatchBypassBufferRX;
85 #endif // HOST_OSX && HOST_ARM64
// Placement-new through the writable alias; publish the RX pointer.
86         new (pSharedPatchBypassBufferRW) SharedPatchBypassBuffer();
87         m_pSharedPatchBypassBuffer = (SharedPatchBypassBuffer*)pSharedPatchBypassBufferRX;
89         _ASSERTE(m_pSharedPatchBypassBuffer);
90         TRACE_ALLOC(m_pSharedPatchBypassBuffer);
// AddRef on every call, including the creating one; caller must Release().
93     m_pSharedPatchBypassBuffer->AddRef();
95     return m_pSharedPatchBypassBuffer;
97 #endif // !FEATURE_EMULATE_SINGLESTEP
99 // @todo - remove all this splicing trash
100 // This Sort/Splice stuff just reorders the patches within a particular chain such
101 // that when we iterate through by calling GetPatch() and GetNextPatch(DebuggerControllerPatch),
102 // we'll get patches in increasing order of DebuggerControllerTypes.
103 // Practically, this means that calling GetPatch() will return EnC patches before stepping patches.
// Precondition: *ppPatch was just added and is the head of its hash chain.
106 void DebuggerPatchTable::SortPatchIntoPatchList(DebuggerControllerPatch **ppPatch)
108     LOG((LF_CORDB, LL_EVERYTHING, "DPT::SPIPL ppPatch: %p, pPatch: %p \n", ppPatch, (*ppPatch)));
110     DebuggerControllerPatch *patchFirst
111         = (DebuggerControllerPatch *) Find(Hash((*ppPatch)), Key((*ppPatch)));
112     _ASSERTE(patchFirst == (*ppPatch));
113     _ASSERTE((*ppPatch)->controller->GetDCType() != DEBUGGER_CONTROLLER_STATIC);
115     DebuggerControllerPatch *patchNext = GetNextPatch((*ppPatch));
117     //List contains one, (sorted) element
118     if (patchNext == NULL)
120         LOG((LF_CORDB, LL_INFO10000, "DPT::SPIPL: %p single element\n", (*ppPatch)));
124     // If we decide to reorder the list, we'll need to keep the element
125     // indexed by the hash function as the (sorted)first item. Everything else
126     // chains off this element, can thus stay put.
127     // Thus, either the element we just added is already sorted, or else we'll
128     // have to move it elsewhere in the list, meaning that we'll have to swap
129     // the second item & the new item, so that the index points to the proper
130     // first item in the list.
132     //use Cur ptr for case where patch gets appended to list
133     DebuggerControllerPatch *patchCur = patchNext;
// Advance until we find the first element whose DCType is >= ours;
// patchCur trails as the insertion anchor.
135     while (patchNext != NULL &&
136         ((*ppPatch)->controller->GetDCType() >
137         patchNext->controller->GetDCType()) )
139         patchCur = patchNext;
140         patchNext = GetNextPatch(patchNext);
143     if (patchNext == GetNextPatch((*ppPatch)))
145         LOG((LF_CORDB, LL_INFO10000,
146             "DPT::SPIPL: Patch %p is already sorted\n", (*ppPatch)));
147         return; //already sorted
150     LOG((LF_CORDB, LL_INFO10000,
151         "DPT::SPIPL: Patch %p will be moved \n", (*ppPatch)));
153     //remove it from the list
154     SpliceOutOfList((*ppPatch));
156     // the kinda neat thing is: since we put it originally at the front of the list,
157     // and it's not in order, then it must be behind another element of this list,
158     // so we don't have to write any 'SpliceInFrontOf' code.
160     _ASSERTE(patchCur != NULL);
161     SpliceInBackOf((*ppPatch), patchCur);
163     LOG((LF_CORDB, LL_INFO10000,
164         "DPT::SPIPL: Patch %p is now sorted\n", (*ppPatch)));
167 // This can leave the list empty, so don't do this unless you put
168 // the patch back somewhere else.
169 void DebuggerPatchTable::SpliceOutOfList(DebuggerControllerPatch *patch)
171 // We need to get iHash, the index of the ptr within
172 // m_piBuckets, ie it's entry in the hashtable.
173 ULONG iHash = Hash(patch) % m_iBuckets;
174 ULONG iElement = m_piBuckets[iHash];
175 DebuggerControllerPatch *patchFirst
176 = (DebuggerControllerPatch *) EntryPtr(iElement);
178 // Fix up pointers to chain
179 if (patchFirst == patch)
181 // The first patch shouldn't have anything behind it.
182 _ASSERTE(patch->entry.iPrev == DPT_INVALID_SLOT);
184 if (patch->entry.iNext != DPT_INVALID_SLOT)
186 m_piBuckets[iHash] = patch->entry.iNext;
190 m_piBuckets[iHash] = DPT_INVALID_SLOT;
194 if (patch->entry.iNext != DPT_INVALID_SLOT)
196 EntryPtr(patch->entry.iNext)->iPrev = patch->entry.iPrev;
199 if (patch->entry.iPrev != DPT_INVALID_SLOT)
201 EntryPtr(patch->entry.iNext)->iNext = patch->entry.iNext;
204 patch->entry.iNext = DPT_INVALID_SLOT;
205 patch->entry.iPrev = DPT_INVALID_SLOT;
208 void DebuggerPatchTable::SpliceInBackOf(DebuggerControllerPatch *patchAppend,
209 DebuggerControllerPatch *patchEnd)
211 ULONG iAppend = ItemIndex((HASHENTRY*)patchAppend);
212 ULONG iEnd = ItemIndex((HASHENTRY*)patchEnd);
214 patchAppend->entry.iPrev = iEnd;
215 patchAppend->entry.iNext = patchEnd->entry.iNext;
217 if (patchAppend->entry.iNext != DPT_INVALID_SLOT)
218 EntryPtr(patchAppend->entry.iNext)->iPrev = iAppend;
220 patchEnd->entry.iNext = iAppend;
224 //-----------------------------------------------------------------------------
225 // Stack safety rules.
226 // In general, we're safe to crawl whenever we're in preemptive mode.
227 // We must also be safe at any spot the thread could get synchronized,
228 // because that means that the thread will be stopped to let the debugger shell
229 // inspect it and that can definitely take stack traces.
230 // Basically the only unsafe spot is in the middle of goofy stub with some
231 // partially constructed frame while in coop mode.
232 //-----------------------------------------------------------------------------
234 // Safe if we're at certain types of patches.
235 // See Patch::IsSafeForStackTrace for details.
// Ticket granted because the patch location itself is stack-walk safe.
236 StackTraceTicket::StackTraceTicket(DebuggerControllerPatch * patch)
238     _ASSERTE(patch != NULL);
239     _ASSERTE(patch->IsSafeForStackTrace());
242 // Safe if there was already another stack trace at this spot. (Grandfather clause)
243 // This is commonly used for StepOut, which runs stacktraces to crawl up
244 // the stack to find a place to patch.
245 StackTraceTicket::StackTraceTicket(ControllerStackInfo * info)
247     _ASSERTE(info != NULL);
249     // Ensure that the other stack info object actually executed (and thus was
// grandfathered in as a legitimate stackwalk).
251     _ASSERTE(info->m_dbgExecuted);
254 // Safe b/c the context shows we're in native managed code.
255 // This must be safe because we could always set a managed breakpoint by native
256 // offset and thus synchronize the shell at this spot. So this is
257 // a specific example of the Synchronized case. The fact that we don't actually
258 // synchronize doesn't make us any less safe.
259 StackTraceTicket::StackTraceTicket(const BYTE * ip)
261     _ASSERTE(g_pEEInterface->IsManagedNativeCode(ip));
264 // Safe if we're at a Synchronized point.
265 StackTraceTicket::StackTraceTicket(Thread * pThread)
267     _ASSERTE(pThread != NULL);
269     // If we're synchronized, the debugger should be stopped.
270     // That means all threads are synced and must be safe to take a stacktrace.
271     // Thus we don't even need to do a thread-specific check.
272     _ASSERTE(g_pDebugger->IsStopped());
275 // DebuggerUserBreakpoint has a special case of safety. See that ctor for details.
// Intentionally no asserts here; the safety argument lives at the call site.
276 StackTraceTicket::StackTraceTicket(DebuggerUserBreakpoint * p)
281 //void ControllerStackInfo::GetStackInfo(): GetStackInfo
282 // is invoked by the user to trigger the stack walk. This will
283 // cause the stack walk detailed in the class description to happen.
284 // Thread* thread: The thread to do the stack walk on.
285 // void* targetFP: Can be either NULL (meaning that the bottommost
286 // frame is the target), or a frame pointer, meaning that the
287 // caller wants information about a specific frame.
288 // CONTEXT* pContext: A pointer to a CONTEXT structure. Can be null,
289 // we use our temp context.
290 // bool suppressUMChainFromComPlusMethodFrameGeneric - A ridiculous flag that is trying to narrowly
291 // target a fix for issue 650903.
292 // StackTraceTicket - ticket to ensure that we actually have permission for this stacktrace
293 void ControllerStackInfo::GetStackInfo(
294     StackTraceTicket ticket,
296     FramePointer targetFP,
298     bool suppressUMChainFromComPlusMethodFrameGeneric
301     _ASSERTE(thread != NULL);
// Remember whether the caller supplied a context; if not, the walker primes our temp one.
303     BOOL contextValid = (pContext != NULL);
306     // We're assuming the thread is protected w/ a frame (which includes the redirection
307     // case). The stackwalker will use that protection to prime the context.
308     pContext = &this->m_tempContext;
312     // If we provided an explicit context for this thread, it better not be redirected.
313     _ASSERTE(!ISREDIRECTEDTHREAD(thread));
316     // Mark this stackwalk as valid so that it can in turn be used to grandfather
317     // in other stackwalks.
318     INDEBUG(m_dbgExecuted = true);
// Reset per-walk results before running the walker.
320     m_activeFound = false;
321     m_returnFound = false;
322     m_bottomFP = LEAF_MOST_FRAME;
323     m_targetFP = targetFP;
324     m_targetFrameFound = (m_targetFP == LEAF_MOST_FRAME);
325     m_specialChainReason = CHAIN_NONE;
326     m_suppressUMChainFromComPlusMethodFrameGeneric = suppressUMChainFromComPlusMethodFrameGeneric;
328     int result = DebuggerWalkStack(thread,
336     _ASSERTE(m_activeFound); // All threads have at least one unmanaged frame
338     if (result == SWA_DONE)
340         _ASSERTE(!HasReturnFrame()); // We didn't find a managed return frame
341         _ASSERTE(HasReturnFrame(true)); // All threads have at least one unmanaged frame
345 //---------------------------------------------------------------------------------------
347 // This function "undoes" an unwind, i.e. it takes the active frame (the current frame)
348 // and sets it to be the return frame (the caller frame). Currently it is only used by
349 // the stepper to step out of an LCG method. See DebuggerStepper::DetectHandleLCGMethods()
350 // for more information.
353 // The current frame is valid on entry.
356 // After this function returns, the active frame on this instance of ControllerStackInfo will no longer be valid.
358 // This function is specifically for DebuggerStepper::DetectHandleLCGMethods(). Using it in other scencarios may
359 // require additional changes.
362 void ControllerStackInfo::SetReturnFrameWithActiveFrame()
364     // Copy the active frame into the return frame.
365     m_returnFound = true;
366     m_returnFrame = m_activeFrame;
368     // Invalidate the active frame.
369     m_activeFound = false;
// Reset the frame pointer so stale data can't be mistaken for a real frame.
371     m_activeFrame.fp = LEAF_MOST_FRAME;
374 // Fill in a controller-stack info.
// Stackwalk callback: records the bottom frame, the active (target) frame, and
// the first managed return frame; aborts the walk once the return frame is found.
375 StackWalkAction ControllerStackInfo::WalkStack(FrameInfo *pInfo, void *data)
377     LIMITED_METHOD_CONTRACT;
379     _ASSERTE(!pInfo->HasStubFrame()); // we didn't ask for stub frames.
381     ControllerStackInfo *i = (ControllerStackInfo *) data;
383     // save this info away for later use.
384     if (i->m_bottomFP == LEAF_MOST_FRAME)
385         i->m_bottomFP = pInfo->fp;
387     // This is part of the targeted fix for issue 650903 (see the other
388     // parts in code:TrackUMChain and code:DebuggerStepper::TrapStepOut).
390     // pInfo->fIgnoreThisFrameIfSuppressingUMChainFromComPlusMethodFrameGeneric has been
391     // set by TrackUMChain to help us remember that the current frame we're looking at is
392     // ComPlusMethodFrameGeneric (we can't rely on looking at pInfo->frame to check
393     // this), and i->m_suppressUMChainFromComPlusMethodFrameGeneric has been set by the
394     // dude initiating this walk to remind us that our goal in life is to do a Step Out
395     // during managed-only debugging. These two things together tell us we should ignore
396     // this frame, rather than erroneously identifying it as the target frame.
398 #ifdef FEATURE_COMINTEROP
399     if(i->m_suppressUMChainFromComPlusMethodFrameGeneric &&
400         (pInfo->chainReason == CHAIN_ENTER_UNMANAGED) &&
401         (pInfo->fIgnoreThisFrameIfSuppressingUMChainFromComPlusMethodFrameGeneric))
405 #endif // FEATURE_COMINTEROP
407     //have we reached the correct frame yet?
408     if (!i->m_targetFrameFound &&
409         IsEqualOrCloserToLeaf(i->m_targetFP, pInfo->fp))
411         i->m_targetFrameFound = true;
414     if (i->m_targetFrameFound )
416         // Ignore Enter-managed chains.
417         if (pInfo->chainReason == CHAIN_ENTER_MANAGED)
// Active frame already captured: this iteration is looking for the return frame.
422         if (i->m_activeFound )
424             if (pInfo->chainReason == CHAIN_CLASS_INIT)
425                 i->m_specialChainReason = pInfo->chainReason;
427             if (pInfo->fp != i->m_activeFrame.fp) // avoid dups
429                 i->m_returnFrame = *pInfo;
431 #if defined(FEATURE_EH_FUNCLETS)
432                 CopyREGDISPLAY(&(i->m_returnFrame.registers), &(pInfo->registers));
433 #endif // FEATURE_EH_FUNCLETS
435                 i->m_returnFound = true;
437                 // We care if the current frame is unmanaged
438                 // Continue unless we found a managed return frame.
439                 return pInfo->managed ? SWA_ABORT : SWA_CONTINUE;
// First qualifying frame: capture it as the active frame.
444         i->m_activeFrame = *pInfo;
446 #if defined(FEATURE_EH_FUNCLETS)
447         CopyREGDISPLAY(&(i->m_activeFrame.registers), &(pInfo->registers));
448 #endif // FEATURE_EH_FUNCLETS
450         i->m_activeFound = true;
461 // Note that patches may be reallocated - do not keep a pointer to a patch.
// Adds an *unbound* patch keyed by (module, methodDef). Used for IL primary
// patches and for patches on not-yet-jitted code.
463 DebuggerControllerPatch *DebuggerPatchTable::AddPatchForMethodDef(DebuggerController *controller,
466                                       MethodDesc* pMethodDescFilter,
469                                       DebuggerPatchKind kind,
471                                       AppDomain *pAppDomain,
472                                       SIZE_T primaryEnCVersion,
473                                       DebuggerJitInfo *dji)
483     LOG( (LF_CORDB,LL_INFO10000,"DPT:APFMD 0x%x with dji %p, %s offset 0x%zx controller:%p AD:%p\n",
484         md, dji, (offsetIsIL ? "IL" : "native"), offset, controller, pAppDomain));
486     DebuggerFunctionKey key;
491     // Get a new uninitialized patch object
492     DebuggerControllerPatch *patch = (DebuggerControllerPatch *) Add(HashKey(&key));
496 #ifndef FEATURE_EMULATE_SINGLESTEP
498 #endif // !FEATURE_EMULATE_SINGLESTEP
500     //initialize the patch data structure.
501     InitializePRD(&(patch->opcode));
502     patch->controller = controller;
503     patch->key.module = module;
505     patch->pMethodDescFilter = pMethodDescFilter;
506     patch->offset = offset;
507     patch->offsetIsIL = offsetIsIL;
508     patch->address = NULL;
510     patch->trace.Bad_SetTraceType(DPT_DEFAULT_TRACE_TYPE); // TRACE_OTHER
511     patch->refCount = 1; // AddRef()
512     patch->fSaveOpcode = false;
513     patch->pAppDomain = pAppDomain;
514     patch->patchId = m_patchId++;
516     if (kind == PATCH_KIND_IL_PRIMARY)
518         _ASSERTE(dji == NULL);
519         patch->encVersion = primaryEnCVersion;
529         LOG((LF_CORDB,LL_INFO10000,"DPT:APFMD w/ encVersion 0x%zx, patchId:0x%zx\n",
530             dji->m_encVersion, patch->patchId));
532     else if (kind == PATCH_KIND_IL_PRIMARY)
534         LOG((LF_CORDB,LL_INFO10000,"DPT:APFMD w/ encVersion 0x%zx, patchId:0x%zx (primary)\n",
535             primaryEnCVersion, patch->patchId));
539         LOG((LF_CORDB,LL_INFO10000,"DPT:APFMD w/ no dji or dmi, patchId:0x%zx\n",
543     // This patch is not yet bound or activated
544     _ASSERTE( !patch->IsBound() );
545     _ASSERTE( !patch->IsActivated() );
547     // The only kind of patch with IL offset is the IL primary patch.
548     _ASSERTE(patch->IsILPrimaryPatch() || patch->offsetIsIL == FALSE);
550     // The only kind of patch that allows a MethodDescFilter is the IL primary patch
551     _ASSERTE(patch->IsILPrimaryPatch() || patch->pMethodDescFilter == NULL);
553     // Zero is the only native offset that we allow to bind across different jitted
554     // code bodies. There isn't any sensible meaning to binding at some other native offset.
555     // Even if all the code bodies had an instruction that started at that offset there is
556     // no guarantee those instructions represent a semantically equivalent point in the
557     // method's execution.
558     _ASSERTE(!(patch->IsILPrimaryPatch() && !patch->offsetIsIL && patch->offset != 0));
563 // Create and bind a patch to the specified address
564 // The caller should immediately activate the patch since we typically expect bound patches
565 // will always be activated.
// Unlike AddPatchForMethodDef, the resulting patch is hashed by its code address.
566 DebuggerControllerPatch *DebuggerPatchTable::AddPatchForAddress(DebuggerController *controller,
569                                           DebuggerPatchKind kind,
570                                           CORDB_ADDRESS_TYPE *address,
572                                           AppDomain *pAppDomain,
573                                           DebuggerJitInfo *dji,
587     _ASSERTE(kind == PATCH_KIND_NATIVE_MANAGED || kind == PATCH_KIND_NATIVE_UNMANAGED);
588     LOG((LF_CORDB,LL_INFO10000,"DCP:AddPatchForAddress bound "
589         "absolute to 0x%p with dji 0x%p (mdDef:0x%x) "
590         "controller:0x%p AD:0x%p\n",
591         address, dji, (fd!=NULL?fd->GetMemberDef():0), controller,
594     // get new uninitialized patch object
595     DebuggerControllerPatch *patch =
596         (DebuggerControllerPatch *) Add(HashAddress(address));
602 #ifndef FEATURE_EMULATE_SINGLESTEP
604 #endif // !FEATURE_EMULATE_SINGLESTEP
606     // initialize the patch data structure
607     InitializePRD(&(patch->opcode));
608     patch->controller = controller;
// Key is derived from the MethodDesc when available, nil otherwise.
612         patch->key.module = NULL;
613         patch->key.md = mdTokenNil;
617         patch->key.module = g_pEEInterface->MethodDescGetModule(fd);
618         patch->key.md = fd->GetMemberDef();
620     patch->pMethodDescFilter = NULL;
621     patch->offset = offset;
622     patch->offsetIsIL = FALSE;
623     patch->address = address;
625     patch->trace.Bad_SetTraceType(traceType);
626     patch->refCount = 1; // AddRef()
627     patch->fSaveOpcode = false;
628     patch->pAppDomain = pAppDomain;
// Reuse a caller-supplied id (if any); otherwise mint a fresh one.
629     if (patchId == DCP_PATCHID_INVALID)
630         patch->patchId = m_patchId++;
632         patch->patchId = patchId;
639         LOG((LF_CORDB,LL_INFO10000,"AddPatchForAddress w/ version with no dji, patchId:0x%zx\n", patch->patchId));
643         LOG((LF_CORDB,LL_INFO10000,"AddPatchForAddress w/ version 0x%zx, "
644             "patchId:0x%zx\n", dji->m_methodInfo->GetCurrentEnCVersion(), patch->patchId));
646         _ASSERTE( fd==NULL || fd == dji->m_nativeCodeVersion.GetMethodDesc() );
// Keep the hash chain ordered by controller type (EnC before steppers).
649     SortPatchIntoPatchList(&patch);
651     // This patch is bound but not yet activated
652     _ASSERTE( patch->IsBound() );
653     _ASSERTE( !patch->IsActivated() );
655     // The only kind of patch with IL offset is the IL primary patch.
656     _ASSERTE(patch->IsILPrimaryPatch() || patch->offsetIsIL == FALSE);
660 // Set the native address for this patch.
// Rehashes the patch from key-based to address-based hashing; the patch ends
// up bound (IsBound()) but still not activated.
661 void DebuggerPatchTable::BindPatch(DebuggerControllerPatch *patch, CORDB_ADDRESS_TYPE *address)
663     _ASSERTE(patch != NULL);
664     _ASSERTE(address != NULL);
665     _ASSERTE( !patch->IsILPrimaryPatch() );
666     _ASSERTE(!patch->IsBound() );
668     //Since the actual patch doesn't move, we don't have to worry about
669     //zeroing out the opcode field (see lengthy comment above)
670     // Since the patch is double-hashed based off Address, if we change the address,
671     // we must remove and reinsert the patch.
672     CHashTable::Delete(HashKey(&patch->key), ItemIndex((HASHENTRY*)patch));
674     patch->address = address;
676     CHashTable::Add(HashAddress(address), ItemIndex((HASHENTRY*)patch));
678     SortPatchIntoPatchList(&patch);
680     _ASSERTE(patch->IsBound() );
681     _ASSERTE(!patch->IsActivated() );
684 // Disassociate a patch from a specific code address.
// Inverse of BindPatch: rehashes from address-based back to key-based hashing,
// reconstructing the (module, methodDef) key from the address if needed.
685 void DebuggerPatchTable::UnbindPatch(DebuggerControllerPatch *patch)
687     _ASSERTE(patch != NULL);
688     _ASSERTE(patch->kind != PATCH_KIND_IL_PRIMARY);
689     _ASSERTE(patch->IsBound() );
690     _ASSERTE(!patch->IsActivated() );
692     //<REVISIT_TODO>@todo We're hosed if the patch hasn't been primed with
693     // this info & we can't get it...</REVISIT_TODO>
694     if (patch->key.module == NULL ||
695         patch->key.md == mdTokenNil)
697         MethodDesc *fd = g_pEEInterface->GetNativeCodeMethodDesc(
698             dac_cast<PCODE>(patch->address));
699         _ASSERTE( fd != NULL );
700         patch->key.module = g_pEEInterface->MethodDescGetModule(fd);
701         patch->key.md = fd->GetMemberDef();
704     // Update its index entry in the table to use its unbound key
705     // Since the patch is double-hashed based off Address, if we change the address,
706     // we must remove and reinsert the patch.
707     CHashTable::Delete( HashAddress(patch->address),
708                         ItemIndex((HASHENTRY*)patch));
710     patch->address = NULL; // we're no longer bound to this address
712     CHashTable::Add( HashKey(&patch->key),
713                      ItemIndex((HASHENTRY*)patch));
715     _ASSERTE(!patch->IsBound() );
// Remove a (deactivated) patch from the table entirely.
719 void DebuggerPatchTable::RemovePatch(DebuggerControllerPatch *patch)
721     // Since we're deleting this patch, it must not be activated (i.e. it must not have a stored opcode)
722     _ASSERTE( !patch->IsActivated() );
723 #ifndef FEATURE_EMULATE_SINGLESTEP
725 #endif // !FEATURE_EMULATE_SINGLESTEP
728     // Because of the implementation of CHashTable, we can safely
729     // delete elements while iterating through the table. This
730     // behavior is relied upon - do not change to a different
731     // implementation without considering this fact.
733     Delete(Hash(patch), (HASHENTRY *) patch);
// Walk forward along prev's hash chain and return the next patch that shares
// prev's key, skipping entries hashed by the other key type.
736 DebuggerControllerPatch *DebuggerPatchTable::GetNextPatch(DebuggerControllerPatch *prev)
741     // Start at the next entry in the chain.
742     // @todo - note that: EntryPtr(ItemIndex(x)) == x
743     iNext = EntryPtr(ItemIndex((HASHENTRY*)prev))->iNext;
745     // Search until we hit the end.
746     while (iNext != UINT32_MAX)
749         psEntry = EntryPtr(iNext);
751         // Careful here... we can hash the entries in this table
752         // by two types of keys. In this type of search, the type
753         // of the second key (psEntry) does not necessarily
754         // indicate the type of the first key (prev), so we have
755         // to check for sure.
756         DebuggerControllerPatch *pc2 = (DebuggerControllerPatch*)psEntry;
// Only compare keys when both entries are hashed the same way
// (both address-bound, or both unbound).
758         if (((pc2->address == NULL) && (prev->address == NULL)) ||
759             ((pc2->address != NULL) && (prev->address != NULL)))
760             if (!Cmp(Key(prev), psEntry))
763         // Advance to the next item in the chain.
764         iNext = psEntry->iNext;
// Debug helper: scan the raw entry array and log patches with stored opcodes.
771 void DebuggerPatchTable::CheckPatchTable()
773     if (NULL != m_pcEntries)
775         LOG((LF_CORDB,LL_INFO1000, "DPT:CPT: %u\n", m_iEntries));
776         DebuggerControllerPatch *dcp;
778         while (i++ < m_iEntries)
780             dcp = (DebuggerControllerPatch*)&(((DebuggerControllerPatch *)m_pcEntries)[i]);
781             if (dcp->opcode != 0 )
790 // Count how many patches are in the table.
// Counts entries that are activated or simply in use (not free).
792 int DebuggerPatchTable::GetNumberOfPatches()
796     if (NULL != m_pcEntries)
798         DebuggerControllerPatch *dcp;
801         while (i++ <m_iEntries)
803             dcp = (DebuggerControllerPatch*)&(((DebuggerControllerPatch *)m_pcEntries)[i]);
805             if (dcp->IsActivated() || !dcp->IsFree())
813 //-----------------------------------------------------------------------------
814 // Debug check that we only have 1 thread-starter per thread.
815 // pNew - the new DTS. We'll make sure there's not already a DTS on this thread.
816 //-----------------------------------------------------------------------------
817 void DebuggerController::EnsureUniqueThreadStarter(DebuggerThreadStarter * pNew)
819     // This lock should be safe to take since our base class ctor takes it.
820     ControllerLockHolder lockController;
// Walk the global controller list looking for other thread-starters.
821     DebuggerController * pExisting = g_controllers;
822     while(pExisting != NULL)
824         if (pExisting->GetDCType() == DEBUGGER_CONTROLLER_THREAD_STARTER)
826             if (pExisting != pNew)
828                 // If we have 2 thread starters, they'd better be on different threads.
829                 _ASSERTE((pExisting->GetThread() != pNew->GetThread()));
832         pExisting = pExisting->m_next;
837 //-----------------------------------------------------------------------------
838 // If we have a thread-starter on the given EE thread, make sure it's canceled.
839 // Thread-Starters normally delete themselves when they fire. But if the EE
840 // destroys the thread before it fires, then we'd still have an active DTS.
841 //-----------------------------------------------------------------------------
842 void DebuggerController::CancelOutstandingThreadStarter(Thread * pThread)
844     _ASSERTE(pThread != NULL);
845     LOG((LF_CORDB, LL_EVERYTHING, "DC:CancelOutstandingThreadStarter - checking on thread=%p\n", pThread));
847     ControllerLockHolder lockController;
848     DebuggerController * p = g_controllers;
851         if (p->GetDCType() == DEBUGGER_CONTROLLER_THREAD_STARTER)
853             if (p->GetThread() == pThread)
855                 LOG((LF_CORDB, LL_EVERYTHING, "DC:CancelOutstandingThreadStarter Found=%p\n", p));
857                 // There's only 1 DTS per thread, so once we find it, we can quit.
865     // The common case is that our DTS hit its patch and did a SendEvent (and
866     // deleted itself). So usually we'll get through the whole list w/o deleting anything.
870 //void DebuggerController::Initialize() Sets up the static
871 // variables for the static DebuggerController class.
872 // How: initializes the critical section
// Idempotent: only initializes when g_patches is still NULL.
873 HRESULT DebuggerController::Initialize()
879     // This can be called in an "early attach" case, so DebuggerIsInvolved()
880     // will be b/c we don't realize the debugger's attaching to us.
881     //PRECONDITION(DebuggerIsInvolved());
882     POSTCONDITION(CheckPointer(g_patches));
883     POSTCONDITION(RETVAL == S_OK);
887     if (g_patches == NULL)
889         ZeroMemory(&g_criticalSection, sizeof(g_criticalSection)); // Init() expects zero-init memory.
891         // NOTE: CRST_UNSAFE_ANYMODE prevents a GC mode switch when entering this crst.
892         // If you remove this flag, we will switch to preemptive mode when entering
893         // g_criticalSection, which means all functions that enter it will become
894         // GC_TRIGGERS. (This includes all uses of ControllerLockHolder.) So be sure
895         // to update the contracts if you remove this flag.
896         g_criticalSection.Init(CrstDebuggerController,
897             (CrstFlags)(CRST_UNSAFE_ANYMODE | CRST_REENTRANCY | CRST_DEBUGGER_THREAD));
899         g_patches = new (interopsafe) DebuggerPatchTable();
900         _ASSERTE(g_patches != NULL); // throws on oom
902         HRESULT hr = g_patches->Init();
// On failed Init, tear the table back down before reporting the error.
906             DeleteInteropSafe(g_patches);
910         g_patchTableValid = TRUE;
911         TRACE_ALLOC(g_patches);
914     _ASSERTE(g_patches != NULL);
920 //---------------------------------------------------------------------------------------
922 // Constructor for a controller
925 // pThread - thread that controller has affinity to. NULL if no thread - affinity.
926 // pAppdomain - appdomain that controller has affinity to. NULL if no AD affinity.
930 // "Affinity" is per-controller specific. Affinity is generally passed on to
931 // any patches the controller creates. So if a controller has affinity to Thread X,
932 // then any patches it creates will only fire on Thread-X.
934 //---------------------------------------------------------------------------------------
936 DebuggerController::DebuggerController(Thread * pThread, AppDomain * pAppDomain)
937     : m_pAppDomain(pAppDomain),
940     m_exceptionHook(false),
942     m_traceCallFP(ROOT_MOST_FRAME),
943     m_unwindFP(LEAF_MOST_FRAME),
944     m_eventQueuedCount(0),
946     m_fEnableMethodEnter(false)
956     LOG((LF_CORDB, LL_INFO10000, "DC::DC %p m_eventQueuedCount=%d\n", this, m_eventQueuedCount));
// Link this controller onto the head of the global list, under the lock.
957     ControllerLockHolder lockController;
959     m_next = g_controllers;
960     g_controllers = this;
964 //---------------------------------------------------------------------------------------
966 // Debugger::Controller::DeleteAllControllers - deletes all debugger controllers
975 // This is used at detach time to remove all DebuggerControllers. This will remove all
976 // patches and do whatever other cleanup individual DebuggerControllers consider
977 // necessary to allow the debugger to detach and the process to run normally.
980 void DebuggerController::DeleteAllControllers()
989     ControllerLockHolder lockController;
990     DebuggerController * pDebuggerController = g_controllers;
991     DebuggerController * pNextDebuggerController = NULL;
// Capture m_next before Delete(), which may free the current node.
993     while (pDebuggerController != NULL)
995         pNextDebuggerController = pDebuggerController->m_next;
996         pDebuggerController->DebuggerDetachClean();
997         pDebuggerController->Delete();
998         pDebuggerController = pNextDebuggerController;
// Destructor: must only run once the controller is fully dequeued
// (m_eventQueuedCount == 0); unlinks this controller from g_controllers.
1002 DebuggerController::~DebuggerController()
1012     ControllerLockHolder lockController;
1014     _ASSERTE(m_eventQueuedCount == 0);
1019     // Remove controller from list
1022     DebuggerController **c;
1032 // void DebuggerController::Delete()
1033 // What: Marks an instance as deletable. If it's ref count
1034 // (see Enqueue, Dequeue) is currently zero, it actually gets deleted
1035 // How: Set m_deleted to true. If m_eventQueuedCount==0, delete this
1036 void DebuggerController::Delete()
1045     if (m_eventQueuedCount == 0)
1047         LOG((LF_CORDB|LF_ENC, LL_INFO100000, "DC::Delete: actual delete of this: %p\n", this));
1049         DeleteInteropSafe(this);
// Still queued: defer actual deletion to Dequeue().
1053         LOG((LF_CORDB|LF_ENC, LL_INFO100000, "DC::Delete: marked for future delete of this: %p, m_eventQueuedCount=%d\n",
1054             this, m_eventQueuedCount));
// Per-controller detach-time cleanup hook; invoked from DeleteAllControllers().
// The base implementation's body is not visible in this excerpt.
1059 void DebuggerController::DebuggerDetachClean()
// Bump a patch's reference count (pairs with ReleasePatch).
1065 void DebuggerController::AddRefPatch(DebuggerControllerPatch *patch)
1067     LOG((LF_CORDB, LL_INFO10000, "DC::ARP: patchId:0x%zx\n", patch->patchId));
// Drop a reference on a patch; when the count reaches zero the patch is
// deactivated and removed from the patch table.
1072 void DebuggerController::ReleasePatch(DebuggerControllerPatch *patch)
1075     if (patch->refCount == 0)
1077         LOG((LF_CORDB, LL_INFO10000, "DC::RP: patchId:0x%zx deleted, deactivating\n", patch->patchId));
1078         DeactivatePatch(patch);
1079         GetPatchTable()->RemovePatch(patch);
1083 // void DebuggerController::DisableAll() DisableAll removes
1084 // all control from the controller. This includes all patches & page
1085 // protection. This will invoke Disable* for unwind,singlestep,
1086 // exceptionHook, and tracecall. It will also go through the patch table &
1087 // attempt to remove any and all patches that belong to this controller.
1088 // If the patch is currently triggering, then a Dispatch* method expects the
1089 // patch to be there after we return, so we instead simply mark the patch
1090 // itself as deleted.
1091 void DebuggerController::DisableAll()
1101     LOG((LF_CORDB,LL_INFO1000, "DC::DisableAll\n"));
1102     _ASSERTE(g_patches != NULL);
1104     ControllerLockHolder ch;
1107         // Remove controller's patches from list.
1108         // Don't do this on shutdown because the shutdown thread may have killed another thread asynchronously
1109         // thus leaving the patchtable in an inconsistent state such that we may fail trying to walk it.
1110         // Since we're exiting anyways, leaving int3 in the code can't harm anybody.
1112         if (!g_fProcessDetach)
1115             for (DebuggerControllerPatch *patch = g_patches->GetFirstPatch(&f);
1117                 patch = g_patches->GetNextPatch(&f))
1119                 if (patch->controller == this)
1121                     ReleasePatch(patch);
// Tear down the remaining per-controller hooks that are enabled.
1127         DisableSingleStep();
1128         if (m_exceptionHook)
1129             DisableExceptionHook();
1130         if (m_unwindFP != LEAF_MOST_FRAME)
1134         if (m_fEnableMethodEnter)
1135             DisableMethodEnter();
1139 // void DebuggerController::Enqueue() What: Does
1140 // reference counting so we don't toast a
1141 // DebuggerController while it's in a Dispatch queue.
1142 // Why: In DispatchPatchOrSingleStep, we can't hold locks when going
1143 // into PreEmptiveGC mode b/c we'll create a deadlock.
1144 // So we have to UnLock() prior to
1145 // EnablePreEmptiveGC(). But somebody else can show up and delete the
1146 // DebuggerControllers since we no longer have the lock. So we have to
1147 // do this reference counting thing to make sure that the controllers
1148 // don't get toasted as we're trying to invoke SendEvent on them. We have to
1149 // reacquire the lock before invoking Dequeue because Dequeue may
1150 // result in the controller being deleted, which would change the global
1152 // How: InterlockIncrement( m_eventQueuedCount )
// Bump the queued-event count so this controller is not deleted while a
// dispatch that references it is in flight (see the block comment above).
1153 void DebuggerController::Enqueue()
1155 LIMITED_METHOD_CONTRACT;
1157 m_eventQueuedCount++;
1158 LOG((LF_CORDB, LL_INFO10000, "DC::Enq DC: %p m_eventQueuedCount at 0x%x\n",
1159 this, m_eventQueuedCount));
1162 // void DebuggerController::Dequeue() What: Does
1163 // reference counting so we don't toast a
1164 // DebuggerController while it's in a Dispatch queue.
1165 // How: InterlockDecrement( m_eventQueuedCount ), delete this if
1166 // m_eventQueuedCount == 0 AND m_deleted has been set to true
// Drop the queued-event count taken by Enqueue; when it reaches zero and
// the controller was marked for deletion (see Delete), free it now.
1167 void DebuggerController::Dequeue()
1176 LOG((LF_CORDB, LL_INFO10000, "DC::Deq DC: %p m_eventQueuedCount at 0x%x\n",
1177 this, m_eventQueuedCount));
1178 if (--m_eventQueuedCount == 0)
1183 DeleteInteropSafe(this);
1189 // bool DebuggerController::BindPatch() If the method has
1190 // been JITted and isn't hashed by address already, then hash
1191 // it into the hashtable by address and not DebuggerFunctionKey.
1192 // If the patch->address field is nonzero, we're done.
1193 // Otherwise ask g_pEEInterface to FindLoadedMethodRefOrDef, then
1194 // GetFunctionAddress of the method, if the method is in IL,
1195 // MapILOffsetToNative. If everything else went Ok, we can now invoke
1196 // g_patches->BindPatch.
1197 // Returns: false if we know that we can't bind the patch immediately.
1198 // true if we either can bind the patch right now, or can't right now,
1199 // but might be able to in the future (eg, the method hasn't been JITted)
1201 // Have following outcomes:
1202 // 1) Succeeded in binding the patch to a raw address. patch->address is set.
1203 // (Note we still must apply the patch to put the int 3 in.)
1204 // returns true, *pFail = false
1206 // 2) Fails to bind, but a future attempt may succeed. Obvious ex, for an IL-only
1207 // patch on an unjitted method.
1208 // returns false, *pFail = false
1210 // 3) Fails to bind because something's wrong. Ex: bad IL offset, no DJI to do a
1211 // mapping with. Future calls will fail too.
1212 // returns false, *pFail = true
// Bind a (non-IL-primary) patch to a concrete native address in pMD's
// jitted code. See the outcome table in the comment block above: returns
// true once patch->address is set (or was already set / got set recursively
// while looking up the DJI), false when binding is not possible yet.
1213 bool DebuggerController::BindPatch(DebuggerControllerPatch *patch,
1215 CORDB_ADDRESS_TYPE *startAddr)
1219 THROWS; // from GetJitInfo
1221 MODE_ANY; // don't really care what mode we're in.
1223 PRECONDITION(ThisMaybeHelperThread());
1227 _ASSERTE(patch != NULL);
1228 _ASSERTE(!patch->IsILPrimaryPatch());
1229 _ASSERTE(pMD != NULL);
1231 LOG((LF_CORDB,LL_INFO10000, "DC::BP: Patch %p (patchId:0x%zx) to %s::%s (pMD: %p) at %p\n",
1232 patch, patch->patchId, pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, pMD, startAddr));
1235 // Translate patch to address, if it hasn't been already.
// Already bound — nothing to do.
1238 if (patch->address != NULL)
// No start address supplied by the caller: fall back to the code address
// recorded in the patch's DebuggerJitInfo, if jitting has completed.
1243 if (startAddr == NULL)
1245 if (patch->HasDJI() && patch->GetDJI()->m_jitComplete)
1247 startAddr = (CORDB_ADDRESS_TYPE *) CORDB_ADDRESS_TO_PTR(patch->GetDJI()->m_addrOfCode);
1248 _ASSERTE(startAddr != NULL);
1250 //We should never be calling this function with both a NULL startAddr and a DJI that doesn't have code.
1251 _ASSERTE(startAddr != NULL);
1254 _ASSERTE(!g_pEEInterface->IsStub((const BYTE *)startAddr));
1256 // If we've jitted, map to a native offset.
1257 DebuggerJitInfo *info = g_pDebugger->GetJitInfo(pMD, (const BYTE *)startAddr);
1262 LOG((LF_CORDB,LL_INFO10000, "DC::BP: For startAddr %p, didn't find a DJI\n", startAddr));
1267 // There is a strange case with prejitted code and unjitted trace patches. We can enter this function
1268 // with no DebuggerJitInfo created, then have the call just above this actually create the
1269 // DebuggerJitInfo, which causes JitComplete to be called, which causes all patches to be bound! If this
1270 // happens, then we don't need to continue here (its already been done recursively) and we don't need to
1271 // re-active the patch, so we return false from right here. We can check this by seeing if we suddenly
1272 // have the address in the patch set.
1273 if (patch->address != NULL)
1275 LOG((LF_CORDB,LL_INFO10000, "DC::BP: patch bound recursively by GetJitInfo, bailing...\n"));
1279 LOG((LF_CORDB,LL_INFO10000, "DC::BP: For startAddr %p, got DJI %p, from %p size: 0x%zu\n",
1280 startAddr, info, info->m_addrOfCode, info->m_sizeOfCode));
1283 LOG((LF_CORDB, LL_INFO10000, "DC::BP: Trying to bind patch in %s::%s version %zu\n",
1284 pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, info ? info->m_encVersion : (SIZE_T)-1));
1286 _ASSERTE(g_patches != NULL);
// Convert the patch's offset into an absolute address within the method's
// code region, then record the binding in the patch table.
1288 CORDB_ADDRESS_TYPE *addr = (CORDB_ADDRESS_TYPE *)
1289 CodeRegionInfo::GetCodeRegionInfo(NULL, NULL, startAddr).OffsetToAddress(patch->offset);
1290 g_patches->BindPatch(patch, addr);
1292 LOG((LF_CORDB, LL_INFO10000, "DC::BP:Binding patch at %p (off:0x%zx)\n", addr, patch->offset));
1297 // bool DebuggerController::ApplyPatch() applies
1298 // the patch described to the code, and
1299 // remembers the replaced opcode. Note that the same address
1300 // cannot be patched twice at the same time.
1301 // Grabs the opcode & stores in patch, then sets a break
1302 // instruction for either native or IL.
1303 // VirtualProtect & some macros. Returns false if anything
1305 // DebuggerControllerPatch *patch: The patch, indicates where
1306 // to set the INT3 instruction
1307 // Returns: true if the user break instruction was successfully
1308 // placed into the code-stream, false otherwise
// Write a break instruction at the patch's bound address, saving the
// original opcode in the patch so it can be restored (see UnapplyPatch)
// and so ReadMemory/WriteMemory can "see through" the patch.
// Precondition: the patch is bound but not yet activated.
1309 bool DebuggerController::ApplyPatch(DebuggerControllerPatch *patch)
1311 _ASSERTE(patch != NULL);
1313 LOG((LF_CORDB, LL_INFO10000, "DC::ApplyPatch %p, patchId:0x%zx at addr %p\n",
1314 patch, patch->patchId, patch->address));
1316 // If we try to apply an already applied patch, we'll override our saved opcode
1317 // with the break opcode and end up getting a break in out patch bypass buffer.
1318 _ASSERTE(!patch->IsActivated() );
1319 _ASSERTE(patch->IsBound());
1324 _ASSERTE(!(g_pConfig->GetGCStressLevel() & (EEConfig::GCSTRESS_INSTR_JIT|EEConfig::GCSTRESS_INSTR_NGEN))
1325 && "Debugger does not work with GCSTRESS 0x4 or 0x8");
// Native patch: stamp the platform break instruction over the code.
1327 if (patch->IsNativePatch())
1329 if (patch->fSaveOpcode)
1331 // We only used SaveOpcode for when we've moved code, so
1332 // the patch should already be there.
1333 patch->opcode = patch->opcodeSaved;
1334 _ASSERTE( AddressIsBreakpoint(patch->address) );
1339 VerifyExecutableAddress((BYTE*)patch->address);
1342 LPVOID baseAddress = (LPVOID)(patch->address);
// On platforms other than macOS/arm64, temporarily make the code page
// writable before inserting the breakpoint.
1344 #if !defined(HOST_OSX) || !defined(HOST_ARM64)
1347 if (!VirtualProtect(baseAddress,
1348 CORDbg_BREAK_INSTRUCTION_SIZE,
1349 PAGE_EXECUTE_READWRITE, &oldProt))
1351 // we may be seeing unwriteable directly mapped executable memory.
1352 // let's try copy-on-write instead,
1353 if (!VirtualProtect(baseAddress,
1354 CORDbg_BREAK_INSTRUCTION_SIZE,
1355 PAGE_EXECUTE_WRITECOPY, &oldProt))
1357 _ASSERTE(!"VirtualProtect of code page failed");
1361 #endif // !defined(HOST_OSX) || !defined(HOST_ARM64)
// Save the original instruction, then overwrite it with the break opcode.
1363 patch->opcode = CORDbgGetInstruction(patch->address);
1365 CORDbgInsertBreakpoint((CORDB_ADDRESS_TYPE *)patch->address);
1366 LOG((LF_CORDB, LL_EVERYTHING, "DC::ApplyPatch Breakpoint was inserted at %p for opcode %x\n",
1367 patch->address, patch->opcode));
// Restore the original page protection.
1369 #if !defined(HOST_OSX) || !defined(HOST_ARM64)
1370 if (!VirtualProtect(baseAddress,
1371 CORDbg_BREAK_INSTRUCTION_SIZE,
1374 _ASSERTE(!"VirtualProtect of code page failed");
1377 #endif // !defined(HOST_OSX) || !defined(HOST_ARM64)
// IL patch path: overwrite the IL opcode with CEE_BREAK (x86 only).
1379 // TODO: : determine if this is needed for AMD64
1380 #if defined(TARGET_X86) //REVISIT_TODO what is this?!
1386 // !!! IL patch logic assumes reference instruction encoding
1388 if (!VirtualProtect((void *) patch->address, 2,
1389 PAGE_EXECUTE_READWRITE, &oldProt))
1391 if (!VirtualProtect((void*)patch->address, 2,
1392 PAGE_EXECUTE_WRITECOPY, &oldProt))
1394 _ASSERTE(!"VirtualProtect of code page failed");
1400 (unsigned int) *(unsigned short*)(patch->address+1);
1402 _ASSERTE(patch->opcode != CEE_BREAK);
1404 ExecutableWriterHolder<BYTE> breakpointWriterHolder((BYTE*)patch->address, 2);
1405 *(unsigned short *) (breakpointWriterHolder.GetRW()+1) = CEE_BREAK;
1407 if (!VirtualProtect((void *) patch->address, 2, oldProt, &oldProt))
1409 _ASSERTE(!"VirtualProtect of code page failed");
1418 // bool DebuggerController::UnapplyPatch()
1419 // UnapplyPatch removes the patch described by the patch.
1420 // (CopyOpcodeFromAddrToPatch, in reverse.)
1421 // Looks a lot like CopyOpcodeFromAddrToPatch, except that we use a macro to
1422 // copy the instruction back to the code-stream & immediately set the
1423 // opcode field to 0 so ReadMemory,WriteMemory will work right.
1424 // Note that it's very important to zero out the opcode field, as it
1425 // is used by the right side to determine if a patch is
1428 // DebuggerControllerPatch * patch: Patch to remove
1429 // Returns: true if the patch was unapplied, false otherwise
// Restore the original instruction over an activated patch (the inverse of
// ApplyPatch) and zero the saved opcode so the patch no longer reads as
// active to ReadMemory/WriteMemory.
1430 bool DebuggerController::UnapplyPatch(DebuggerControllerPatch *patch)
1432 _ASSERTE(patch != NULL);
1433 _ASSERTE(patch->address != NULL);
1434 _ASSERTE(patch->IsActivated() );
1436 LOG((LF_CORDB, LL_INFO1000, "DC::UnapplyPatch %p, patchId:0x%zx\n",
1437 patch, patch->patchId));
1439 if (patch->IsNativePatch())
1441 if (patch->fSaveOpcode)
1443 // We're doing this for MoveCode, and we don't want to
1444 // overwrite something if we don't get moved far enough.
1445 patch->opcodeSaved = patch->opcode;
1446 InitializePRD(&(patch->opcode));
1447 _ASSERTE( !patch->IsActivated() );
1451 LPVOID baseAddress = (LPVOID)(patch->address);
// Make the code page writable before restoring the original instruction
// (skipped on macOS/arm64, as in ApplyPatch).
1453 #if !defined(HOST_OSX) || !defined(HOST_ARM64)
1456 if (!VirtualProtect(baseAddress,
1457 CORDbg_BREAK_INSTRUCTION_SIZE,
1458 PAGE_EXECUTE_READWRITE, &oldProt))
1460 if (!VirtualProtect(baseAddress,
1461 CORDbg_BREAK_INSTRUCTION_SIZE,
1462 PAGE_EXECUTE_WRITECOPY, &oldProt))
1465 // We may be trying to remove a patch from memory
1466 // which has been unmapped. We can ignore the
1467 // error in this case.
1469 InitializePRD(&(patch->opcode));
1473 #endif // !defined(HOST_OSX) || !defined(HOST_ARM64)
// Put the saved instruction back over the break instruction.
1475 CORDbgSetInstruction((CORDB_ADDRESS_TYPE *)patch->address, patch->opcode);
1477 // VERY IMPORTANT to zero out opcode, else we might mistake
1478 // this patch for an active one on ReadMem/WriteMem (see
1479 // header file comment).
1480 InitializePRD(&(patch->opcode));
1482 #if !defined(HOST_OSX) || !defined(HOST_ARM64)
1483 if (!VirtualProtect(baseAddress,
1484 CORDbg_BREAK_INSTRUCTION_SIZE,
1487 _ASSERTE(!"VirtualProtect of code page failed");
1490 #endif // !defined(HOST_OSX) || !defined(HOST_ARM64)
// IL patch path: restore the saved IL opcode over CEE_BREAK (x86 only).
1496 if (!VirtualProtect((void *) patch->address, 2,
1497 PAGE_EXECUTE_READWRITE, &oldProt))
1499 if (!VirtualProtect((void*)patch->address, 2,
1500 PAGE_EXECUTE_WRITECOPY, &oldProt))
1503 // We may be trying to remove a patch from memory
1504 // which has been unmapped. We can ignore the
1505 // error in this case.
1507 InitializePRD(&(patch->opcode));
1513 // !!! IL patch logic assumes reference encoding
1515 // TODO: : determine if this is needed for AMD64
1516 #if defined(TARGET_X86)
1517 _ASSERTE(*(unsigned short*)(patch->address+1) == CEE_BREAK);
1519 ExecutableWriterHolder<BYTE> breakpointWriterHolder((BYTE*)patch->address, 2);
1520 *(unsigned short *) (breakpointWriterHolder.GetRW()+1)
1521 = (unsigned short) patch->opcode;
1522 #endif //this makes no sense on anything but X86
1524 // VERY IMPORTANT to zero out opcode, else we might mistake
1525 // this patch for an active one on ReadMem/WriteMem (see
1526 // header file comment).
1527 InitializePRD(&(patch->opcode));
1529 if (!VirtualProtect((void *) patch->address, 2, oldProt, &oldProt))
1531 _ASSERTE(!"VirtualProtect of code page failed");
// Postcondition: patch is deactivated but remains bound.
1536 _ASSERTE( !patch->IsActivated() );
1537 _ASSERTE( patch->IsBound() );
1541 // bool DebuggerController::IsPatched() Is there a patch at addr?
1542 // How: if fNative && the instruction at addr is the break
1543 // instruction for this platform.
// Returns true if the instruction at 'address' is the platform break
// instruction (i.e. a patch has been applied there).
1544 bool DebuggerController::IsPatched(CORDB_ADDRESS_TYPE *address, BOOL native)
1546 LIMITED_METHOD_CONTRACT;
1548 return AddressIsBreakpoint(address);
1553 // DWORD DebuggerController::GetPatchedOpcode() Gets the opcode
1554 // at addr, 'looking underneath' any patches if needed.
1555 // GetPatchedInstruction is a function for the EE to call to "see through"
1556 // a patch to the opcodes which was patched.
1557 // How: Lock() grab opcode directly unless there's a patch, in
1558 // which case grab it out of the patch table.
1559 // BYTE * address: The address that we want to 'see through'
1560 // Returns: DWORD value, that is the opcode that should really be there,
1561 // if we hadn't placed a patch there. If we haven't placed a patch
1562 // there, then we'll see the actual opcode at that address.
// Return the opcode that would be at 'address' if no patch were applied:
// the saved opcode from the patch table when we own a patch there,
// otherwise the instruction actually in memory.
1563 PRD_TYPE DebuggerController::GetPatchedOpcode(CORDB_ADDRESS_TYPE *address)
1565 _ASSERTE(g_patches != NULL);
1568 ZeroMemory(&opcode, sizeof(opcode));
// Lock the patch table while we look up and read the patch.
1570 ControllerLockHolder lockController;
1573 // Look for a patch at the address
1576 DebuggerControllerPatch *patch = g_patches->GetPatch((CORDB_ADDRESS_TYPE *)address);
1580 // Since we got the patch at this address, is must by definition be bound to that address
1581 _ASSERTE( patch->IsBound() );
1582 _ASSERTE( patch->address == address );
1583 // If we're going to be returning it's opcode, then the patch must also be activated
1584 _ASSERTE( patch->IsActivated() );
1585 opcode = patch->opcode;
1590 // Patch was not found - it either is not our patch, or it has
1591 // just been removed. In either case, just return the current
// Managed native code: read the native instruction directly.
1595 if (g_pEEInterface->IsManagedNativeCode((const BYTE *)address))
1597 opcode = CORDbgGetInstruction((CORDB_ADDRESS_TYPE *)address);
// Otherwise (x86 only) read the IL opcode stored after the address.
1600 // TODO: : determine if this is needed for AMD64
1602 #ifdef TARGET_X86 //what is this?!
1606 // !!! IL patch logic assumes reference encoding
1609 opcode = *(unsigned short*)(address+1);
1618 // Holding the controller lock, this will check if an address is patched,
1619 // and if so will then set the PRT_TYPE out parameter to the unpatched value.
// Holding the controller lock, check whether 'address' is patched; if so,
// return the unpatched opcode through *pOpcode, else zero-initialize it.
1620 BOOL DebuggerController::CheckGetPatchedOpcode(CORDB_ADDRESS_TYPE *address,
1621 /*OUT*/ PRD_TYPE *pOpcode)
1630 _ASSERTE(g_patches != NULL);
1634 ControllerLockHolder lockController;
1637 // Look for a patch at the address
1640 if (IsAddressPatched(address))
1642 *pOpcode = GetPatchedOpcode(address);
// No patch here: hand back a zeroed opcode.
1647 InitializePRD(pOpcode);
1655 // void DebuggerController::ActivatePatch() Place a breakpoint
1656 // so that threads will trip over this patch.
1657 // If there any patches at the address already, then copy
1658 // their opcode into this one & return. Otherwise,
1659 // call ApplyPatch(patch). There is an implicit list of patches at this
1660 // address by virtue of the fact that we can iterate through all the
1661 // patches in the patch with the same address.
1662 // DebuggerControllerPatch *patch: The patch to activate
// Activate a bound patch so threads will trip on it. If another activated
// patch already sits at the same address, just share its saved opcode
// instead of re-applying the break instruction.
1663 /* static */ void DebuggerController::ActivatePatch(DebuggerControllerPatch *patch)
1665 _ASSERTE(g_patches != NULL);
1666 _ASSERTE(patch != NULL);
1667 _ASSERTE(patch->IsBound() );
1668 _ASSERTE(!patch->IsActivated() );
1670 LOG((LF_CORDB|LF_ENC,LL_INFO1000,"DC::ActivatePatch: patchId:0x%zx\n", patch->patchId));
1671 patch->LogInstance();
1675 // See if we already have an active patch at this address.
1677 for (DebuggerControllerPatch *p = g_patches->GetPatch(patch->address);
1679 p = g_patches->GetNextPatch(p))
1683 // If we're going to skip activating 'patch' because 'p' already exists at the same address
1684 // then 'p' must be activated. We expect that all bound patches are activated.
1685 _ASSERTE( p->IsActivated() );
1686 LOG((LF_CORDB, LL_INFO10000, "DC::ActivatePatch: There is another patch at this address, no need to apply it.\n"));
// Copy the already-saved original opcode; this marks 'patch' activated.
1688 patch->opcode = p->opcode;
1695 // This is the only patch at this address - apply the patch
1703 _ASSERTE(patch->IsActivated());
1706 // void DebuggerController::DeactivatePatch() Make sure that a
1707 // patch won't be hit.
1708 // How: If this patch is the last one at this address, then
1709 // UnapplyPatch. The caller should then invoke RemovePatch to remove the
1710 // patch from the patch table.
1711 // DebuggerControllerPatch *patch: Patch to deactivate
// Deactivate a patch so it can no longer be hit. Only unapplies the break
// instruction if this is the last patch at the address; the caller is then
// expected to remove the patch from the table (see comment above).
1712 void DebuggerController::DeactivatePatch(DebuggerControllerPatch *patch)
1714 _ASSERTE(g_patches != NULL);
1715 _ASSERTE(patch != NULL);
1717 LOG((LF_CORDB|LF_ENC,LL_INFO1000,"DC::DeactivatePatch: patchId:0x%zx\n", patch->patchId));
1718 patch->LogInstance();
// Unbound patches were never applied; nothing to undo.
1719 if( !patch->IsBound() )
1722 // We expect that all bound patches are also activated.
1723 // One exception to this is if the shutdown thread killed another thread right after
1724 // it deactivated a patch but before it got to remove it.
1725 _ASSERTE(patch->IsActivated() );
1727 bool fUnapply = true;
1730 // See if we already have an active patch at this address.
1732 for (DebuggerControllerPatch *p = g_patches->GetPatch(patch->address);
1734 p = g_patches->GetNextPatch(p))
1738 // There is another patch at this address, so don't remove it
1739 // However, clear the patch data so that we no longer consider this particular patch activated
1740 LOG((LF_CORDB, LL_INFO10000, "DC::DeactivatePatch: There is another patch at this address, don't unapply it.\n"));
1743 InitializePRD(&(patch->opcode));
// Last patch at this address: restore the original instruction.
1750 UnapplyPatch(patch);
1753 _ASSERTE(!patch->IsActivated() );
1756 // Patch must now be removed from the table.
1760 // AddILPrimaryPatch: record a patch on IL code but do not bind it or activate it. The primary b.p.
1761 // is associated with a module/token pair. It is used later
1762 // (e.g. in MapAndBindFunctionPatches) to create one or more "replica"
1763 // breakpoints which are associated with particular MethodDescs/JitInfos.
1765 // Rationale: For generic code a single IL patch (e.g a breakpoint)
1766 // may give rise to several patches, one for each JITting of
1767 // the IL (i.e. generic code may be JITted multiple times for
1768 // different instantiations).
1770 // So we keep one patch which describes
1771 // the breakpoint but which is never actually bound or activated.
1772 // This is then used to apply new "replica" patches to all copies of
1773 // JITted code associated with the method.
1775 // <REVISIT_TODO>In theory we could bind and apply the primary patch when the
1776 // code is known not to be generic (as used to happen to all breakpoint
1777 // patches in V1). However this seems like a premature
1778 // optimization.</REVISIT_TODO>
// Record a primary IL patch keyed by module/methodDef token. It is never
// bound or activated itself; it serves as the template from which replica
// patches are created for each jitting (see comment block above).
1779 DebuggerControllerPatch *DebuggerController::AddILPrimaryPatch(Module *module,
1781 MethodDesc *pMethodDescFilter,
1794 _ASSERTE(g_patches != NULL);
1796 ControllerLockHolder ch;
1799 DebuggerControllerPatch *patch = g_patches->AddPatchForMethodDef(this,
1805 PATCH_KIND_IL_PRIMARY,
1811 LOG((LF_CORDB, LL_INFO10000,
1812 "DC::AP: Added IL primary patch %p for mdTok 0x%x, filter %p at %s offset 0x%zx encVersion %zx\n",
1813 patch, md, pMethodDescFilter, (offsetIsIL ? "IL" : "native"), offset, encVersion));
1818 // See notes above on AddILPrimaryPatch
// Create, bind and activate replica patch(es) in the given jitted code
// (dji) from an IL primary patch. Returns TRUE if at least one replica
// patch was successfully bound.
1819 BOOL DebuggerController::AddBindAndActivateILReplicaPatch(DebuggerControllerPatch *primary,
1820 DebuggerJitInfo *dji)
1822 _ASSERTE(g_patches != NULL);
1823 _ASSERTE(primary->IsILPrimaryPatch());
1824 _ASSERTE(dji != NULL);
1826 BOOL result = FALSE;
1827 MethodDesc* pMD = dji->m_nativeCodeVersion.GetMethodDesc();
// Primary holds a native offset (offsetIsIL == 0): only offset 0 is
// allowed, since it is the only native offset valid across jittings.
1829 if (primary->offsetIsIL == 0)
1831 // Zero is the only native offset that we allow to bind across different jitted
1833 _ASSERTE(primary->offset == 0);
1834 INDEBUG(BOOL fOk = )
1835 AddBindAndActivatePatchForMethodDesc(pMD, dji,
1836 0, PATCH_KIND_IL_REPLICA,
1837 LEAF_MOST_FRAME, m_pAppDomain);
1841 else // bind by IL offset
1843 // Do not dereference the "primary" pointer in the loop! The loop may add more patches,
1844 // causing the patch table to grow and move.
1845 SIZE_T primaryILOffset = primary->offset;
1847 // Loop through all the native offsets mapped to the given IL offset. On x86 the mapping
1848 // should be 1:1. On WIN64, because there are funclets, we have a 1:N mapping.
1849 DebuggerJitInfo::ILToNativeOffsetIterator it;
1850 for (dji->InitILToNativeOffsetIterator(it, primaryILOffset); !it.IsAtEnd(); it.Next())
1853 SIZE_T offsetNative = it.Current(&fExact);
1855 // We special case offset 0, which is when a breakpoint is set
1856 // at the beginning of a method that hasn't been jitted yet. In
1857 // that case it's possible that offset 0 has been optimized out,
1858 // but we still want to set the closest breakpoint to that.
1859 if (!fExact && (primaryILOffset != 0))
1861 LOG((LF_CORDB, LL_INFO10000, "DC::BP:Failed to bind patch in %s::%s at IL offset 0x%zx, native offset 0x%zx\n",
1862 pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, primaryILOffset, offsetNative));
1868 INDEBUG(BOOL fOk = )
1869 AddBindAndActivatePatchForMethodDesc(pMD, dji,
1870 offsetNative, PATCH_KIND_IL_REPLICA,
1871 LEAF_MOST_FRAME, m_pAppDomain);
1876 // As long as we have successfully bound at least one patch, we consider the operation successful.
1882 // This routine places a patch that is conceptually a patch on the IL code.
1883 // The IL code may be jitted multiple times, e.g. due to generics.
1884 // This routine ensures that both present and subsequent JITtings of code will
1887 // This routine will return FALSE only if we will _never_ be able to
1888 // place the patch in any native code corresponding to the given offset.
1889 // Otherwise it will:
1890 // (a) record a "primary" patch
1891 // (b) apply as many replica patches as it can to existing copies of code
1892 // that have debugging information
// Place a conceptual IL patch: lay down a primary patch for the
// module/token, then bind/activate replica patches into every existing DJI
// with a matching EnC version (and MethodDesc filter). Returns FALSE only
// if the patch can never be placed (see comment block above).
1893 BOOL DebuggerController::AddILPatch(AppDomain * pAppDomain, Module *module,
1895 MethodDesc *pMethodDescFilter,
1896 SIZE_T encVersion, // what encVersion does this apply to?
1900 _ASSERTE(g_patches != NULL);
1901 _ASSERTE(md != NULL);
1902 _ASSERTE(module != NULL);
1906 DebuggerMethodInfo *dmi = g_pDebugger->GetOrCreateMethodInfo(module, md); // throws
1907 LOG((LF_CORDB,LL_INFO10000,"DC::AILP: dmi:%p, mdToken:0x%x, mdFilter:%p, "
1908 "encVer:%zu, offset:0x%zx <- isIL:%s, Mod:%p\n",
1909 dmi, md, pMethodDescFilter, encVersion, offset, (offsetIsIL ? "true" : "false"), module));
1918 // OK, we either have (a) no code at all or (b) we have both JIT information and code
1920 // Either way, lay down the PrimaryPatch.
1922 // MapAndBindFunctionPatches will take care of any instantiations that haven't
1923 // finished JITting, by making a copy of the primary breakpoint.
1924 DebuggerControllerPatch *primary = AddILPrimaryPatch(module, md, pMethodDescFilter, offset, offsetIsIL, encVersion);
1926 // We have to keep the index here instead of the pointer. The loop below adds more patches,
1927 // which may cause the patch table to grow and move.
1928 ULONG primaryIndex = g_patches->GetItemIndex((HASHENTRY*)primary);
1930 // Iterate through every existing NativeCodeBlob (with the same EnC version).
1931 // This includes generics + prejitted code.
1932 DebuggerMethodInfo::DJIIterator it;
1933 dmi->IterateAllDJIs(pAppDomain, NULL /* module filter */, pMethodDescFilter, &it);
1937 // It is okay if we don't have any DJIs yet. It just means that the method hasn't been jitted.
1942 // On the other hand, if the method has been jitted, then we expect to be able to bind at least
1943 // one breakpoint. The exception is when we have multiple EnC versions of the method, in which
1944 // case it is ok if we don't bind any breakpoint. One scenario is when a method has been updated
1945 // via EnC but it's not yet jitted. We need to allow a debugger to put a breakpoint on the new
1946 // version of the method, but the new version won't have a DJI yet.
1947 BOOL fVersionMatch = FALSE;
1948 while(!it.IsAtEnd())
1950 DebuggerJitInfo *dji = it.Current();
1951 _ASSERTE(dji->m_jitComplete);
// Only bind into jitted code whose EnC version matches (and which passes
// the optional MethodDesc filter).
1952 if (dji->m_encVersion == encVersion &&
1953 (pMethodDescFilter == NULL || pMethodDescFilter == dji->m_nativeCodeVersion.GetMethodDesc()))
1955 fVersionMatch = TRUE;
// Re-fetch 'primary' by index: the table may have grown and moved.
1957 primary = (DebuggerControllerPatch *)g_patches->GetEntryPtr(primaryIndex);
1959 // <REVISIT_TODO> If we're missing JIT info for any then
1960 // we won't have applied the bp to every instantiation. That should probably be reported
1961 // as a new kind of condition to the debugger, i.e. report "bp only partially applied". It would be
1962 // a shame to completely fail just because one instantiation is missing debug info: e.g. just because
1963 // one component hasn't been prejitted with debugging information.</REVISIT_TODO>
1964 fOk = (AddBindAndActivateILReplicaPatch(primary, dji) || fOk);
1969 // This is the exceptional case referred to in the comment above. If we fail to put a breakpoint
1970 // because we don't have a matching version of the method, we need to return TRUE.
1971 if (fVersionMatch == FALSE)
1973 LOG((LF_CORDB,LL_INFO10000,"DC::AILP: No matching DebuggerJitInfo found\n"));
1982 EX_END_CATCH(SwallowAllExceptions)
1986 // Add a patch at native-offset 0 in the latest version of the method.
1987 // This is used by step-in.
1988 // Calls to new methods always go to the latest version, so EnC is not an issue here.
1989 // The method may be not yet jitted. Or it may be prejitted.
// Add a patch at native offset 0 of the latest EnC version of the method.
// Used by step-in; the method may be unjitted or prejitted (see comment
// block above).
1990 void DebuggerController::AddPatchToStartOfLatestMethod(MethodDesc * fd)
1994 THROWS; // from GetJitInfo
1996 MODE_ANY; // don't really care what mode we're in.
1998 PRECONDITION(ThisMaybeHelperThread());
1999 PRECONDITION(CheckPointer(fd));
2003 _ASSERTE(g_patches != NULL);
2004 Module* pModule = fd->GetModule();
2005 mdToken defToken = fd->GetMemberDef();
2006 DebuggerMethodInfo* pDMI = g_pDebugger->GetOrCreateMethodInfo(pModule, defToken);
// Offset 0, offsetIsIL == FALSE: native start of the current EnC version.
2007 DebuggerController::AddILPatch(GetAppDomain(), pModule, defToken, fd, pDMI->GetCurrentEnCVersion(), 0, FALSE);
2012 // Place patch in method at native offset.
// Place a native-managed patch in a method at the given native offset.
// Thin wrapper over AddBindAndActivatePatchForMethodDesc with
// PATCH_KIND_NATIVE_MANAGED.
2013 BOOL DebuggerController::AddBindAndActivateNativeManagedPatch(MethodDesc * fd,
2014 DebuggerJitInfo *dji,
2015 SIZE_T offsetNative,
2017 AppDomain *pAppDomain)
2021 THROWS; // from GetJitInfo
2023 MODE_ANY; // don't really care what mode we're in.
2025 PRECONDITION(ThisMaybeHelperThread());
2026 PRECONDITION(CheckPointer(fd));
2027 PRECONDITION(fd->IsDynamicMethod() || (dji != NULL));
2031 // For non-dynamic methods, we always expect to have a DJI, but just in case, we don't want the assert to AV.
2032 _ASSERTE((dji == NULL) || (fd == dji->m_nativeCodeVersion.GetMethodDesc()));
2033 _ASSERTE(g_patches != NULL);
2034 return DebuggerController::AddBindAndActivatePatchForMethodDesc(fd, dji, offsetNative, PATCH_KIND_NATIVE_MANAGED, fp, pAppDomain);
2037 // Adds a breakpoint at a specific native offset in a particular jitted code version
// Add a patch for a specific jitted code version at a native offset, then
// bind and activate it. 'kind' must not be PATCH_KIND_IL_PRIMARY (primary
// patches are never bound/activated — see AddILPrimaryPatch).
2038 BOOL DebuggerController::AddBindAndActivatePatchForMethodDesc(MethodDesc *fd,
2039 DebuggerJitInfo *dji,
2040 SIZE_T nativeOffset,
2041 DebuggerPatchKind kind,
2043 AppDomain *pAppDomain)
2049 MODE_ANY; // don't really care what mode we're in.
2051 PRECONDITION(ThisMaybeHelperThread());
2052 PRECONDITION(kind != PATCH_KIND_IL_PRIMARY);
2057 ControllerLockHolder ch;
2059 LOG((LF_CORDB|LF_ENC,LL_INFO10000,"DC::ABAAPFMD: Add to %s::%s, at offs 0x%zx kind:%d fp:%p AD:%p\n",
2060 fd->m_pszDebugClassName, fd->m_pszDebugMethodName, nativeOffset, kind, fp.GetSPValue(), pAppDomain));
2062 DebuggerControllerPatch *patch = g_patches->AddPatchForMethodDef(
2064 g_pEEInterface->MethodDescGetModule(fd),
// Only activate once the patch is successfully bound to a native address.
2075 if (DebuggerController::BindPatch(patch, fd, NULL))
2077 DebuggerController::ActivatePatch(patch);
2085 // This version is particularly useful b/c it doesn't assume that the
2086 // patch is inside a managed method.
// Add and activate a native patch directly at a raw address (managed or
// unmanaged code). Unlike the MethodDesc-based variants, no binding step
// is needed — the address is already known.
2087 DebuggerControllerPatch *DebuggerController::AddAndActivateNativePatchForAddress(CORDB_ADDRESS_TYPE *address,
2090 TraceType traceType)
2098 PRECONDITION(g_patches != NULL);
2103 ControllerLockHolder ch;
2105 DebuggerControllerPatch *patch
2106 = g_patches->AddPatchForAddress(this,
2109 (managed? PATCH_KIND_NATIVE_MANAGED : PATCH_KIND_NATIVE_UNMANAGED),
2114 DebuggerPatchTable::DCP_PATCHID_INVALID,
2117 ActivatePatch(patch);
// Release every patch that belongs to the given module (optionally scoped
// to one AppDomain), e.g. when the module is unloaded.
2122 void DebuggerController::RemovePatchesFromModule(Module *pModule, AppDomain *pAppDomain )
2131 LOG((LF_CORDB, LL_INFO100000, "DPT::CPFM mod:0x%p (%s)\n",
2132 pModule, pModule->GetDebugName()));
2134 // First find all patches of interest
2135 DebuggerController::ControllerLockHolder ch;
2137 for (DebuggerControllerPatch *patch = g_patches->GetFirstPatch(&f);
2139 patch = g_patches->GetNextPatch(&f))
2141 // Skip patches not in the specified domain
2142 if ((pAppDomain != NULL) && (patch->pAppDomain != pAppDomain))
2145 BOOL fRemovePatch = FALSE;
2147 // Remove both native and IL patches that belong to this module
2148 if (patch->HasDJI())
2150 DebuggerJitInfo * dji = patch->GetDJI();
2152 _ASSERTE(patch->key.module == dji->m_nativeCodeVersion.GetMethodDesc()->GetModule());
2154 // It is not necessary to check for m_fd->GetModule() here. It will
2155 // be covered by other module unload notifications issued for the appdomain.
2156 if ( dji->m_pLoaderModule == pModule )
2157 fRemovePatch = TRUE;
// No DJI: fall back to the module recorded in the patch key.
2160 if (patch->key.module == pModule)
2162 fRemovePatch = TRUE;
2167 LOG((LF_CORDB, LL_EVERYTHING, "Removing patch 0x%p\n",
2169 // we shouldn't be both hitting this patch AND
2170 // unloading the module it belongs to.
2171 _ASSERTE(!patch->IsTriggering());
2172 ReleasePatch( patch );
// Debug-only style check: does any patch in the table still reference the
// given module? Mirrors the matching logic in RemovePatchesFromModule.
2178 bool DebuggerController::ModuleHasPatches( Module* pModule )
2187 if( g_patches == NULL )
2189 // Patch table hasn't been initialized
2193 // First find all patches of interest
2195 for (DebuggerControllerPatch *patch = g_patches->GetFirstPatch(&f);
2197 patch = g_patches->GetNextPatch(&f))
2200 // This mirrors logic in code:DebuggerController::RemovePatchesFromModule
2203 if (patch->HasDJI())
2205 DebuggerJitInfo * dji = patch->GetDJI();
2207 _ASSERTE(patch->key.module == dji->m_nativeCodeVersion.GetMethodDesc()->GetModule());
2209 // It may be sufficient to just check m_pLoaderModule here. Since this is used for debug-only
2210 // check, we will check for m_fd->GetModule() as well to catch more potential problems.
2211 if ( (dji->m_pLoaderModule == pModule) || (dji->m_nativeCodeVersion.GetMethodDesc()->GetModule() == pModule) )
// No DJI: compare against the module in the patch key.
2217 if (patch->key.module == pModule)
2228 // Returns true if the given address is in an internal helper
2229 // function, false if its not.
2231 // This is a temporary workaround function to avoid having us stop in
2232 // unmanaged code belonging to the Runtime during a StepIn operation.
// Returns true if 'addr' is a JIT helper inside the runtime dll — used to
// avoid stopping in runtime-internal unmanaged code during StepIn (see
// comment block above). Only implemented for 32-bit non-Unix hosts.
2234 static bool _AddrIsJITHelper(PCODE addr)
2236 #if !defined(HOST_64BIT) && !defined(TARGET_UNIX)
2237 // Is the address in the runtime dll (clr.dll or coreclr.dll) at all? (All helpers are in
// Range check against the runtime image before scanning the helper tables.
2239 if (g_runtimeLoadedBaseAddress <= addr &&
2240 addr < g_runtimeLoadedBaseAddress + g_runtimeVirtualSize)
// Static JIT helper table.
2242 for (int i = 0; i < CORINFO_HELP_COUNT; i++)
2244 if (hlpFuncTable[i].pfnHelper == (void*)addr)
2246 LOG((LF_CORDB, LL_INFO10000,
2247 "_ANIM: address of helper function found: 0x%08x\n",
// Dynamic JIT helper table.
2253 for (unsigned d = 0; d < DYNAMIC_CORINFO_HELP_COUNT; d++)
2255 if (hlpDynamicFuncTable[d].pfnHelper == (void*)addr)
2257 LOG((LF_CORDB, LL_INFO10000,
2258 "_ANIM: address of helper function found: 0x%08x\n",
2264 LOG((LF_CORDB, LL_INFO10000,
2265 "_ANIM: address within runtime dll, but not a helper function "
2268 #else // !defined(HOST_64BIT) && !defined(TARGET_UNIX)
2269 // TODO: Figure out what we want to do here
2270 #endif // !defined(HOST_64BIT) && !defined(TARGET_UNIX)
2275 // bool DebuggerController::PatchTrace() What: Invoke
2276 // AddPatch depending on the type of the given TraceDestination.
2277 // How: Invokes AddPatch based on the trace type: TRACE_OTHER will
2278 // return false, the others will obtain args for a call to an AddPatch
2279 // method & return true.
2281 // Return true if we set a patch, else false
// trace            - destination predicted by the stub managers; its trace
//                    type selects which AddPatch variant is used below.
// fp               - frame pointer giving the patch its frame affinity
//                    (not used for TRACE_MGR_PUSH, which can't have one).
// fStopInUnmanaged - gates whether unmanaged/entry-stub traces get a patch.
2282 bool DebuggerController::PatchTrace(TraceDestination *trace,
2284 bool fStopInUnmanaged)
2288 THROWS; // Because AddPatch may throw on oom. We may want to convert this to nothrow and return false.
2290 DISABLED(GC_TRIGGERS); // @todo - what should this be?
2292 PRECONDITION(ThisMaybeHelperThread());
2295 DebuggerControllerPatch *dcp = NULL;
2296 SIZE_T nativeOffset = 0;
2298 switch (trace->GetTraceType())
2300 case TRACE_ENTRY_STUB: // fall through
2301 case TRACE_UNMANAGED:
2302 LOG((LF_CORDB, LL_INFO10000,
2303 "DC::PT: Setting unmanaged trace patch at 0x%p(%p)\n",
2304 trace->GetAddress(), fp.GetSPValue()));
// Skip JIT helpers so a step-in doesn't stop inside runtime helper code.
2306 if (fStopInUnmanaged && !_AddrIsJITHelper(trace->GetAddress()))
2308 AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE *)trace->GetAddress(),
2311 trace->GetTraceType());
2316 LOG((LF_CORDB, LL_INFO10000, "DC::PT: decided to NOT "
2317 "place a patch in unmanaged code\n"));
2322 LOG((LF_CORDB, LL_INFO10000,
2323 "Setting managed trace patch at 0x%p(%p)\n", trace->GetAddress(), fp.GetSPValue()));
2326 fd = g_pEEInterface->GetNativeCodeMethodDesc(trace->GetAddress());
2329 DebuggerJitInfo *dji;
2330 dji = g_pDebugger->GetJitInfoFromAddr(trace->GetAddress());
2331 //_ASSERTE(dji); //we'd like to assert this, but attach won't work
2333 nativeOffset = CodeRegionInfo::GetCodeRegionInfo(dji, fd).AddressToOffset((const BYTE *)trace->GetAddress());
2335 // Code versioning allows calls to be redirected to alternate code potentially after this trace is complete but before
2336 // execution reaches the call target. Rather than bind the breakpoint to a specific jitted code instance that is currently
2337 // configured to receive execution we need to prepare for that potential retargeting by binding all jitted code instances.
2339 // Triggering this based off the native offset is a little subtle, but all of the stubmanagers follow a rule that if they
2340 // trace across a call boundary into jitted code they either stop at offset zero of the new method, or they continue tracing
2341 // out of that jitted code.
2342 if (nativeOffset == 0)
2344 AddPatchToStartOfLatestMethod(fd)
2348 AddBindAndActivateNativeManagedPatch(fd, dji, nativeOffset, fp, NULL);
2354 case TRACE_UNJITTED_METHOD:
2355 // trace->address is actually a MethodDesc* of the method that we'll
2356 // soon JIT, so put a relative bp at offset zero in.
2357 LOG((LF_CORDB, LL_INFO10000,
2358 "Setting unjitted method patch in MethodDesc %p %s\n", trace->GetMethodDesc(), trace->GetMethodDesc() ? trace->GetMethodDesc()->m_pszDebugMethodName : ""));
2360 // Note: we have to make sure to bind here. If this function is prejitted, this may be our only chance to get a
2361 // DebuggerJITInfo and thereby cause a JITComplete callback.
2362 AddPatchToStartOfLatestMethod(trace->GetMethodDesc());
2365 case TRACE_FRAME_PUSH:
2366 LOG((LF_CORDB, LL_INFO10000,
2367 "Setting frame patch at 0x%p(%p)\n", trace->GetAddress(), fp.GetSPValue()));
2369 AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE *)trace->GetAddress(),
2375 case TRACE_MGR_PUSH:
2376 LOG((LF_CORDB, LL_INFO10000,
2377 "Setting frame patch (TRACE_MGR_PUSH) at 0x%p(%p)\n",
2378 trace->GetAddress(), fp.GetSPValue()));
2380 dcp = AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE *)trace->GetAddress(),
2381 LEAF_MOST_FRAME, // But Mgr_push can't have fp affinity!
2383 DPT_DEFAULT_TRACE_TYPE); // TRACE_OTHER
2384 // Now copy over the trace field since TriggerPatch will expect this
2385 // to be set for this case.
2388 dcp->trace = *trace;
// TRACE_OTHER: nothing we can patch; caller is told no patch was set.
2394 LOG((LF_CORDB, LL_INFO10000,
2395 "Can't set a trace patch for TRACE_OTHER...\n"));
2404 //-----------------------------------------------------------------------------
2405 // Checks if the patch matches the context + thread.
2406 // Multiple patches can exist at a single address, so given a patch at the
2407 // Context's current address, this does additional patch-affinity checks like
2408 // thread, AppDomain, and frame-pointer.
2409 // thread - thread executing the given context that hit the patch
2410 // context - context of the thread that hit the patch
2411 // patch - candidate patch that we're looking for a match.
2413 // True if the patch matches.
2415 //-----------------------------------------------------------------------------
// Affinity checks applied in order: address (asserted by caller's contract),
// AppDomain, owning controller's thread, then frame pointer.
2416 bool DebuggerController::MatchPatch(Thread *thread,
2418 DebuggerControllerPatch *patch)
2420 LOG((LF_CORDB, LL_INFO100000, "DC::MP: EIP:0x%p\n", GetIP(context)));
2422 // Caller should have already matched our addresses.
2423 if (patch->address != dac_cast<PTR_CORDB_ADDRESS_TYPE>(GetIP(context)))
2428 // <BUGNUM>RAID 67173 -</BUGNUM> we'll make sure that intermediate patches have NULL
2429 // pAppDomain so that we don't end up running to completion when
2430 // the appdomain switches halfway through a step.
2431 if (patch->pAppDomain != NULL)
2433 AppDomain *pAppDomainCur = thread->GetDomain();
2435 if (pAppDomainCur != patch->pAppDomain)
2437 LOG((LF_CORDB, LL_INFO10000, "DC::MP: patches didn't match b/c of "
// A patch bound to a specific thread only matches that thread.
2443 if (patch->controller->m_thread != NULL && patch->controller->m_thread != thread)
2445 LOG((LF_CORDB, LL_INFO10000, "DC::MP: patches didn't match b/c threads\n"));
2449 if (patch->fp != LEAF_MOST_FRAME)
2451 // If we specified a Frame-pointer, then it should have been safe to take a stack trace.
2453 ControllerStackInfo info;
2454 StackTraceTicket ticket(patch);
2455 info.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, context);
2457 // !!! This check should really be != , but there is some ambiguity about which frame is the parent frame
2458 // in the destination returned from Frame::TraceFrame, so this allows some slop there.
2460 if (info.HasReturnFrame() && IsCloserToLeaf(info.GetReturnFrame().fp, patch->fp))
2462 LOG((LF_CORDB, LL_INFO10000, "Patch hit but frame not matched at %p (current=%p, patch=%p)\n",
2463 patch->address, info.GetReturnFrame().fp.GetSPValue(), patch->fp.GetSPValue()));
2469 LOG((LF_CORDB, LL_INFO100000, "DC::MP: Returning true\n"));
// If a native patch still sits under the thread's PC, create a
// DebuggerPatchSkip so the original instruction can be executed without
// removing the patch from the code stream. Returns the skipper, or NULL
// when no native patch is at PC (e.g. SetIP moved the thread elsewhere).
2474 DebuggerPatchSkip *DebuggerController::ActivatePatchSkip(Thread *thread,
// Test hook: break into the debugger when the config knob is set.
2479 BOOL shouldBreak = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ActivatePatchSkip);
2480 if (shouldBreak > 0) {
2481 _ASSERTE(!"ActivatePatchSkip");
2485 LOG((LF_CORDB,LL_INFO10000, "DC::APS thread=0x%p pc=0x%p fForEnc=%d\n",
2486 thread, PC, fForEnC));
2487 _ASSERTE(g_patches != NULL);
2489 // Previously, we assumed that if we got to this point & the patch
2490 // was still there that we'd have to skip the patch. SetIP changes
2492 // A breakpoint is set, and hit (but not removed), and all the
2493 // EE threads come to a screeching halt. The Debugger RC thread
2494 // continues along, and is told to SetIP of the thread that hit
2495 // the BP to whatever. Eventually the RC thread is told to continue,
2496 // and at that point the EE thread is released, finishes DispatchPatchOrSingleStep,
2497 // and shows up here.
2498 // At that point, if the thread's current PC is
2499 // different from the patch PC, then SetIP must have moved it elsewhere
2500 // & we shouldn't do this patch skip (which will put us back to where
2501 // we were, which is clearly wrong). If the PC _is_ the same, then
2502 // the thread hasn't been moved, the patch is still in the code stream,
2503 // and we want to do the patch skip thing in order to execute this
2504 // instruction w/o removing it from the code stream.
2506 DebuggerControllerPatch *patch = g_patches->GetPatch((CORDB_ADDRESS_TYPE *)PC);
2507 DebuggerPatchSkip *skip = NULL;
2509 if (patch != NULL && patch->IsNativePatch())
2512 // We adjust the thread's PC to someplace where we write
2513 // the next instruction, then
2514 // we single step over that, then we set the PC back here so
2515 // we don't let other threads race past here while we're stepping
2519 LOG((LF_CORDB,LL_INFO10000, "DC::APS: About to skip from PC=0x%p\n", PC));
2520 skip = new (interopsafe) DebuggerPatchSkip(thread, patch, thread->GetDomain());
// Walks every patch at 'address' (when stWhat includes ST_PATCH) and every
// single-stepping controller on 'thread' (when stWhat includes
// ST_SINGLE_STEP), invoking TriggerPatch / TriggerSingleStep and queueing
// interested controllers into pDcq for later event dispatch. Returns a
// DPOSS_ACTION describing whether the debugger cares about this exception
// and whether an event will be sent. Caller must hold the controller lock
// (asserted below).
2527 DPOSS_ACTION DebuggerController::ScanForTriggers(CORDB_ADDRESS_TYPE *address,
2530 DebuggerControllerQueue *pDcq,
2531 SCAN_TRIGGER stWhat,
2536 // @todo - should this throw or not?
2539 // call Triggers which may invoke GC stuff... See comment in DispatchNativeException for why it's disabled.
2540 DISABLED(GC_TRIGGERS);
2541 PRECONDITION(!ThisIsHelperThreadWorker());
2543 PRECONDITION(CheckPointer(address));
2544 PRECONDITION(CheckPointer(thread));
2545 PRECONDITION(CheckPointer(context));
2546 PRECONDITION(CheckPointer(pDcq));
2547 PRECONDITION(CheckPointer(pTpr));
2551 _ASSERTE(HasLock());
2553 CONTRACT_VIOLATION(ThrowsViolation);
2555 LOG((LF_CORDB, LL_INFO10000, "DC::SFT: starting scan for addr:%p thread:%p\n",
2558 _ASSERTE( pTpr != NULL );
2559 DebuggerControllerPatch *patch = NULL;
2561 if (g_patches != NULL)
2562 patch = g_patches->GetPatch(address);
2564 ULONG iEvent = UINT32_MAX;
2565 ULONG iEventNext = UINT32_MAX;
2568 // This is a debugger exception if there's a patch here, or
2569 // we're here for something like a single step.
2570 DPOSS_ACTION used = DPOSS_INVALID;
2571 if ((patch != NULL) || !IsPatched(address, TRUE))
2573 // we are sure that we care for this exception but not sure
2574 // if we will send event to the RS
2575 used = DPOSS_USED_WITH_NO_EVENT;
2579 // initialize it to don't care for now
2580 used = DPOSS_DONT_CARE;
2583 TP_RESULT tpr = TPR_IGNORE;
// Iterate all patches chained at this address.
2585 while (stWhat & ST_PATCH &&
2589 _ASSERTE(IsInUsedAction(used) == true);
2591 DebuggerControllerPatch *patchNext
2592 = g_patches->GetNextPatch(patch);
2594 LOG((LF_CORDB, LL_INFO10000, "DC::SFT: patch:%p, patchNext:%p\n", patch, patchNext));
2596 // Annoyingly, TriggerPatch may add patches, which may cause
2597 // the patch table to move, which may, in turn, invalidate
2598 // the patch (and patchNext) pointers. Store indices, instead.
2599 iEvent = g_patches->GetItemIndex( (HASHENTRY *)patch );
2601 if (patchNext != NULL)
2603 iEventNext = g_patches->GetItemIndex((HASHENTRY *)patchNext);
2606 if (MatchPatch(thread, context, patch))
2608 LOG((LF_CORDB, LL_INFO10000, "DC::SFT: patch matched\n"));
2611 // We are hitting a patch at a virtual trace call target, so let's trigger trace call here.
2612 if (patch->trace.GetTraceType() == TRACE_ENTRY_STUB)
2614 patch->controller->TriggerTraceCall(thread, dac_cast<PTR_CBYTE>(::GetIP(context)));
2619 // Mark if we're at an unsafe place.
2620 AtSafePlaceHolder unsafePlaceHolder(thread);
2622 tpr = patch->controller->TriggerPatch(patch,
2627 // Any patch may potentially send an event.
2628 // (Whereas some single-steps are "internal-only" and can
2629 // never send an event- such as a single step over an exception that
2630 // lands us in la-la land.)
2631 used = DPOSS_USED_WITH_EVENT;
2633 if (tpr == TPR_TRIGGER ||
2634 tpr == TPR_TRIGGER_ONLY_THIS ||
2635 tpr == TPR_TRIGGER_ONLY_THIS_AND_LOOP)
2637 // Make sure we've still got a valid pointer.
2638 patch = (DebuggerControllerPatch *)
2639 DebuggerController::g_patches->GetEntryPtr( iEvent );
2641 pDcq->dcqEnqueue(patch->controller, TRUE); // <REVISIT_TODO>@todo Return value</REVISIT_TODO>
2644 // Make sure we've got a valid pointer in case TriggerPatch
2645 // returned false but still caused the table to move.
2646 patch = (DebuggerControllerPatch *)
2647 g_patches->GetEntryPtr( iEvent );
2649 // A patch can be deleted as a result of its being triggered.
2650 // The actual deletion of the patch is delayed until after
2651 // the end of the trigger.
2652 // Moreover, "patchNext" could have been deleted as a result of DisableAll()
2653 // being called in TriggerPatch(). Thus, we should update our patchNext
2654 // pointer now. We were just lucky before, because the now-deprecated
2655 // "deleted" flag didn't get set when we iterate the patches in DisableAll().
2656 patchNext = g_patches->GetNextPatch(patch);
2657 if (patchNext != NULL)
2658 iEventNext = g_patches->GetItemIndex((HASHENTRY *)patchNext);
2660 // Note that ReleasePatch() actually removes the patch if its ref count
2661 // reaches 0 after the release.
2662 ReleasePatch(patch);
2665 if (tpr == TPR_IGNORE_AND_STOP ||
2666 tpr == TPR_TRIGGER_ONLY_THIS ||
2667 tpr == TPR_TRIGGER_ONLY_THIS_AND_LOOP)
2670 if (tpr == TPR_TRIGGER_ONLY_THIS ||
2671 tpr == TPR_TRIGGER_ONLY_THIS_AND_LOOP)
2672 _ASSERTE(pDcq->dcqGetCount() == 1);
2677 else if (patchNext != NULL)
2679 patch = (DebuggerControllerPatch *)
2680 g_patches->GetEntryPtr(iEventNext);
2688 #ifdef FEATURE_DATABREAKPOINT
2689 if (stWhat & ST_SINGLE_STEP &&
2690 tpr != TPR_TRIGGER_ONLY_THIS &&
2691 DebuggerDataBreakpoint::IsDataBreakpoint(thread, context))
2693 if (g_pDebugger->m_isSuspendedForGarbageCollection)
2695 // The debugger is not interested in Data Breakpoints during garbage collection
2696 // We can safely ignore them since the Data Breakpoints are now on pinned objects
2697 LOG((LF_CORDB, LL_INFO10000, "D:DDBP: Ignoring data breakpoint while suspended for GC \n"));
2699 used = DPOSS_USED_WITH_NO_EVENT;
2701 else if(DebuggerDataBreakpoint::TriggerDataBreakpoint(thread, context))
2703 DebuggerDataBreakpoint *pDataBreakpoint = new (interopsafe) DebuggerDataBreakpoint(thread);
2704 pDcq->dcqEnqueue(pDataBreakpoint, FALSE);
2709 if (stWhat & ST_SINGLE_STEP &&
2710 tpr != TPR_TRIGGER_ONLY_THIS)
2712 LOG((LF_CORDB, LL_INFO10000, "DC::SFT: Trigger controllers with single step\n"));
2715 // Now, go ahead & trigger all controllers with
2716 // single step events
2719 DebuggerController *p;
2724 DebuggerController *pNext = p->m_next;
2726 if (p->m_thread == thread && p->m_singleStep)
2728 if (used == DPOSS_DONT_CARE)
2730 // Debugger does care for this exception.
2731 used = DPOSS_USED_WITH_NO_EVENT;
2734 if (p->TriggerSingleStep(thread, (const BYTE *)address))
2736 // by now, we should already know that we care for this exception.
2737 _ASSERTE(IsInUsedAction(used) == true);
2739 // now we are sure that we will send event to the RS
2740 used = DPOSS_USED_WITH_EVENT;
2741 pDcq->dcqEnqueue(p, FALSE); // <REVISIT_TODO>@todo Return value</REVISIT_TODO>
2749 UnapplyTraceFlag(thread);
2752 // See if we have any steppers still active for this thread, if so
2753 // re-apply the trace flag.
2759 if (p->m_thread == thread && p->m_singleStep)
2761 ApplyTraceFlag(thread);
2769 // Significant speed increase from single dereference, I bet :)
2772 LOG((LF_CORDB, LL_INFO10000, "DC::SFT: returning 0x%x as used\n",used));
2776 #ifdef EnC_SUPPORTED
2777 // This function will check for an EnC patch at the given address and return
2778 // it if one is there, otherwise it will return NULL.
// address - code address to inspect. Returns the first native
// DEBUGGER_CONTROLLER_ENC patch chained at that address, or NULL when the
// address is not managed code, the method's EnC state rules it out, or no
// such patch exists.
2779 DebuggerControllerPatch *DebuggerController::GetEnCPatch(const BYTE *address)
2783 LOG((LF_CORDB|LF_ENC,LL_INFO1000,"DC:GEnCP at %p\n", address));
2784 if( g_pEEInterface->IsManagedNativeCode(address) )
2786 LOG((LF_CORDB|LF_ENC,LL_INFO1000,"DC:GEnCP address is managed code\n"));
2787 DebuggerJitInfo *dji = g_pDebugger->GetJitInfoFromAddr((TADDR) address);
2793 // we can have two types of patches - one in code where the IL has been updated to trigger
2794 // the switch and the other in the code we've switched to in order to trigger FunctionRemapComplete
2795 // callback. If version == default then can't be the latter, but otherwise if haven't handled the
2796 // remap for this function yet is certainly the latter.
2797 if (! dji->m_encBreakpointsApplied
2798 && (dji->m_encVersion == CorDB_DEFAULT_ENC_FUNCTION_VERSION))
2804 // Look for EnC patch
2805 DebuggerControllerPatch *patch = g_patches->GetPatch((CORDB_ADDRESS_TYPE *)address);
2806 LOG((LF_CORDB|LF_ENC,LL_INFO1000,"DC:GEnCP Searching, beginning patch: %p\n", patch));
2808 while (patch != NULL)
2810 // Patches are ordered by DEBUGGER_CONTROLLER_TYPE value
2811 DEBUGGER_CONTROLLER_TYPE dct = patch->controller->GetDCType();
// Ordering lets us stop early once we're past the ENC controller type.
2812 if ((int)dct > (int)DEBUGGER_CONTROLLER_ENC)
2815 if (dct == DEBUGGER_CONTROLLER_ENC
2816 && patch->IsNativePatch())
2820 patch = g_patches->GetNextPatch(patch);
2825 #endif //EnC_SUPPORTED
2827 // DebuggerController::DispatchPatchOrSingleStep - Ask any patches that are active at a given
2828 // address if they want to do anything about the exception that's occurred there. How: For the given
2829 // address, go through the list of patches & see if any of them are interested (by invoking their
2830 // DebuggerController's TriggerPatch). Put any DCs that are interested into a queue and then calls
2831 // SendEvent on each.
2832 // Note that control will not return from this function in the case of EnC remap
// See the header comment above: gives every patch/single-step controller at
// 'address' a chance to react, queues interested controllers, then sends
// their events outside the controller lock. EnC patches are handled first
// (may not return at all on a remap).
2833 DPOSS_ACTION DebuggerController::DispatchPatchOrSingleStep(Thread *thread, CONTEXT *context, CORDB_ADDRESS_TYPE *address, SCAN_TRIGGER which)
2835 CONTRACT(DPOSS_ACTION)
2837 // @todo - should this throw or not?
2839 DISABLED(GC_TRIGGERS); // Only GC triggers if we send an event. See Comment in DispatchNativeException
2840 PRECONDITION(!ThisIsHelperThreadWorker());
2842 PRECONDITION(CheckPointer(thread));
2843 PRECONDITION(CheckPointer(context));
2844 PRECONDITION(CheckPointer(address));
2845 PRECONDITION(!HasLock());
2847 POSTCONDITION(!HasLock()); // make sure we're not leaking the controller lock
2851 CONTRACT_VIOLATION(ThrowsViolation);
2853 LOG((LF_CORDB|LF_ENC,LL_INFO1000,"DC:DPOSS at 0x%p trigger:0x%x\n", address, which));
2855 // We should only have an exception if some managed thread was running.
2856 // Thus we should never be here when we're stopped.
2857 // @todo - this assert fires! Is that an issue, or is it invalid?
2858 //_ASSERTE(!g_pDebugger->IsStopped());
2859 DPOSS_ACTION used = DPOSS_DONT_CARE;
2861 DebuggerControllerQueue dcq;
2862 if (!g_patchTableValid)
2865 LOG((LF_CORDB|LF_ENC, LL_INFO1000, "DC::DPOSS returning, no patch table.\n"));
2868 _ASSERTE(g_patches != NULL);
2870 CrstHolderWithState lockController(&g_criticalSection);
2872 TADDR originalAddress = 0;
2874 #ifdef EnC_SUPPORTED
2875 DebuggerControllerPatch *dcpEnCOriginal = NULL;
2877 // If this sequence point has an EnC patch, we want to process it ahead of any others. If the
2878 // debugger wants to remap the function at this point, then we'll call ResumeInUpdatedFunction and
2879 // not return, otherwise we will just continue with regular patch-handling logic
2880 dcpEnCOriginal = GetEnCPatch(dac_cast<PTR_CBYTE>(GetIP(context)));
2881 if (dcpEnCOriginal != NULL)
2883 LOG((LF_CORDB|LF_ENC,LL_INFO10000, "DC::DPOSS EnC short-circuit\n"));
2885 dcpEnCOriginal->controller->TriggerPatch(dcpEnCOriginal,
2889 // We will only come back here on a RemapOpportunity that wasn't taken, or on a RemapComplete.
2890 // If we processed a RemapComplete (which returns TPR_IGNORE_AND_STOP), then don't want to handle
2891 // additional breakpoints on the current line because we've already effectively executed to that point
2892 // and would have hit them already. If they are new, we also don't want to hit them because e.g. if we are
2893 // sitting on line 10 and add a breakpoint at line 10 and step,
2894 // don't expect to stop at line 10, expect to go to line 11.
2896 // Special case is if an EnC remap breakpoint exists in the function. This could only happen if the function was
2897 // updated between the RemapOpportunity and the RemapComplete. In that case we want to not skip the patches
2898 // and fall through to handle the remap breakpoint.
2900 if (tpres == TPR_IGNORE_AND_STOP)
2902 // It was a RemapComplete, so fall through. Set dcpEnCOriginal to NULL to indicate that any
2903 // EnC patch still there should be treated as a new patch. Any RemapComplete patch will have been
2904 // already removed by patch processing.
2905 dcpEnCOriginal = NULL;
2906 LOG((LF_CORDB|LF_ENC,LL_INFO10000, "DC::DPOSS done EnC short-circuit, exiting\n"));
2907 used = DPOSS_USED_WITH_EVENT; // indicate that we handled a patch
2911 _ASSERTE(tpres==TPR_IGNORE);
2912 LOG((LF_CORDB|LF_ENC,LL_INFO10000, "DC::DPOSS done EnC short-circuit, ignoring\n"));
2913 // if we got here, then the EnC remap opportunity was not taken, so just continue on.
2915 #endif // EnC_SUPPORTED
2919 used = ScanForTriggers((CORDB_ADDRESS_TYPE *)address, thread, context, &dcq, which, &tpr);
2921 LOG((LF_CORDB|LF_ENC, LL_EVERYTHING, "DC::DPOSS ScanForTriggers called and returned.\n"));
2924 // If we setip, then that will change the address in the context.
2925 // Remember the old address so that we can compare it to the context's ip and see if it changed.
2926 // If it did change, then don't dispatch our current event.
2927 originalAddress = (TADDR) address;
2930 // If we do a SetIP after this point, the value of address will be garbage. Set it to a distinctive pattern now, so
2931 // we don't accidentally use what will (98% of the time) appear to be a valid value.
2932 address = (CORDB_ADDRESS_TYPE *)(UINT_PTR)0xAABBCCFF;
2935 if (dcq.dcqGetCount()> 0)
2937 lockController.Release();
2939 // Mark if we're at an unsafe place.
2940 bool atSafePlace = g_pDebugger->IsThreadAtSafePlace(thread);
2942 g_pDebugger->IncThreadsAtUnsafePlaces();
2944 DWORD dwEvent = 0xFFFFFFFF;
2945 DWORD dwNumberEvents = 0;
2947 SENDIPCEVENT_BEGIN(g_pDebugger, thread);
2949 // Now that we've resumed from blocking, check if somebody did a SetIp on us.
2950 bool fIpChanged = (originalAddress != GetIP(context));
2952 // Send the events outside of the controller lock
2953 bool anyEventsSent = false;
2955 dwNumberEvents = dcq.dcqGetCount();
2958 while (dwEvent < dwNumberEvents)
2960 DebuggerController *event = dcq.dcqGetElement(dwEvent);
2962 if (!event->m_deleted)
2964 #ifdef DEBUGGING_SUPPORTED
2965 if (thread->GetDomain()->IsDebuggerAttached())
2967 if (event->SendEvent(thread, fIpChanged))
2969 anyEventsSent = true;
2972 #endif //DEBUGGING_SUPPORTED
2978 // Trap all threads if necessary, but only if we actually sent a event up (i.e., all the queued events weren't
2979 // deleted before we got a chance to get the EventSending lock.)
2982 LOG((LF_CORDB|LF_ENC, LL_EVERYTHING, "DC::DPOSS We sent an event\n"));
2983 g_pDebugger->SyncAllThreads(SENDIPCEVENT_PtrDbgLockHolder);
2984 LOG((LF_CORDB,LL_INFO1000, "SAT called!\n"));
2990 g_pDebugger->DecThreadsAtUnsafePlaces();
2992 lockController.Acquire();
2994 // Dequeue the events while we have the controller lock.
2996 while (dwEvent < dwNumberEvents)
3003 #if defined EnC_SUPPORTED
3007 // Note: if the thread filter context is NULL, then SetIP would have failed & thus we should do the
3008 // patch skip thing.
3009 // @todo - do we need to get the context again here?
3010 CONTEXT *pCtx = GetManagedLiveCtx(thread);
3012 #ifdef EnC_SUPPORTED
3013 DebuggerControllerPatch *dcpEnCCurrent = GetEnCPatch(dac_cast<PTR_CBYTE>((GetIP(context))));
3015 // we have a new patch if the original was null and the current is non-null. Otherwise we have an old
3016 // patch. We want to skip old patches, but handle new patches.
3017 if (dcpEnCOriginal == NULL && dcpEnCCurrent != NULL)
3019 LOG((LF_CORDB|LF_ENC,LL_INFO10000, "DC::DPOSS EnC post-processing\n"));
3020 dcpEnCCurrent->controller->TriggerPatch( dcpEnCCurrent,
3023 used = DPOSS_USED_WITH_EVENT; // indicate that we handled a patch
3027 ActivatePatchSkip(thread, dac_cast<PTR_CBYTE>(GetIP(pCtx)), FALSE);
3029 lockController.Release();
3032 // We pulse the GC mode here to cooperate w/ a thread trying to suspend the runtime. If we didn't pulse
3033 // the GC, the odds of catching this thread in interruptible code may be very small (since this filter
3034 // could be very large compared to the managed code this thread is running).
3035 // Only do this if the exception was actually for the debugger. (We don't want to toggle the GC mode on every
3036 // random exception). We can't do this while holding any debugger locks.
3037 if (used == DPOSS_USED_WITH_EVENT)
3039 bool atSafePlace = g_pDebugger->IsThreadAtSafePlace(thread);
3042 g_pDebugger->IncThreadsAtUnsafePlaces();
3045 // Always pulse the GC mode. This will allow an async break to complete even if we have a patch
3046 // at an unsafe place.
3047 // If we are at an unsafe place, then we can't do a GC.
3048 thread->PulseGCMode();
3052 g_pDebugger->DecThreadsAtUnsafePlaces();
// Accessor: reports whether THIS controller has requested single-stepping
// (the m_singleStep flag set by EnableSingleStep below).
3060 bool DebuggerController::IsSingleStepEnabled()
3062 LIMITED_METHOD_CONTRACT;
3063 return m_singleStep;
// Requests single-stepping for this controller's thread (m_thread) and
// records the request in m_singleStep.
3066 void DebuggerController::EnableSingleStep()
3076 // Some controllers don't need to set the SS to do their job, and if they are setting it, it's likely an issue.
3077 // So we assert here to catch them red-handed. This assert can always be updated to accommodate changes
3078 // in a controller's behavior.
3082 case DEBUGGER_CONTROLLER_THREAD_STARTER:
3083 case DEBUGGER_CONTROLLER_BREAKPOINT:
3084 case DEBUGGER_CONTROLLER_USER_BREAKPOINT:
3085 case DEBUGGER_CONTROLLER_FUNC_EVAL_COMPLETE:
3086 CONSISTENCY_CHECK_MSGF(false, ("Controller pThis=%p shouldn't be setting ss flag.", this));
3088 default: // MingW compilers require all enum cases to be handled in switch statement.
3093 EnableSingleStep(m_thread);
3094 m_singleStep = true;
3097 #ifdef EnC_SUPPORTED
3098 // Note that this doesn't tell us if Single Stepping is currently enabled
3099 // at the hardware level (ie, for x86, if (context->EFlags & 0x100), but
3100 // rather, if we WANT single stepping enabled (pThread->m_State &Thread::TS_DebuggerIsStepping)
3101 // This gets called from exactly one place - ActivatePatchSkipForEnC
// pThread - thread to query. Tests the TSNC_DebuggerIsStepping state bit
// (the "we WANT stepping" flag described in the comment above), not the
// hardware trace flag itself.
3102 BOOL DebuggerController::IsSingleStepEnabled(Thread *pThread)
3111 // This should be an atomic operation, so we
3112 // don't need to lock it.
3113 if(pThread->m_StateNC & Thread::TSNC_DebuggerIsStepping)
3115 _ASSERTE(pThread->m_StateNC & Thread::TSNC_DebuggerIsStepping);
3122 #endif //EnC_SUPPORTED
// pThread - thread to single-step (must be non-NULL; asserted). Takes the
// controller lock and applies the hardware trace flag via ApplyTraceFlag.
3124 void DebuggerController::EnableSingleStep(Thread *pThread)
3133 LOG((LF_CORDB,LL_INFO1000, "DC::EnableSingleStep\n"));
3135 _ASSERTE(pThread != NULL);
3137 ControllerLockHolder lockController;
3139 ApplyTraceFlag(pThread);
3142 // Disable Single stepping for this controller.
3143 // If none of the controllers on this thread want single-stepping, then also
3144 // ensure that it's disabled on the hardware level.
// Clears this controller's m_singleStep request; scans g_controllers and
// only unapplies the hardware trace flag when no other controller on the
// same thread still wants single-stepping (see comment above).
3145 void DebuggerController::DisableSingleStep()
3154 _ASSERTE(m_thread != NULL);
3156 LOG((LF_CORDB,LL_INFO1000, "DC::DisableSingleStep\n"));
3158 ControllerLockHolder lockController;
3160 DebuggerController *p = g_controllers;
3162 m_singleStep = false;
3166 if (p->m_thread == m_thread
3175 UnapplyTraceFlag(m_thread);
3182 // ApplyTraceFlag sets the trace flag (i.e., turns on single-stepping)
// thread - thread to mark for stepping. Picks the live context when the
// thread is interop-hijacked, otherwise the stopped context, then marks the
// thread for debug stepping and sets the hardware single-step flag.
3185 void DebuggerController::ApplyTraceFlag(Thread *thread)
3187 LOG((LF_CORDB,LL_INFO1000, "DC::ApplyTraceFlag thread:0x%p [0x%0x]\n", thread, Debugger::GetThreadIdHelper(thread)));
3190 if(thread->GetInteropDebuggingHijacked())
3192 context = GetManagedLiveCtx(thread);
3196 context = GetManagedStoppedCtx(thread);
3198 CONSISTENCY_CHECK_MSGF(context != NULL, ("Can't apply ss flag to thread 0x%p b/c it's not in a safe place.\n", thread));
3199 PREFIX_ASSUME(context != NULL);
3202 g_pEEInterface->MarkThreadForDebugStepping(thread, true);
3203 LOG((LF_CORDB,LL_INFO1000, "DC::ApplyTraceFlag marked thread for debug stepping\n"));
3205 SetSSFlag(reinterpret_cast<DT_CONTEXT *>(context) ARM_ARG(thread) ARM64_ARG(thread) RISCV64_ARG(thread));
3209 // UnapplyTraceFlag clears the trace flag for a thread.
3210 // Removes the hardware trace flag on this thread.
// thread - thread whose single-step request is being withdrawn. Unmarks
// the thread for debug stepping and clears the hardware single-step flag in
// its stopped context (when one is available).
3213 void DebuggerController::UnapplyTraceFlag(Thread *thread)
3215 LOG((LF_CORDB,LL_INFO1000, "DC::UnapplyTraceFlag thread:0x%p\n", thread));
3218 // Either this is the helper thread, or we're manipulating our own context.
3220 ThisIsHelperThreadWorker() ||
3221 (thread == ::GetThreadNULLOk())
3224 CONTEXT *context = GetManagedStoppedCtx(thread);
3226 // If there's no context available, then the thread shouldn't have the single-step flag
3227 // enabled and there's nothing for us to do.
3228 if (context == NULL)
3230 // In theory, I wouldn't expect us to ever get here.
3231 // Even if we are here, our single-step flag should already be deactivated,
3232 // so there should be nothing to do. However, we still assert b/c we want to know how
3233 // we'd actually hit this.
3234 // @todo - is there a path if TriggerUnwind() calls DisableAll(). But why would
3235 CONSISTENCY_CHECK_MSGF(false, ("How did we get here?. thread=%p\n", thread));
3236 LOG((LF_CORDB,LL_INFO1000, "DC::UnapplyTraceFlag couldn't get context.\n"));
3240 // Always need to unmark for stepping
3241 g_pEEInterface->MarkThreadForDebugStepping(thread, false);
3242 UnsetSSFlag(reinterpret_cast<DT_CONTEXT *>(context) ARM_ARG(thread) ARM64_ARG(thread) RISCV64_ARG(thread));
// Turns on this controller's exception-hook flag (m_exceptionHook) under
// the controller lock; consumed by DispatchExceptionHook below.
3245 void DebuggerController::EnableExceptionHook()
3254 _ASSERTE(m_thread != NULL);
3256 ControllerLockHolder lockController;
3258 m_exceptionHook = true;
// Turns off this controller's exception-hook flag under the controller lock.
3261 void DebuggerController::DisableExceptionHook()
3270 _ASSERTE(m_thread != NULL);
3272 ControllerLockHolder lockController;
3273 m_exceptionHook = false;
3277 // void DebuggerController::DispatchExceptionHook() Called before
3278 // the switch statement in DispatchNativeException (therefore
3279 // when any exception occurs), this allows patches to do something before the
3280 // regular DispatchX methods.
3281 // How: Iterate through list of controllers. If m_exceptionHook
3282 // is set & m_thread is either thread or NULL, then invoke TriggerExceptionHook()
// See header comment above. Returns FALSE only when some controller's
// TriggerExceptionHook answered TPR_IGNORE_AND_STOP (stop dispatching).
3283 BOOL DebuggerController::DispatchExceptionHook(Thread *thread,
3285 EXCEPTION_RECORD *pException)
3287 // ExceptionHook has restrictive contract b/c it could come from anywhere.
3288 // This can only modify controller's internal state. Can't send managed debug events.
3295 // Filter context not set yet b/c we can only set it in COOP, and this may be in preemptive.
3296 PRECONDITION(thread == ::GetThreadNULLOk());
3297 PRECONDITION((g_pEEInterface->GetThreadFilterContext(thread) == NULL));
3298 PRECONDITION(CheckPointer(pException));
3302 LOG((LF_CORDB, LL_INFO1000, "DC::DEH: DispatchExceptionHook\n"));
3304 if (!g_patchTableValid)
3306 LOG((LF_CORDB, LL_INFO1000, "DC::DEH: returning, no patch table.\n"));
3311 _ASSERTE(g_patches != NULL);
3313 ControllerLockHolder lockController;
3315 TP_RESULT tpr = TPR_IGNORE;
3316 DebuggerController *p;
// Snapshot m_next first: the trigger may unlink the current controller.
3321 DebuggerController *pNext = p->m_next;
3323 if (p->m_exceptionHook
3324 && (p->m_thread == NULL || p->m_thread == thread)
3325 && tpr != TPR_IGNORE_AND_STOP)
3327 LOG((LF_CORDB, LL_INFO1000, "DC::DEH: calling TEH...\n"));
3328 tpr = p->TriggerExceptionHook(thread, context , pException);
3329 LOG((LF_CORDB, LL_INFO1000, "DC::DEH: ... returned.\n"));
3331 if (tpr == TPR_IGNORE_AND_STOP)
3333 LOG((LF_CORDB, LL_INFO1000, "DC::DEH: leaving early!\n"));
3341 LOG((LF_CORDB, LL_INFO1000, "DC::DEH: returning 0x%x!\n", tpr));
3343 return (tpr != TPR_IGNORE_AND_STOP);
3347 // EnableUnwind enables an unwind event to be called when the stack is unwound
3348 // (via an exception) to or past the given pointer.
// fp - stack frame pointer at/past which an unwind should notify this
// controller (stored under the controller lock; see comment above).
3351 void DebuggerController::EnableUnwind(FramePointer fp)
3360 ASSERT(m_thread != NULL);
3361 LOG((LF_CORDB,LL_EVERYTHING,"DC:EU EnableUnwind at %p\n", fp.GetSPValue()));
3363 ControllerLockHolder lockController;
// Accessor for the frame pointer registered via EnableUnwind.
3367 FramePointer DebuggerController::GetUnwind()
3369 LIMITED_METHOD_CONTRACT;
3375 // DisableUnwind disables the unwind event for the controller.
// Resets the unwind frame pointer to LEAF_MOST_FRAME under the controller
// lock, so DispatchUnwind no longer considers this controller.
3378 void DebuggerController::DisableUnwind()
3389 ASSERT(m_thread != NULL);
3391 LOG((LF_CORDB,LL_INFO1000, "DC::DU\n"));
3393 ControllerLockHolder lockController;
3395 m_unwindFP = LEAF_MOST_FRAME;
3399 // DispatchUnwind is called when an unwind happens.
3400 // the event to the appropriate controllers.
3401 // - handlerFP is the frame pointer that the handler will be invoked at.
3402 // - DJI is EnC-aware method that the handler is in.
3403 // - newOffset is the
// Iterates the global controller list under the controller lock and calls
// TriggerUnwind on each controller that (a) is affinitized to this thread,
// (b) has an unwind watermark armed, and (c) whose watermark is at or
// closer-to-root than the handler frame. Loop header/braces are elided
// from this excerpt.
3405 bool DebuggerController::DispatchUnwind(Thread *thread,
3406 MethodDesc *fd, DebuggerJitInfo * pDJI,
3408 FramePointer handlerFP,
3409 CorDebugStepReason unwindReason)
3414 GC_NOTRIGGER; // don't send IPC events
3415 MODE_COOPERATIVE; // TriggerUnwind always is coop
3417 PRECONDITION(!IsDbgHelperSpecialThread());
3422 CONTRACT_VIOLATION(ThrowsViolation); // trigger unwind throws
// Only the two step-reasons produced by exception dispatch are legal here.
3424 _ASSERTE(unwindReason == STEP_EXCEPTION_FILTER || unwindReason == STEP_EXCEPTION_HANDLER);
3428 LOG((LF_CORDB, LL_INFO10000, "DC: Dispatch Unwind\n"));
3430 ControllerLockHolder lockController;
3432 DebuggerController *p;
// Capture next up-front: TriggerUnwind may delete the current controller.
3438 DebuggerController *pNext = p->m_next;
3440 if (p->m_thread == thread && p->m_unwindFP != LEAF_MOST_FRAME)
3442 LOG((LF_CORDB, LL_INFO10000, "Dispatch Unwind: Found candidate\n"));
3445 // Assumptions here:
3446 // Function with handlers are -ALWAYS- EBP-frame based (JIT assumption)
3448 // newFrame is the EBP for the handler
3449 // p->m_unwindFP points to the stack slot with the return address of the function.
3451 // For the interesting case: stepover, we want to know if the handler is in the same function
3452 // as the stepper, if its above it (caller) o under it (callee) in order to know if we want
3453 // to patch the handler or not.
3457 // a) Handler is in a function under the function where the step happened. It therefore is
3458 // a stepover. We don't want to patch this handler. The handler will have an EBP frame.
3459 // So it will be at least be 2 DWORDs away from the m_unwindFP of the controller (
3460 // 1 DWORD from the pushed return address and 1 DWORD for the push EBP).
3462 // b) Handler is in the same function as the stepper. We want to patch the handler. In this
3463 // case handlerFP will be the same as p->m_unwindFP-sizeof(void*). Why? p->m_unwindFP
3464 // stores a pointer to the return address of the function. As a function with a handler
3465 // is always EBP frame based it will have the following code in the prolog:
3467 // push ebp <- ( sub esp, 4 ; mov [esp], ebp )
3470 // Therefore EBP will be equal to &CallerReturnAddress-4.
3472 // c) Handler is above the function where the stepper is. We want to patch the handler. handlerFP
3473 // will be always greater than the pointer to the return address of the function where the
// Cases (b) and (c): handler frame at-or-closer-to-root than the watermark.
3479 if (IsEqualOrCloserToRoot(handlerFP, p->m_unwindFP))
3484 // Assume that this isn't going to block us at all --
3485 // other threads may be waiting to patch or unpatch something,
3488 LOG((LF_CORDB, LL_INFO10000,
3489 "Unwind trigger at offset 0x%p; handlerFP: 0x%p unwindReason: 0x%x.\n",
3490 newOffset, handlerFP.GetSPValue(), unwindReason));
3492 p->TriggerUnwind(thread,
3500 LOG((LF_CORDB, LL_INFO10000,
3501 "Unwind trigger at offset 0x%p; handlerFP: 0x%p unwindReason: 0x%x.\n",
3502 newOffset, handlerFP.GetSPValue(), unwindReason));
3514 // EnableTraceCall enables a call event on the controller
3515 // maxFrame is the leaf-most frame that we want notifications for.
3516 // For step-in stuff, this will always be LEAF_MOST_FRAME.
3517 // for step-out, this will be the current frame because we don't
3518 // care if the current frame calls back into managed code when we're
3519 // only interested in our parent frames.
3522 void DebuggerController::EnableTraceCall(FramePointer maxFrame)
3531 ASSERT(m_thread != NULL);
3533 LOG((LF_CORDB,LL_INFO1000, "DC::ETC maxFrame=0x%x, thread=0x%x\n",
3534 maxFrame.GetSPValue(), Debugger::GetThreadIdHelper(m_thread)));
3536 // JMC stepper should never enabled this. (They should enable ME instead).
3537 _ASSERTE((DEBUGGER_CONTROLLER_JMC_STEPPER != this->GetDCType()) || !"JMC stepper shouldn't enable trace-call");
3540 ControllerLockHolder lockController;
// Arm the EE-side trace-call notification for this thread.
3545 g_pEEInterface->EnableTraceCall(m_thread);
// Keep the leaf-most watermark across repeated enables, so the most
// permissive request wins.
3548 if (IsCloserToLeaf(maxFrame, m_traceCallFP))
3549 m_traceCallFP = maxFrame;
// Plain data bundle passed as the pUserData cookie to
// DebuggerController::PatchTargetVisitor.
3553 struct PatchTargetVisitorData
3555 DebuggerController* controller;
3556 FramePointer maxFrame;
// Visitor callback: places a trace patch on an unmanaged stub target.
// pUserData is a PatchTargetVisitorData*. Exceptions from PatchTrace are
// swallowed — a failed patch placement is non-fatal here.
3559 VOID DebuggerController::PatchTargetVisitor(TADDR pVirtualTraceCallTarget, VOID* pUserData)
3568 DebuggerController* controller = ((PatchTargetVisitorData*) pUserData)->controller;
3569 FramePointer maxFrame = ((PatchTargetVisitorData*) pUserData)->maxFrame;
3573 CONTRACT_VIOLATION(GCViolation); // PatchTrace throws, which implies GC-triggers
3574 TraceDestination trace;
3575 trace.InitForUnmanagedStub(pVirtualTraceCallTarget);
3576 controller->PatchTrace(&trace, maxFrame, true);
3580 // not much we can do here
3582 EX_END_CATCH(SwallowAllExceptions)
3586 // DisableTraceCall disables call events on the controller
3589 void DebuggerController::DisableTraceCall()
3598 ASSERT(m_thread != NULL);
3600 ControllerLockHolder lockController;
3604 LOG((LF_CORDB,LL_INFO1000, "DC::DTC thread=0x%x\n",
3605 Debugger::GetThreadIdHelper(m_thread)));
// Disarm the EE-side notification, then reset this controller's state so
// DispatchTraceCall no longer considers it.
3607 g_pEEInterface->DisableTraceCall(m_thread);
3609 m_traceCall = false;
3610 m_traceCallFP = ROOT_MOST_FRAME;
3615 // Get a FramePointer for the leafmost frame on this thread's stacktrace.
3616 // It's tempting to create this off the head of the Frame chain, but that may
3617 // include internal EE Frames (like GCRoot frames) which a FrameInfo-stackwalk may skip over.
3618 // Thus using the Frame chain would err on the side of returning a FramePointer that
3619 // closer to the leaf.
// Returns info.m_activeFrame.fp from a fresh ControllerStackInfo walk.
// Only legal when no filter context is set (asserted below).
3620 FramePointer GetCurrentFramePointerFromStackTraceForTraceCall(Thread * thread)
3622 _ASSERTE(thread != NULL);
3624 // Ensure this is really the same as CSI.
3625 ControllerStackInfo info;
3627 // It's possible this stackwalk may be done at an unsafe time.
3628 // this method may trigger a GC, for example, in
3629 // FramedMethodFrame::AskStubForUnmanagedCallSite
3630 // which will trash the incoming argument array
3631 // which is not gc-protected.
3633 // We could probably imagine a more specialized stackwalk that
3634 // avoids these calls and is thus GC_NOTRIGGER.
3635 CONTRACT_VIOLATION(GCViolation);
3637 // This is being run live, so there's no filter available.
3639 context = g_pEEInterface->GetThreadFilterContext(thread);
3640 _ASSERTE(context == NULL);
3641 _ASSERTE(!ISREDIRECTEDTHREAD(thread));
3643 // This is actually safe because we're coming from a TraceCall, which
3644 // means we're not in the middle of a stub. We don't have some partially
3645 // constructed frame, so we can safely traverse the stack.
3646 // However, we may still have a problem w/ the GC-violation.
3647 StackTraceTicket ticket(StackTraceTicket::SPECIAL_CASE_TICKET);
3648 info.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, NULL);
3650 FramePointer fp = info.m_activeFrame.fp;
3655 // DispatchTraceCall is called when a call is traced in the EE
3656 // It dispatches the event to the appropriate controllers.
// Walks the controller list under the controller lock; for each controller on
// this thread with trace-call armed, decides (via frame-pointer comparison
// against m_traceCallFP) whether to fire TriggerTraceCall. Loop header/braces
// and parts of the conditional structure are elided from this excerpt.
3659 bool DebuggerController::DispatchTraceCall(Thread *thread,
3671 LOG((LF_CORDB, LL_INFO10000,
3672 "DC::DTC: TraceCall at 0x%x\n", ip));
3674 ControllerLockHolder lockController;
3676 DebuggerController *p;
// Capture next up-front: TriggerTraceCall may delete the current controller.
3681 DebuggerController *pNext = p->m_next;
3683 if (p->m_thread == thread && p->m_traceCall)
// LEAF_MOST_FRAME means "notify for any frame" — no stackwalk needed.
3687 if (p->m_traceCallFP == LEAF_MOST_FRAME)
3691 // We know we don't have a filter context, so get a frame pointer from our frame chain.
3692 FramePointer fpToCheck = GetCurrentFramePointerFromStackTraceForTraceCall(thread);
3697 // Currently, we never ever put a patch in an IL stub, and as such, if the IL stub
3698 // throws an exception after returning from unmanaged code, we would not trigger
3699 // a trace call when we call the constructor of the exception. The following is
3700 // kind of a workaround to make that working. If we ever make the change to stop in
3701 // IL stubs (for example, if we start to share security IL stub), then this can be
3708 // It's possible this stackwalk may be done at an unsafe time.
3709 // this method may trigger a GC, for example, in
3710 // FramedMethodFrame::AskStubForUnmanagedCallSite
3711 // which will trash the incoming argument array
3712 // which is not gc-protected.
3713 ControllerStackInfo info;
3715 CONTRACT_VIOLATION(GCViolation);
3717 CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread);
3719 _ASSERTE(context == NULL);
3720 _ASSERTE(!ISREDIRECTEDTHREAD(thread));
3722 // See explanation in GetCurrentFramePointerFromStackTraceForTraceCall.
3723 StackTraceTicket ticket(StackTraceTicket::SPECIAL_CASE_TICKET);
3724 info.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, NULL);
// IL-stub workaround: if the leaf is an enter-unmanaged chain whose caller
// is an IL stub, compare against the caller's frame instead.
3727 if (info.m_activeFrame.chainReason == CHAIN_ENTER_UNMANAGED)
3729 _ASSERTE(info.HasReturnFrame());
3731 // This check makes sure that we don't do this logic for inlined frames.
3732 if (info.GetReturnFrame().md->IsILStub())
3734 // Make sure that the frame pointer of the active frame is actually
3735 // the address of an exit frame.
3736 _ASSERTE( (static_cast<Frame*>(info.m_activeFrame.fp.GetSPValue()))->GetFrameType()
3737 == Frame::TYPE_EXIT );
3738 _ASSERTE(!info.GetReturnFrame().HasChainMarker());
3739 fpToCheck = info.GetReturnFrame().fp;
3743 // @todo - This comparison seems somewhat nonsensical. We don't have a filter context
3744 // in place, so what frame pointer is fpToCheck actually for?
3745 trigger = IsEqualOrCloserToRoot(fpToCheck, p->m_traceCallFP);
3752 // This can only update controller's state, can't actually send IPC events.
3753 p->TriggerTraceCall(thread, ip);
// Returns whether this controller has asked for TriggerMethodEnter callbacks
// (see EnableMethodEnter / DisableMethodEnter).
3764 bool DebuggerController::IsMethodEnterEnabled()
3766 LIMITED_METHOD_CONTRACT;
3767 return m_fEnableMethodEnter;
3771 // Notify dispatching logic that this controller wants to get TriggerMethodEnter
3772 // We keep a count of total controllers waiting for MethodEnter (in g_cTotalMethodEnter).
3773 // That way we know if any controllers want MethodEnter callbacks. If none do,
3774 // then we can set the JMC probe flag to false for all modules.
3775 void DebuggerController::EnableMethodEnter()
// Both locks are needed: controller lock for m_fEnableMethodEnter /
// g_cTotalMethodEnter, debugger-data lock for the module JMC flag update.
3784 ControllerLockHolder chController;
3785 Debugger::DebuggerDataLockHolder chInfo(g_pDebugger);
3787 // Both JMC + Traditional steppers may use MethodEnter.
3788 // For JMC, it's a core part of functionality. For Traditional steppers, we use it as a backstop
3789 // in case the stub-managers fail.
3790 _ASSERTE(g_cTotalMethodEnter >= 0);
// Idempotent: only bump the global count on the disabled -> enabled edge.
3791 if (!m_fEnableMethodEnter)
3793 LOG((LF_CORDB, LL_INFO1000000, "DC::EnableME, this=%p, previously disabled\n", this));
3794 m_fEnableMethodEnter = true;
3796 g_cTotalMethodEnter++;
3800 LOG((LF_CORDB, LL_INFO1000000, "DC::EnableME, this=%p, already set\n", this));
3802 g_pDebugger->UpdateAllModuleJMCFlag(g_cTotalMethodEnter != 0); // Needs JitInfo lock
3805 // Notify dispatching logic that this controller doesn't want to get
3806 // TriggerMethodEnter
3807 void DebuggerController::DisableMethodEnter()
3816 ControllerLockHolder chController;
3817 Debugger::DebuggerDataLockHolder chInfo(g_pDebugger);
// Idempotent: only decrement the global count on the enabled -> disabled edge.
3819 if (m_fEnableMethodEnter)
3821 LOG((LF_CORDB, LL_INFO1000000, "DC::DisableME, this=%p, previously set\n", this));
3822 m_fEnableMethodEnter = false;
3824 g_cTotalMethodEnter--;
3825 _ASSERTE(g_cTotalMethodEnter >= 0);
3829 LOG((LF_CORDB, LL_INFO1000000, "DC::DisableME, this=%p, already disabled\n", this));
// Clears the per-module JMC probe flags once no controller wants MethodEnter.
3832 g_pDebugger->UpdateAllModuleJMCFlag(g_cTotalMethodEnter != 0); // Needs JitInfo lock
3835 // Loop through controllers and dispatch TriggerMethodEnter
// Called on the current thread when a method-enter probe fires. Fans the
// notification out to every controller with MethodEnter enabled whose thread
// affinity matches (NULL = any thread). Loop header/braces are elided from
// this excerpt.
3836 void DebuggerController::DispatchMethodEnter(void * pIP, FramePointer fp)
3838 _ASSERTE(pIP != NULL);
3840 Thread * pThread = g_pEEInterface->GetThread();
3841 _ASSERTE(pThread != NULL);
3843 // Lookup the DJI for this method & ip.
3844 // Since we create DJIs when we jit the code, and this code has been jitted
3845 // (that's where the probe's coming from!), we will have a DJI.
3846 DebuggerJitInfo * dji = g_pDebugger->GetJitInfoFromAddr((TADDR) pIP);
3848 // This includes the case where we have a LightWeight codegen method.
3854 LOG((LF_CORDB, LL_INFO100000, "DC::DispatchMethodEnter for '%s::%s'\n",
3855 dji->m_nativeCodeVersion.GetMethodDesc()->m_pszDebugClassName,
3856 dji->m_nativeCodeVersion.GetMethodDesc()->m_pszDebugMethodName));
3858 ControllerLockHolder lockController;
3860 // For debug check, keep a count to make sure that g_cTotalMethodEnter
3861 // is actually the number of controllers w/ MethodEnter enabled.
3864 DebuggerController *p = g_controllers;
3867 if (p->m_fEnableMethodEnter)
3869 if ((p->GetThread() == NULL) || (p->GetThread() == pThread))
3872 p->TriggerMethodEnter(pThread, dji, (const BYTE *) pIP, fp);
// Cross-check the bookkeeping maintained by Enable/DisableMethodEnter.
3878 _ASSERTE(g_cTotalMethodEnter == count);
3883 // AddProtection adds page protection to (at least) the given range of
// addresses. Unimplemented: always asserts if reached.
3887 void DebuggerController::AddProtection(const BYTE *start, const BYTE *end,
3891 _ASSERTE(!"Not implemented yet");
3895 // RemoveProtection removes page protection from the given
3896 // addresses. The parameters should match an earlier call to
// AddProtection. Unimplemented: always asserts if reached.
3900 void DebuggerController::RemoveProtection(const BYTE *start, const BYTE *end,
3904 _ASSERTE(!"Not implemented yet");
3908 // Default implementations for FuncEvalEnter & Exit notifications.
// Base class no-op (log only); derived controllers override to react to
// func-eval starting on `thread`.
3909 void DebuggerController::TriggerFuncEvalEnter(Thread * thread)
3911 LOG((LF_CORDB, LL_INFO100000, "DC::TFEEnter, thead=%p, this=%p\n", thread, this));
// Base class no-op (log only); derived controllers override to react to
// func-eval completing on `thread`.
3914 void DebuggerController::TriggerFuncEvalExit(Thread * thread)
3916 LOG((LF_CORDB, LL_INFO100000, "DC::TFEExit, thead=%p, this=%p\n", thread, this));
3919 // bool DebuggerController::TriggerPatch() What: Tells the
3920 // static DC whether this patch should be activated now.
3921 // Returns true if it should be, false otherwise.
3922 // How: Base class implementation returns false. Others may
// override. (The return statement itself is elided from this excerpt.)
3924 TP_RESULT DebuggerController::TriggerPatch(DebuggerControllerPatch *patch,
3928 LOG((LF_CORDB, LL_INFO10000, "DC::TP: in default TriggerPatch\n"));
// Base-class single-step handler: log-only default, overridden by steppers.
3932 bool DebuggerController::TriggerSingleStep(Thread *thread,
3935 LOG((LF_CORDB, LL_INFO10000, "DC::TP: in default TriggerSingleStep\n"));
// Base-class unwind handler: log-only default. Called by DispatchUnwind for
// controllers whose unwind watermark matched; overridden by steppers.
3939 void DebuggerController::TriggerUnwind(Thread *thread,
3940 MethodDesc *fd, DebuggerJitInfo * pDJI, SIZE_T offset,
3942 CorDebugStepReason unwindReason)
3944 LOG((LF_CORDB, LL_INFO10000, "DC::TP: in default TriggerUnwind\n"));
// Base-class trace-call handler: log-only default, overridden by steppers.
3947 void DebuggerController::TriggerTraceCall(Thread *thread,
3950 LOG((LF_CORDB, LL_INFO10000, "DC::TP: in default TriggerTraceCall\n"));
// Base-class exception-hook handler: log-only default. (The TP_RESULT return
// statement is elided from this excerpt.)
3953 TP_RESULT DebuggerController::TriggerExceptionHook(Thread *thread, CONTEXT * pContext,
3954 EXCEPTION_RECORD *exception)
3956 LOG((LF_CORDB, LL_INFO10000, "DC::TP: in default TriggerExceptionHook\n"));
// Base-class method-enter handler: log-only default. Called by
// DispatchMethodEnter; overridden by JMC/traditional steppers.
3960 void DebuggerController::TriggerMethodEnter(Thread * thread,
3961 DebuggerJitInfo * dji,
3965 LOG((LF_CORDB, LL_INFO10000, "DC::TME in default impl. dji=%p, addr=%p, fp=%p\n",
3966 dji, ip, fp.GetSPValue()));
// Base-class SendEvent: must never be reached. Any derived controller whose
// triggers request an event must also override SendEvent.
3969 bool DebuggerController::SendEvent(Thread *thread, bool fIpChanged)
3974 SENDEVENT_CONTRACT_ITEMS;
3978 LOG((LF_CORDB, LL_INFO10000, "DC::TP: in default SendEvent\n"));
3980 // If any derived class trigger SendEvent, it should also implement SendEvent.
3981 _ASSERTE(false || !"Base DebuggerController sending an event?");
3986 // Dispacth Func-Eval Enter & Exit notifications.
// Fans TriggerFuncEvalEnter out to every controller matching this thread
// (NULL affinity = any thread), under the controller lock. Loop header and
// braces are elided from this excerpt.
3987 void DebuggerController::DispatchFuncEvalEnter(Thread * thread)
3989 LOG((LF_CORDB, LL_INFO100000, "DC::DispatchFuncEvalEnter for thread 0x%p\n", thread));
3991 ControllerLockHolder lockController;
3993 DebuggerController *p = g_controllers;
3996 if ((p->GetThread() == NULL) || (p->GetThread() == thread))
3998 p->TriggerFuncEvalEnter(thread);
// Mirror of DispatchFuncEvalEnter: fans TriggerFuncEvalExit out to every
// controller matching this thread, under the controller lock.
4007 void DebuggerController::DispatchFuncEvalExit(Thread * thread)
4009 LOG((LF_CORDB, LL_INFO100000, "DC::DispatchFuncEvalExit for thread 0x%p\n", thread));
4011 ControllerLockHolder lockController;
4013 DebuggerController *p = g_controllers;
4016 if ((p->GetThread() == NULL) || (p->GetThread() == thread))
4018 p->TriggerFuncEvalExit(thread);
4029 // See comment in DispatchNativeException
// Marker function with an empty visible body: called to declare after the
// fact that the caller may have triggered a GC (presumably via a GC_TRIGGERS
// contract in the elided body — TODO confirm against the full source).
4030 void ThisFunctionMayHaveTriggerAGC()
4041 // bool DebuggerController::DispatchNativeException() Figures out
4042 // if any debugger controllers will handle the exception.
4043 // DispatchNativeException should be called by the EE when a native exception
4044 // occurs. If it returns true, the exception was generated by a Controller and
4045 // should be ignored.
4046 // How: Calls DispatchExceptionHook to see if anything is
4047 // interested in ExceptionHook, then does a switch on dwCode:
4048 // EXCEPTION_BREAKPOINT means invoke DispatchPatchOrSingleStep(ST_PATCH).
4049 // EXCEPTION_SINGLE_STEP means DispatchPatchOrSingleStep(ST_SINGLE_STEP).
4050 // EXCEPTION_ACCESS_VIOLATION means invoke DispatchAccessViolation.
4051 // Returns true if the exception was actually meant for the debugger,
4052 // returns false otherwise.
4053 bool DebuggerController::DispatchNativeException(EXCEPTION_RECORD *pException,
4062 // If this exception is for the debugger, then we may trigger a GC.
4063 // But we'll be called on _any_ exception, including ones in a GC-no-triggers region.
4064 // Our current contract system doesn't let us specify such conditions on GC_TRIGGERS.
4065 // So we disable it now, and if we find out the exception is meant for the debugger,
4066 // we'll call ThisFunctionMayHaveTriggerAGC() to ping that we're really a GC_TRIGGERS.
4067 DISABLED(GC_TRIGGERS); // Only GC triggers if we send an event,
4068 PRECONDITION(!IsDbgHelperSpecialThread());
4070 // If we're called from preemptive mode, than our caller has protected the stack.
4071 // If we're in cooperative mode, then we need to protect the stack before toggling GC modes
4072 // (by setting the filter-context)
4075 PRECONDITION(CheckPointer(pException));
4076 PRECONDITION(CheckPointer(pContext));
4077 PRECONDITION(CheckPointer(pCurThread));
4081 LOG((LF_CORDB, LL_EVERYTHING, "DispatchNativeException was called\n"));
4082 LOG((LF_CORDB, LL_INFO10000, "Native exception at 0x%p, code=0x%8x, context=0x%p, er=0x%p\n",
4083 pException->ExceptionAddress, dwCode, pContext, pException));
4088 DPOSS_ACTION result = DPOSS_DONT_CARE;
4091 // We have a potentially ugly locking problem here. This notification is called on any exception,
4092 // but we have no idea what our locking context is at the time. Thus we may hold locks smaller
4093 // than the controller lock.
4094 // The debugger logic really only cares about exceptions directly in managed code (eg, hardware exceptions)
4095 // or in patch-skippers (since that's a copy of managed code running in a look-aside buffer).
4096 // That should exclude all C++ exceptions, which are the common case if Runtime code throws an internal ex.
4097 // So we ignore those to avoid the lock violation.
// Early-out 1: C++ exceptions thrown by runtime code are never debug events.
4098 if (pException->ExceptionCode == EXCEPTION_MSVC)
4100 LOG((LF_CORDB, LL_INFO1000, "Debugger skipping for C++ exception.\n"));
4104 // The debugger really only cares about exceptions in managed code. Any exception that occurs
4105 // while the thread is redirected (such as EXCEPTION_HIJACK) is not of interest to the debugger.
4106 // Allowing this would be problematic because when an exception occurs while the thread is
4107 // redirected, we don't know which context (saved redirection context or filter context)
4108 // we should be operating on (see code:GetManagedStoppedCtx).
// Early-out 2: redirected threads cannot raise debugger exceptions.
4109 if( ISREDIRECTEDTHREAD(pCurThread) )
4111 LOG((LF_CORDB, LL_INFO1000, "Debugger ignoring exception 0x%x on redirected thread.\n", dwCode));
4113 // We shouldn't be seeing debugging exceptions on a redirected thread. While a thread is
4114 // redirected we only call a few internal things (see code:Thread.RedirectedHandledJITCase),
4115 // and may call into the host. We can't call normal managed code or anything we'd want to debug.
4116 _ASSERTE(dwCode != EXCEPTION_BREAKPOINT);
4117 _ASSERTE(dwCode != EXCEPTION_SINGLE_STEP);
4122 // It's possible we're here without a debugger (since we have to call the
4123 // patch skippers). The Debugger may detach anytime,
4124 // so remember the attach state now.
4126 bool fWasAttached = false;
4127 #ifdef DEBUGGING_SUPPORTED
4128 fWasAttached = (CORDebuggerAttached() != 0);
4129 #endif //DEBUGGING_SUPPORTED
4133 // If we're in cooperative mode, it's unsafe to do a GC until we've put a filter context in place.
4136 // If we know the debugger doesn't care about this exception, bail now.
4137 // Usually this is just if there's a debugger attached.
4138 // However, if a debugger detached but left outstanding controllers (like patch-skippers),
4139 // we still may care.
4140 // The only way a controller would get created outside of the helper thread is from
4141 // a patch skipper, so we always handle breakpoints.
4142 if (!CORDebuggerAttached() && (g_controllers == NULL) && (dwCode != EXCEPTION_BREAKPOINT))
4147 FireEtwDebugExceptionProcessingStart();
4149 // We should never be here if the debugger was never involved.
4150 CONTEXT * pOldContext;
4151 pOldContext = pCurThread->GetFilterContext();
4153 // In most cases it is an error to nest, however in the patch-skipping logic we must
4154 // copy an unknown amount of code into another buffer and it occasionally triggers
4155 // an AV. This heuristic should filter that case out.
4156 // Ensure we perform this exception nesting filtering even before the call to
4157 // DebuggerController::DispatchExceptionHook, otherwise the nesting will continue when
4158 // a contract check is triggered in DispatchExceptionHook and another BP exception is
4160 if (pOldContext != NULL
4161 && pCurThread->AVInRuntimeImplOkay()
4162 && pException->ExceptionCode == STATUS_ACCESS_VIOLATION)
4164 STRESS_LOG1(LF_CORDB, LL_INFO100, "DC::DNE Nested Access Violation at %p is being ignored\n",
4165 pException->ExceptionAddress);
4168 // Otherwise it is an error to nest at all
4169 _ASSERTE(pOldContext == NULL);
// First phase: let controllers peek at the raw exception (state-only, no
// managed events may be sent from the hook).
4171 fDispatch = DebuggerController::DispatchExceptionHook(pCurThread,
4176 // Must be in cooperative mode to set the filter context. We know there are times we'll be in preemptive mode,
4177 // (such as M2U handoff, or potentially patches in the middle of a stub, or various random exceptions)
4179 // @todo - We need to worry about GC-protecting our stack. If we're in preemptive mode, the caller did it for us.
4180 // If we're in cooperative, then we need to set the FilterContext *before* we toggle GC mode (since
4181 // the FC protects the stack).
4182 // If we're in preemptive, then we need to set the FilterContext *after* we toggle ourselves to Cooperative.
4183 // Also note it may not be possible to toggle GC mode at these times (such as in the middle of the stub).
4185 // Part of the problem is that the Filter Context is serving 2 purposes here:
4186 // - GC protect the stack. (essential if we're in coop mode).
4187 // - provide info to controllers (such as current IP, and a place to set the Single-Step flag).
4189 // This contract violation is mitigated in that we must have had the debugger involved to get to this point.
4190 CONTRACT_VIOLATION(ModeViolation);
4191 g_pEEInterface->SetThreadFilterContext(pCurThread, pContext);
4193 // Now that we've set the filter context, we can let the GCX_NOTRIGGER expire.
4194 // It's still possible that we may be called from a No-trigger region.
4200 // Disable SingleStep for all controllers on this thread. This requires the filter context set.
4201 // This is what would disable the ss-flag when single-stepping over an AV.
4202 if (g_patchTableValid && (dwCode != EXCEPTION_SINGLE_STEP))
4204 LOG((LF_CORDB, LL_INFO1000, "DC::DNE non-single-step exception; check if any controller has ss turned on\n"));
4206 ControllerLockHolder lockController;
4207 for (DebuggerController* p = g_controllers; p != NULL; p = p->m_next)
4209 if (p->m_singleStep && (p->m_thread == pCurThread))
4211 LOG((LF_CORDB, LL_INFO1000, "DC::DNE turn off ss for controller 0x%p\n", p));
4212 p->DisableSingleStep();
4215 // implicit controller lock release
4218 CORDB_ADDRESS_TYPE * ip = dac_cast<PTR_CORDB_ADDRESS_TYPE>(GetIP(pContext));
// Second phase: route breakpoint / single-step to the patch dispatcher.
4222 case EXCEPTION_BREAKPOINT:
4223 // EIP should be properly set up at this point.
4224 result = DebuggerController::DispatchPatchOrSingleStep(pCurThread,
4228 LOG((LF_CORDB, LL_EVERYTHING, "DC::DNE DispatchPatch call returned\n"));
4230 // If we detached, we should remove all our breakpoints. So if we try
4231 // to handle this breakpoint, make sure that we're attached.
4232 if (IsInUsedAction(result) == true)
4234 _ASSERTE(fWasAttached);
4238 case EXCEPTION_SINGLE_STEP:
4239 LOG((LF_CORDB, LL_EVERYTHING, "DC::DNE SINGLE_STEP Exception\n"));
4241 result = DebuggerController::DispatchPatchOrSingleStep(pCurThread,
4244 (SCAN_TRIGGER)(ST_PATCH|ST_SINGLE_STEP));
4245 // We pass patch | single step since single steps actually
4246 // do both (eg, you SS onto a breakpoint).
4257 LOG((LF_CORDB, LL_INFO1000, "DC:: DNE step-around fDispatch:0x%x!\n", fDispatch));
// "Belonged to the debugger" iff a controller consumed it, or nothing
// dispatched at all (treated as ours per fDispatch==false convention here).
4261 fDebuggers = (fDispatch?(IsInUsedAction(result)?true:false):true);
4263 LOG((LF_CORDB, LL_INFO10000, "DC::DNE, returning 0x%x.\n", fDebuggers));
4266 if (fDebuggers && (result == DPOSS_USED_WITH_EVENT))
4268 // If the exception belongs to the debugger, then we may have sent an event,
4269 // and thus we may have triggered a GC.
4270 ThisFunctionMayHaveTriggerAGC();
4276 // Must restore the filter context. After the filter context is gone, we're
4277 // unprotected again and unsafe for a GC.
4279 CONTRACT_VIOLATION(ModeViolation);
4280 g_pEEInterface->SetThreadFilterContext(pCurThread, NULL);
4283 #ifdef FEATURE_EMULATE_SINGLESTEP
// Re-arm emulated single-stepping (ARM/ARM64): the controller may have
// requested a step while the filter context was in place.
4284 if (pCurThread->IsSingleStepEnabled())
4285 pCurThread->ApplySingleStep(pContext);
4286 #endif // FEATURE_EMULATE_SINGLESTEP
4288 FireEtwDebugExceptionProcessingEnd();
4293 // * -------------------------------------------------------------------------
4294 // * DebuggerPatchSkip routines
4295 // * -------------------------------------------------------------------------
4297 DebuggerPatchSkip::DebuggerPatchSkip(Thread *thread,
4298 DebuggerControllerPatch *patch,
4299 AppDomain *pAppDomain)
4300 : DebuggerController(thread, pAppDomain),
4301 m_address(patch->address)
4303 LOG((LF_CORDB, LL_INFO10000,
4304 "DPS::DPS: Patch skip 0x%p\n", patch->address));
4306 // On ARM the single-step emulation already utilizes a per-thread execution buffer similar to the scheme
4307 // below. As a result we can skip most of the instruction parsing logic that's instead internalized into
4308 // the single-step emulation itself.
4309 #ifndef FEATURE_EMULATE_SINGLESTEP
4311 // NOTE: in order to correctly single-step RIP-relative writes on multiple threads we need to set up
4312 // a shared buffer with the instruction and a buffer for the RIP-relative value so that all threads
4313 // are working on the same copy. as the single-steps complete the modified data in the buffer is
4314 // copied back to the real address to ensure proper execution of the program.
4317 // Create the shared instruction block. this will also create the shared RIP-relative buffer
4320 m_pSharedPatchBypassBuffer = patch->GetOrCreateSharedPatchBypassBuffer();
4321 #if defined(HOST_OSX) && defined(HOST_ARM64)
4322 ExecutableWriterHolder<SharedPatchBypassBuffer> sharedPatchBypassBufferWriterHolder((SharedPatchBypassBuffer*)m_pSharedPatchBypassBuffer, sizeof(SharedPatchBypassBuffer));
4323 SharedPatchBypassBuffer *pSharedPatchBypassBufferRW = sharedPatchBypassBufferWriterHolder.GetRW();
4324 #else // HOST_OSX && HOST_ARM64
4325 SharedPatchBypassBuffer *pSharedPatchBypassBufferRW = m_pSharedPatchBypassBuffer;
4326 #endif // HOST_OSX && HOST_ARM64
4328 BYTE* patchBypassRX = m_pSharedPatchBypassBuffer->PatchBypass;
4329 BYTE* patchBypassRW = pSharedPatchBypassBufferRW->PatchBypass;
4330 LOG((LF_CORDB, LL_INFO10000, "DPS::DPS: Patch skip for opcode 0x%.4x at address %p buffer allocated at 0x%.8x\n", patch->opcode, patch->address, m_pSharedPatchBypassBuffer));
4332 // Copy the instruction block over to the patch skip
4333 // WARNING: there used to be an issue here because CopyInstructionBlock copied the breakpoint from the
4334 // jitted code stream into the patch buffer. Further below CORDbgSetInstruction would correct the
4335 // first instruction. This buffer is shared by all threads so if another thread executed the buffer
4336 // between this thread's execution of CopyInstructionBlock and CORDbgSetInstruction the wrong
4337 // code would be executed. The bug has been fixed by changing CopyInstructionBlock to only copy
4338 // the code bytes after the breakpoint.
4339 // You might be tempted to stop copying the code at all, however that wouldn't work well with rejit.
4340 // If we skip a breakpoint that is sitting at the beginning of a method, then the profiler rejits that
4341 // method causing a jump-stamp to be placed, then we skip the breakpoint again, we need to make sure
4342 // the 2nd skip executes the new jump-stamp code and not the original method prologue code. Copying
4343 // the code every time ensures that we have the most up-to-date version of the code in the buffer.
4344 _ASSERTE( patch->IsBound() );
4345 CopyInstructionBlock(patchBypassRW, (const BYTE *)patch->address);
4347 // Technically, we could create a patch skipper for an inactive patch, but we rely on the opcode being
4349 _ASSERTE( patch->IsActivated() );
4350 CORDbgSetInstruction((CORDB_ADDRESS_TYPE *)patchBypassRW, patch->opcode);
4352 LOG((LF_CORDB, LL_EVERYTHING, "SetInstruction was called\n"));
4354 // Look at instruction to get some attributes
4357 NativeWalker::DecodeInstructionForPatchSkip(patchBypassRX, &(m_instrAttrib));
4359 #if defined(TARGET_AMD64)
4361 // The code below handles RIP-relative addressing on AMD64. The original implementation made the assumption that
4362 // we are only using RIP-relative addressing to access read-only data (see VSW 246145 for more information). This
4363 // has since been expanded to handle RIP-relative writes as well.
4364 if (m_instrAttrib.m_dwOffsetToDisp != 0)
4366 _ASSERTE(m_instrAttrib.m_cbInstr != 0);
4369 // Populate the RIP-relative buffer with the current value if needed
4372 BYTE* bufferBypassRW = pSharedPatchBypassBufferRW->BypassBuffer;
4374 // Overwrite the *signed* displacement.
4375 int dwOldDisp = *(int*)(&patchBypassRX[m_instrAttrib.m_dwOffsetToDisp]);
4376 int dwNewDisp = offsetof(SharedPatchBypassBuffer, BypassBuffer) -
4377 (offsetof(SharedPatchBypassBuffer, PatchBypass) + m_instrAttrib.m_cbInstr);
4378 *(int*)(&patchBypassRW[m_instrAttrib.m_dwOffsetToDisp]) = dwNewDisp;
4380 // This could be an LEA, which we'll just have to change into a MOV and copy the original address.
4381 if (((patchBypassRX[0] == 0x4C) || (patchBypassRX[0] == 0x48)) && (patchBypassRX[1] == 0x8d))
4383 patchBypassRW[1] = 0x8b; // MOV reg, mem
4384 _ASSERTE((int)sizeof(void*) <= SharedPatchBypassBuffer::cbBufferBypass);
4385 *(void**)bufferBypassRW = (void*)(patch->address + m_instrAttrib.m_cbInstr + dwOldDisp);
4389 _ASSERTE(m_instrAttrib.m_cOperandSize <= SharedPatchBypassBuffer::cbBufferBypass);
4390 // Copy the data into our buffer.
4391 memcpy(bufferBypassRW, patch->address + m_instrAttrib.m_cbInstr + dwOldDisp, m_instrAttrib.m_cOperandSize);
4393 if (m_instrAttrib.m_fIsWrite)
4395 // save the actual destination address and size so when we TriggerSingleStep() we can update the value
4396 pSharedPatchBypassBufferRW->RipTargetFixup = (UINT_PTR)(patch->address + m_instrAttrib.m_cbInstr + dwOldDisp);
4397 pSharedPatchBypassBufferRW->RipTargetFixupSize = m_instrAttrib.m_cOperandSize;
4402 #endif // TARGET_AMD64
4404 #endif // !FEATURE_EMULATE_SINGLESTEP
4406 // Signals our thread that the debugger will be manipulating the context
4407 // during the patch skip operation. This effectively prevents other threads
4408 // from suspending us until we have completed skiping the patch and restored
4409 // a good context (See DDB 188816)
4410 thread->BeginDebuggerPatchSkip(this);
4413 // Set IP of context to point to patch bypass buffer
4416 T_CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread);
4417 _ASSERTE(!ISREDIRECTEDTHREAD(thread));
4419 if (context == NULL)
4421 // We can't play with our own context!
4423 if (g_pEEInterface->GetThread())
4425 // current thread is mamaged thread
4426 _ASSERTE(Debugger::GetThreadIdHelper(thread) != Debugger::GetThreadIdHelper(g_pEEInterface->GetThread()));
4430 c.ContextFlags = CONTEXT_CONTROL;
4432 thread->GetThreadContext(&c);
4433 context =(T_CONTEXT *) &c;
4435 ARM_ONLY(_ASSERTE(!"We should always have a filter context in DebuggerPatchSkip."));
4438 #ifdef FEATURE_EMULATE_SINGLESTEP
4439 // Since we emulate all single-stepping on ARM/ARM64 using an instruction buffer and a breakpoint all we have to
4440 // do here is initiate a normal single-step except that we pass the instruction to be stepped explicitly
4441 // (calling EnableSingleStep() would infer this by looking at the PC in the context, which would pick up
4442 // the patch we're trying to skip).
4444 // Ideally we'd refactor the EnableSingleStep to support this alternative calling sequence but since this
4445 // involves three levels of methods and is only applicable to ARM we've chosen to replicate the relevant
4446 // implementation here instead.
4448 ControllerLockHolder lockController;
4449 g_pEEInterface->MarkThreadForDebugStepping(thread, true);
4454 if (Is32BitInstruction(patch->opcode))
4456 opcode2 = CORDbgGetInstruction((CORDB_ADDRESS_TYPE *)(((DWORD)patch->address) + 2));
4458 #endif // TARGET_ARM
4460 thread->BypassWithSingleStep(patch->address, patch->opcode ARM_ARG(opcode2));
4461 m_singleStep = true;
4464 #else // FEATURE_EMULATE_SINGLESTEP
4467 patchBypassRX = NativeWalker::SetupOrSimulateInstructionForPatchSkip(context, m_pSharedPatchBypassBuffer, (const BYTE *)patch->address, patch->opcode);
4468 #endif //TARGET_ARM64
4470 //set eip to point to buffer...
4471 SetIP(context, (PCODE)patchBypassRX);
4473 if (context ==(T_CONTEXT*) &c)
4474 thread->SetThreadContext(&c);
4476 LOG((LF_CORDB, LL_INFO10000, "DPS::DPS Bypass at %p for opcode 0x%zx \n", patchBypassRX, patch->opcode));
4479 // Turn on single step (if the platform supports it) so we can
4480 // fix up state after the instruction is executed.
4481 // Also turn on exception hook so we can adjust IP in exceptions
4486 #endif // !FEATURE_EMULATE_SINGLESTEP
4488 EnableExceptionHook();
// Destructor: drop our reference on the shared patch-bypass buffer that holds
// the out-of-line copy of the patched instruction. The buffer is refcounted
// (see Release below), so it is freed only when the last holder lets go.
// Under FEATURE_EMULATE_SINGLESTEP no bypass buffer is allocated, so there is
// nothing to release.
4491 DebuggerPatchSkip::~DebuggerPatchSkip()
4493 #ifndef FEATURE_EMULATE_SINGLESTEP
4494 _ASSERTE(m_pSharedPatchBypassBuffer);
4495 m_pSharedPatchBypassBuffer->Release();
4496 #endif // !FEATURE_EMULATE_SINGLESTEP
// Called during debugger detach. If a thread's IP is currently inside this
// skip's PatchBypass buffer, rebase the IP back into the original method body
// (the bypass buffer is about to become meaningless once the debugger goes away).
4499 void DebuggerPatchSkip::DebuggerDetachClean()
4501 // Since for ARM/ARM64 the SharedPatchBypassBuffer doesn't exist, we don't have to do anything here.
4502 #ifndef FEATURE_EMULATE_SINGLESTEP
4503 // Fix for Bug 1176448
4504 // When a debugger is detaching from the debuggee, we need to move the IP if it is pointing
4505 // somewhere in PatchBypassBuffer. All managed threads are suspended during detach, so changing
4506 // the context without notifications is safe.
4508 // THIS FIX IS INCOMPLETE! It attempts to update the IP in the cases we can easily detect. However,
4509 // if a thread is in pre-emptive mode, and its filter context has been propagated to a VEH
4510 // context, then the filter context we get will be NULL and this fix will not work. Our belief is
4511 // that this scenario is rare enough that it doesn't justify the cost and risk associated with a
4512 // complete fix, in which we would have to either:
4513 // 1. Change the reference counting for DebuggerController and then change the exception handling
4514 // logic in the debuggee so that we can handle the debugger event after detach.
4515 // 2. Create a "stack walking" implementation for native code and use it to get the current IP and
4516 // set the IP to the right place.
4518 Thread *thread = GetThreadNULLOk();
// NOTE(review): a null-check on `thread` (and on `context` in the condition
// below) presumably guards this dereference in the full source — confirm.
4521 BYTE *patchBypass = m_pSharedPatchBypassBuffer->PatchBypass;
4522 CONTEXT *context = thread->GetFilterContext();
// The IP lies within [patchBypass, patchBypass + MAX_INSTRUCTION_LENGTH + 1]
// iff the thread is executing out of the bypass buffer; shift it back by the
// (bypass - original address) displacement to land at the equivalent spot in
// the real code.
4523 if (patchBypass != NULL &&
4525 (size_t)GetIP(context) >= (size_t)patchBypass &&
4526 (size_t)GetIP(context) <= (size_t)(patchBypass + MAX_INSTRUCTION_LENGTH + 1))
4528 SetIP(context, (PCODE)((BYTE *)GetIP(context) - (patchBypass - (BYTE *)m_address)));
4531 #endif // !FEATURE_EMULATE_SINGLESTEP
4536 // We have to have a whole separate function for this because you
4537 // can't use __try in a function that requires object unwinding...
4540 LONG FilterAccessViolation2(LPEXCEPTION_POINTERS ep, PVOID pv)
4542 LIMITED_METHOD_CONTRACT;
4544 return (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
4545 ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH;
4548 // This helper is required because the AVInRuntimeImplOkayHolder can not
4549 // be directly placed inside the scope of a PAL_TRY
//
// Byte-by-byte copy of the instruction bytes that follow the breakpoint
// opcode. An AV partway through is acceptable (see the caller's handler);
// copying one byte at a time guarantees that everything on a valid page has
// been copied before we walk off the end of it.
4550 void _CopyInstructionBlockHelper(BYTE* to, const BYTE* from)
4552 AVInRuntimeImplOkayHolder AVOkay;
4554 // This function only copies the portion of the instruction that follows the
4555 // breakpoint opcode, not the breakpoint itself
4556 to += CORDbg_BREAK_INSTRUCTION_SIZE;
4557 from += CORDbg_BREAK_INSTRUCTION_SIZE;
4559 // If an AV occurs because we walked off a valid page then we need
4560 // to be certain that all bytes on the previous page were copied.
4561 // We are certain that we copied enough bytes to contain the instruction
4562 // because it must have fit within the valid page.
// NOTE(review): the loop body (the per-byte copy) is not visible here —
// presumably `to[i] = from[i];` — confirm against the original source.
4563 for (int i = 0; i < MAX_INSTRUCTION_LENGTH - CORDbg_BREAK_INSTRUCTION_SIZE; i++)
4570 // WARNING: this function skips copying the first CORDbg_BREAK_INSTRUCTION_SIZE bytes by design
4571 // See the comment at the callsite in DebuggerPatchSkip::DebuggerPatchSkip for more details on
//
// Copies the patched instruction's trailing bytes from the original method
// body into the bypass buffer, tolerating an AV off the end of the last valid
// page, then flushes the CPU instruction cache for the freshly written range.
4573 void DebuggerPatchSkip::CopyInstructionBlock(BYTE *to, const BYTE* from)
4575 // We wrap the memcpy in an exception handler to handle the
4576 // extremely rare case where we're copying an instruction off the
4577 // end of a method that is also at the end of a page, and the next
4578 // page is unmapped.
// NOTE(review): the `Param` struct declaration/initialization is not visible
// here, and `¶m` below looks like a text-mangled `&param` — confirm
// against the original source before building.
4586 PAL_TRY(Param *, pParam, ¶m)
4588 _CopyInstructionBlockHelper(pParam->to, pParam->from);
4590 PAL_EXCEPT_FILTER(FilterAccessViolation2)
4592 // The whole point is that if we copy up the AV, then
4593 // that's enough to execute, otherwise we would not have been
4594 // able to execute the code anyway. So we just ignore the
4596 LOG((LF_CORDB, LL_INFO10000,
4597 "DPS::DPS: AV copying instruction block ignored.\n"));
4601 // We just created a new buffer of code, but the CPU caches code and may
4602 // not be aware of our changes. This should force the CPU to dump any cached
4603 // instructions it has in this region and load the new ones from memory
4604 FlushInstructionCache(GetCurrentProcess(), to + CORDbg_BREAK_INSTRUCTION_SIZE,
4605 MAX_INSTRUCTION_LENGTH - CORDbg_BREAK_INSTRUCTION_SIZE);
// Fired when the thread hits the breakpoint patch this skip controller owns.
// In debug builds it sanity-checks that the IP is exactly at the bypass
// address and that this is the only patch registered for the method. It then
// arms single-step + exception hook so we regain control after the bypassed
// instruction executes, and returns TPR_IGNORE (we never stop here).
4608 TP_RESULT DebuggerPatchSkip::TriggerPatch(DebuggerControllerPatch *patch,
4612 ARM_ONLY(_ASSERTE(!"Should not have called DebuggerPatchSkip::TriggerPatch."));
4613 LOG((LF_CORDB, LL_EVERYTHING, "DPS::TP called\n"));
4615 #if defined(_DEBUG) && !defined(FEATURE_EMULATE_SINGLESTEP)
4616 CONTEXT *context = GetManagedLiveCtx(thread);
4618 LOG((LF_CORDB, LL_INFO1000, "DPS::TP: We've patched 0x%x (byPass:0x%x) "
4619 "for a skip after an EnC update!\n", GetIP(context),
4620 GetBypassAddress()));
4621 _ASSERTE(g_patches != NULL);
4623 // We shouldn't have mucked with EIP, yet.
4624 _ASSERTE(dac_cast<PTR_CORDB_ADDRESS_TYPE>(GetIP(context)) == GetBypassAddress());
4626 //We should be the _only_ patch here
4627 MethodDesc *md2 = dac_cast<PTR_MethodDesc>(GetIP(context));
4628 DebuggerControllerPatch *patchCheck = g_patches->GetPatch(g_pEEInterface->MethodDescGetModule(md2),md2->GetMemberDef());
4629 _ASSERTE(patchCheck == patch);
4630 _ASSERTE(patchCheck->controller == patch->controller);
4632 patchCheck = g_patches->GetNextPatch(patchCheck);
4633 _ASSERTE(patchCheck == NULL);
4634 #endif // defined(_DEBUG) && !defined(FEATURE_EMULATE_SINGLESTEP)
4637 EnableExceptionHook();
4638 EnableSingleStep(); //gets us back to where we want.
4639 return TPR_IGNORE; // don't actually want to stop here....
// Exception-hook trigger for a patch skip: after the bypassed instruction
// executes (or faults), rebase the thread's IP from the bypass buffer back
// into the original method body, handling per-platform quirks (RIP-relative
// fixups on ARM64, return-address fixup for calls, the NT exception
// dispatcher window, AVs raised by the bypassed instruction itself).
4642 TP_RESULT DebuggerPatchSkip::TriggerExceptionHook(Thread *thread, CONTEXT * context,
4643 EXCEPTION_RECORD *exception)
4649 // Patch skippers only operate on patches set in managed code. But the infrastructure may have
4650 // toggled the GC mode underneath us.
4653 PRECONDITION(GetThreadNULLOk() == thread);
4654 PRECONDITION(thread != NULL);
4655 PRECONDITION(CheckPointer(context));
// Only handle exceptions raised in the AppDomain this skip was created for.
4659 if (m_pAppDomain != NULL)
4661 AppDomain *pAppDomainCur = thread->GetDomain();
4663 if (pAppDomainCur != m_pAppDomain)
4665 LOG((LF_CORDB,LL_INFO10000, "DPS::TEH: Appdomain mismatch - not skiiping!\n"));
4670 LOG((LF_CORDB,LL_INFO10000, "DPS::TEH: doing the patch-skip thing\n"));
4672 #if defined(TARGET_ARM64) && !defined(FEATURE_EMULATE_SINGLESTEP)
4674 if (!IsSingleStep(exception->ExceptionCode))
4676 LOG((LF_CORDB, LL_INFO10000, "Exception in patched Bypass instruction .\n"));
4677 return (TPR_IGNORE_AND_STOP);
4680 _ASSERTE(m_pSharedPatchBypassBuffer);
4681 BYTE* patchBypass = m_pSharedPatchBypassBuffer->PatchBypass;
// RipTargetFixup non-zero means the bypassed instruction was PC-relative and
// its true target was recorded when the bypass was set up; otherwise shift
// the IP back by the bypass-to-original displacement.
4683 if (m_pSharedPatchBypassBuffer->RipTargetFixup)
4685 targetIp = m_pSharedPatchBypassBuffer->RipTargetFixup;
4689 targetIp = (PCODE)((BYTE *)GetIP(context) - (patchBypass - (BYTE *)m_address));
4692 SetIP(context, targetIp);
4693 LOG((LF_CORDB, LL_ALWAYS, "Redirecting after Patch to 0x%p\n", GetIP(context)));
4695 #elif defined(FEATURE_EMULATE_SINGLESTEP)
4699 _ASSERTE(m_pSharedPatchBypassBuffer);
4700 BYTE* patchBypass = m_pSharedPatchBypassBuffer->PatchBypass;
// A call that single-stepped pushed a return address inside the bypass
// buffer; pull it back into the original method body.
4702 if (m_instrAttrib.m_fIsCall && IsSingleStep(exception->ExceptionCode))
4704 // Fixup return address on stack
4705 #if defined(TARGET_X86) || defined(TARGET_AMD64)
4706 SIZE_T *sp = (SIZE_T *) GetSP(context);
4708 LOG((LF_CORDB, LL_INFO10000,
4709 "Bypass call return address redirected from %p\n", *sp));
4711 *sp -= patchBypass - (BYTE*)m_address;
4713 LOG((LF_CORDB, LL_INFO10000, "to %p\n", *sp));
4715 PORTABILITY_ASSERT("DebuggerPatchSkip::TriggerExceptionHook -- return address fixup NYI");
// Absolute branches that completed a single-step already carry the correct
// IP; everything else needs the IP rebased out of the bypass buffer.
4719 if (!m_instrAttrib.m_fIsAbsBranch || !IsSingleStep(exception->ExceptionCode))
4723 LOG((LF_CORDB, LL_INFO10000, "Bypass instruction redirected from %p\n", GetIP(context)));
4725 if (IsSingleStep(exception->ExceptionCode))
4728 // Check if the current IP is anywhere near the exception dispatcher logic.
4729 // If it is, ignore the exception, as the real exception is coming next.
// Lazily resolved once per process; (-1) is the "lookup failed" sentinel so
// we don't retry GetProcAddress on every exception.
4730 static FARPROC pExcepDispProc = NULL;
4732 if (!pExcepDispProc)
4734 HMODULE hNtDll = WszGetModuleHandle(W("ntdll.dll"));
4738 pExcepDispProc = GetProcAddress(hNtDll, "KiUserExceptionDispatcher");
4740 if (!pExcepDispProc)
4741 pExcepDispProc = (FARPROC)(size_t)(-1);
4744 pExcepDispProc = (FARPROC)(size_t)(-1);
4747 _ASSERTE(pExcepDispProc != NULL);
4749 if ((size_t)pExcepDispProc != (size_t)(-1))
4751 LPVOID pExcepDispEntryPoint = pExcepDispProc;
4753 if ((size_t)GetIP(context) > (size_t)pExcepDispEntryPoint &&
4754 (size_t)GetIP(context) <= ((size_t)pExcepDispEntryPoint + MAX_INSTRUCTION_LENGTH * 2 + 1))
4756 LOG((LF_CORDB, LL_INFO10000,
4757 "Bypass instruction not redirected. Landed in exception dispatcher.\n"));
4759 return (TPR_IGNORE_AND_STOP);
4762 #endif // TARGET_UNIX
4764 // If the IP is close to the skip patch start, or if we were skipping over a call, then assume the IP needs
4766 if (m_instrAttrib.m_fIsCall ||
4767 ((size_t)GetIP(context) > (size_t)patchBypass &&
4768 (size_t)GetIP(context) <= (size_t)(patchBypass + MAX_INSTRUCTION_LENGTH + 1)))
4770 LOG((LF_CORDB, LL_INFO10000, "Bypass instruction redirected because still in skip area.\n"
4771 "\tm_fIsCall = %s, patchBypass = %p, m_address = %p\n",
4772 (m_instrAttrib.m_fIsCall ? "true" : "false"), patchBypass, m_address));
4773 SetIP(context, (PCODE)((BYTE *)GetIP(context) - (patchBypass - (BYTE *)m_address)));
4777 // Otherwise, need to see if the IP is something we recognize (either managed code
4778 // or stub code) - if not, we ignore the exception
4779 PCODE newIP = GetIP(context);
4780 newIP -= PCODE(patchBypass - (BYTE *)m_address);
4781 TraceDestination trace;
4783 if (g_pEEInterface->IsManagedNativeCode(dac_cast<PTR_CBYTE>(newIP)) ||
4784 (g_pEEInterface->TraceStub(LPBYTE(newIP), &trace)))
4786 LOG((LF_CORDB, LL_INFO10000, "Bypass instruction redirected because we landed in managed or stub code\n"));
4787 SetIP(context, newIP);
4790 // If we have no idea where things have gone, then we assume that the IP needs no adjusting (which
4791 // could happen if the instruction we were trying to patch skip caused an AV). In this case we want
4792 // to claim it as ours but ignore it and continue execution.
4795 LOG((LF_CORDB, LL_INFO10000, "Bypass instruction not redirected because we're not in managed or stub code.\n"));
4796 return (TPR_IGNORE_AND_STOP);
4802 LOG((LF_CORDB, LL_INFO10000, "Bypass instruction redirected because it wasn't a single step exception.\n"));
4803 SetIP(context, (PCODE)((BYTE *)GetIP(context) - (patchBypass - (BYTE *)m_address)));
4806 LOG((LF_CORDB, LL_ALWAYS, "DPS::TEH: IP now at %p\n", GetIP(context)));
4812 // Signals our thread that the debugger is done manipulating the context
4813 // during the patch skip operation. This effectively prevented other threads
4814 // from suspending us until we completed skipping the patch and restored
4815 // a good context (See DDB 188816)
4816 m_thread->EndDebuggerPatchSkip();
4818 // Don't delete the controller yet if this is a single step exception, as the code will still want to dispatch to
4819 // our single step method, and if it doesn't find something to dispatch to we won't continue from the exception.
4821 // (This is kind of broken behavior but is easily worked around here
4823 if (!IsSingleStep(exception->ExceptionCode))
4828 DisableExceptionHook();
// Single-step trigger for a patch skip. On AMD64, if the bypassed instruction
// was a RIP-relative *write*, the write landed in the bypass buffer's
// BypassBuffer scratch area; replay it now at the real target address
// (recorded as RipTargetFixup/RipTargetFixupSize when the bypass was built).
// Otherwise this is essentially a no-op before the controller is torn down.
4833 bool DebuggerPatchSkip::TriggerSingleStep(Thread *thread, const BYTE *ip)
4835 LOG((LF_CORDB,LL_INFO10000, "DPS::TSS: basically a no-op\n"));
// Only act on behalf of the AppDomain this skip belongs to.
4837 if (m_pAppDomain != NULL)
4839 AppDomain *pAppDomainCur = thread->GetDomain();
4841 if (pAppDomainCur != m_pAppDomain)
4843 LOG((LF_CORDB,LL_INFO10000, "DPS::TSS: Appdomain mismatch - "
4844 "not SingSteping!!\n"));
4849 #if defined(TARGET_AMD64)
4850 // Dev11 91932: for RIP-relative writes we need to copy the value that was written in our buffer to the actual address
4851 _ASSERTE(m_pSharedPatchBypassBuffer);
4852 if (m_pSharedPatchBypassBuffer->RipTargetFixup)
4854 _ASSERTE(m_pSharedPatchBypassBuffer->RipTargetFixupSize);
4856 BYTE* bufferBypass = m_pSharedPatchBypassBuffer->BypassBuffer;
4857 BYTE fixupSize = m_pSharedPatchBypassBuffer->RipTargetFixupSize;
4858 UINT_PTR targetFixup = m_pSharedPatchBypassBuffer->RipTargetFixup;
// NOTE(review): the switch statement and its `case` labels on fixupSize
// (presumably 1/2/4/8 bytes, plus a larger-copy case and a default) are not
// visible in this view — confirm the dispatch structure against the original.
4863 *(reinterpret_cast<BYTE*>(targetFixup)) = *(reinterpret_cast<BYTE*>(bufferBypass));
4867 *(reinterpret_cast<WORD*>(targetFixup)) = *(reinterpret_cast<WORD*>(bufferBypass));
4871 *(reinterpret_cast<DWORD*>(targetFixup)) = *(reinterpret_cast<DWORD*>(bufferBypass));
4875 *(reinterpret_cast<ULONGLONG*>(targetFixup)) = *(reinterpret_cast<ULONGLONG*>(bufferBypass));
4881 memcpy(reinterpret_cast<void*>(targetFixup), bufferBypass, fixupSize);
4885 _ASSERTE(!"bad operand size. If you hit this and it was because we need to process instructions with larger \
4886 relative immediates, make sure to update the SharedPatchBypassBuffer size, the DebuggerHeapExecutableMemoryAllocator, \
4887 and structures depending on DBG_MAX_EXECUTABLE_ALLOC_SIZE.");
4891 LOG((LF_CORDB,LL_INFO10000, "DPS::TSS: triggered, about to delete\n"));
4898 // * -------------------------------------------------------------------------
4899 // * DebuggerBreakpoint routines
4900 // * -------------------------------------------------------------------------
4901 // DebuggerBreakpoint::DebuggerBreakpoint() The constructor
4902 // invokes AddBindAndActivatePatch to set the breakpoint
//
// Binds a breakpoint either directly to one native code body
// (native && !nativeCodeBindAllVersions) or as an IL patch that binds to all
// (current and future) versions of the method. *pSucceed reports whether the
// patch could be placed.
4903 DebuggerBreakpoint::DebuggerBreakpoint(Module *module,
4905 AppDomain *pAppDomain,
4908 SIZE_T ilEnCVersion, // must give the EnC version for non-native bps
4909 MethodDesc *nativeMethodDesc, // use only when m_native
4910 DebuggerJitInfo *nativeJITInfo, // optional when m_native, null otherwise
4911 bool nativeCodeBindAllVersions,
4914 : DebuggerController(NULL, pAppDomain)
4916 _ASSERTE(pSucceed != NULL);
4917 _ASSERTE((native == (nativeMethodDesc != NULL)) || nativeCodeBindAllVersions);
4918 _ASSERTE(native || nativeJITInfo == NULL);
4919 _ASSERTE(!nativeJITInfo || nativeJITInfo->m_jitComplete); // this is sent by the left-side, and it couldn't have got the code if the JIT wasn't complete
4921 if (native && !nativeCodeBindAllVersions)
4923 (*pSucceed) = AddBindAndActivateNativeManagedPatch(nativeMethodDesc, nativeJITInfo, offset, LEAF_MOST_FRAME, pAppDomain);
// IL path: offset must be 0 for native-all-versions binding; !native selects
// IL-offset interpretation inside AddILPatch.
4927 _ASSERTE(!native || offset == 0);
4928 (*pSucceed) = AddILPatch(pAppDomain, module, md, NULL, ilEnCVersion, offset, !native);
4932 // TP_RESULT DebuggerBreakpoint::TriggerPatch()
4933 // What: This patch will always be activated.
4934 // How: return true.
// A user breakpoint unconditionally stops the thread when its patch is hit
// (the visible body only logs; the triggering return value follows below
// this view).
4935 TP_RESULT DebuggerBreakpoint::TriggerPatch(DebuggerControllerPatch *patch,
4939 LOG((LF_CORDB, LL_INFO10000, "DB::TP\n"));
4944 // void DebuggerBreakpoint::SendEvent() What: Inform
4945 // the right side that the breakpoint was reached.
4946 // How: g_pDebugger->SendBreakpoint()
4947 bool DebuggerBreakpoint::SendEvent(Thread *thread, bool fIpChanged)
4952 SENDEVENT_CONTRACT_ITEMS;
4957 LOG((LF_CORDB, LL_INFO10000, "DB::SE: in DebuggerBreakpoint's SendEvent\n"));
4959 CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread);
4961 // If we got interrupted by SetIp, we just don't send the IPC event. Our triggers are still
4962 // active so no harm done.
// Notify the right side (the debugger process) that this breakpoint fired.
4965 g_pDebugger->SendBreakpoint(thread, context, this);
4969 // Controller is still alive, will fire if we hit the breakpoint again.
4973 //* -------------------------------------------------------------------------
4974 // * DebuggerStepper routines
4975 // * -------------------------------------------------------------------------
// Constructor: records the stop filters (which unmapped regions and which
// interceptors to stop in) and initializes all frame-pointer anchors to
// LEAF_MOST_FRAME; actual step ranges are supplied later.
4977 DebuggerStepper::DebuggerStepper(Thread *thread,
4978 CorDebugUnmappedStop rgfMappingStop,
4979 CorDebugIntercept interceptStop,
4980 AppDomain *appDomain)
4981 : DebuggerController(thread, appDomain),
4983 m_reason(STEP_NORMAL),
4984 m_fpStepInto(LEAF_MOST_FRAME),
4985 m_rgfInterceptStop(interceptStop),
4986 m_rgfMappingStop(rgfMappingStop),
4989 m_realRangeCount(0),
4990 m_fp(LEAF_MOST_FRAME),
4991 #if defined(FEATURE_EH_FUNCLETS)
4992 m_fpParentMethod(LEAF_MOST_FRAME),
4993 #endif // FEATURE_EH_FUNCLETS
4994 m_fpException(LEAF_MOST_FRAME),
4996 m_cFuncEvalNesting(0)
4999 m_fReadyToSend = false;
// Destructor: frees the interop-safe allocation holding the step-range array
// (m_range), if one was ever set.
5003 DebuggerStepper::~DebuggerStepper()
5005 if (m_range != NULL)
5007 TRACE_FREE(m_range);
5008 DeleteInteropSafe(m_range);
5012 // bool DebuggerStepper::ShouldContinueStep() Return true if
5013 // the stepper should not stop at this address. The stepper should not
5014 // stop here if: here is in the {prolog,epilog,etc};
5015 // and the stepper is not interested in stopping here.
5016 // We assume that this is being called in the frame which the stepper steps
5017 // through. Unless, of course, we're returning from a call, in which
5018 // case we want to stop in the epilog even if the user didn't say so,
5019 // to prevent stepping out of multiple frames at once.
5020 // <REVISIT_TODO>Possible optimization: GetJitInfo, then AddPatch @ end of prolog?</REVISIT_TODO>
5021 bool DebuggerStepper::ShouldContinueStep( ControllerStackInfo *info,
5022 SIZE_T nativeOffset)
5024 LOG((LF_CORDB,LL_INFO10000, "DeSt::ShContSt: nativeOffset:0x%p \n", nativeOffset));
// Only consult the IL mapping when the user asked to skip some unmapped
// regions (STOP_ALL means "stop anywhere") and we're not finishing a
// step-out (STEP_EXIT must stop regardless).
5025 if (m_rgfMappingStop != STOP_ALL && (m_reason != STEP_EXIT) )
5028 DebuggerJitInfo *ji = info->m_activeFrame.GetJitInfoFromFrame();
5032 LOG((LF_CORDB,LL_INFO10000,"DeSt::ShContSt: For code 0x%p, got "
5033 "DJI 0x%p, from 0x%p to 0x%p\n",
5034 (const BYTE*)GetControlPC(&(info->m_activeFrame.registers)),
5035 ji, ji->m_addrOfCode, ji->m_addrOfCode+ji->m_sizeOfCode));
5039 LOG((LF_CORDB,LL_INFO10000,"DeSt::ShCoSt: For code 0x%p, didn't "
5040 "get DJI\n",(const BYTE*)GetControlPC(&(info->m_activeFrame.registers))));
5042 return false; // Haven't a clue if we should continue, so
5045 CorDebugMappingResult map = MAPPING_UNMAPPED_ADDRESS;
5046 DWORD whichIDontCare;
5047 ji->MapNativeOffsetToIL( nativeOffset, &map, &whichIDontCare);
// Strip the "normal" mapping bits; what remains are the special regions
// (prolog/epilog/unmapped/...) the user may have asked to skip.
5048 unsigned int interestingMappings =
5049 (map & ~(MAPPING_APPROXIMATE | MAPPING_EXACT));
5051 LOG((LF_CORDB,LL_INFO10000,
5052 "DeSt::ShContSt: interestingMappings:0x%x m_rgfMappingStop:%x\n",
5053 interestingMappings,m_rgfMappingStop));
5055 // If we're in a prolog,epilog, then we may want to skip
5057 if ( interestingMappings )
5059 if ( interestingMappings & m_rgfMappingStop )
// Returns true when the stepper's recorded step ranges apply to the frame we
// are currently stopped in: either the active frame is the frame we started
// stepping in (same frame pointer), we're back in the method where an
// exception was taken, or (funclets) the current funclet/method shares the
// parent-method frame pointer recorded at step start.
5068 bool DebuggerStepper::IsRangeAppropriate(ControllerStackInfo *info)
5070 LOG((LF_CORDB,LL_INFO10000, "DS::IRA: info:0x%p \n", info));
5071 if (m_range == NULL)
5073 LOG((LF_CORDB,LL_INFO10000, "DS::IRA: m_range == NULL, returning FALSE\n"));
5077 const FrameInfo *realFrame;
// For a non-filter funclet, the interesting frame for comparison purposes is
// the parent (return) frame, not the funclet itself.
5079 #if defined(FEATURE_EH_FUNCLETS)
5080 bool fActiveFrameIsFunclet = info->m_activeFrame.IsNonFilterFuncletFrame();
5082 if (fActiveFrameIsFunclet)
5084 realFrame = &(info->GetReturnFrame());
5087 #endif // FEATURE_EH_FUNCLETS
5089 realFrame = &(info->m_activeFrame);
5092 LOG((LF_CORDB,LL_INFO10000, "DS::IRA: info->m_activeFrame.fp:0x%p m_fp:0x%p\n", info->m_activeFrame.fp, m_fp));
5093 LOG((LF_CORDB,LL_INFO10000, "DS::IRA: m_fdException:0x%p realFrame->md:0x%p realFrame->fp:0x%p m_fpException:0x%p\n",
5094 m_fdException, realFrame->md, realFrame->fp, m_fpException));
5095 if ( (info->m_activeFrame.fp == m_fp) ||
5096 ( (m_fdException != NULL) && (realFrame->md == m_fdException) &&
5097 IsEqualOrCloserToRoot(realFrame->fp, m_fpException) ) )
5099 LOG((LF_CORDB,LL_INFO10000, "DS::IRA: returning TRUE\n"));
5103 #if defined(FEATURE_EH_FUNCLETS)
5104 // There are three scenarios which make this function more complicated on WIN64.
5105 // 1) We initiate a step in the parent method or a funclet but end up stepping into another funclet closer to the leaf.
5106 // a) start in the parent method
5107 // b) start in a funclet
5108 // 2) We initiate a step in a funclet but end up stepping out to the parent method or a funclet closer to the root.
5109 // a) end up in the parent method
5110 // b) end up in a funclet
5111 // 3) We initiate a step and then change stack allocation within the method or funclet
5112 // In both cases the range of the stepper should still be appropriate.
5114 bool fValidParentMethodFP = (m_fpParentMethod != LEAF_MOST_FRAME);
5116 // All scenarios have the same condition
5117 if (fValidParentMethodFP && (m_fpParentMethod == info->GetReturnFrame(true).fp))
5119 LOG((LF_CORDB,LL_INFO10000, "DS::IRA: (parent SP) returning TRUE\n"));
5122 #endif // FEATURE_EH_FUNCLETS
5124 LOG((LF_CORDB,LL_INFO10000, "DS::IRA: returning FALSE\n"));
5128 // bool DebuggerStepper::IsInRange() Given the native offset ip,
5129 // returns true if ip falls within any of the native offset ranges specified
5130 // by the array of COR_DEBUG_STEP_RANGEs.
5131 // Returns true if ip falls within any of the ranges. Returns false
5132 // if ip doesn't, or if there are no ranges (rangeCount==0). Note that a
5133 // COR_DEBUG_STEP_RANGE with an endOffset of zero is interpreted as extending
5134 // from startOffset to the end of the method.
5135 // SIZE_T ip: Native offset, relative to the beginning of the method.
5136 // COR_DEBUG_STEP_RANGE *range: An array of ranges, which are themselves
5137 // native offsets, to compare against ip.
5138 // SIZE_T rangeCount: Number of elements in range
5139 bool DebuggerStepper::IsInRange(SIZE_T ip, COR_DEBUG_STEP_RANGE *range, SIZE_T rangeCount,
5140 ControllerStackInfo *pInfo)
5142 LOG((LF_CORDB,LL_INFO10000,"DS::IIR: off=%zx\n", ip));
5146 LOG((LF_CORDB,LL_INFO10000,"DS::IIR: range == NULL -> not in range\n"));
// If stack info is available, only consider the ranges when the current
// frame is the one the stepper was started in.
5150 if (pInfo && !IsRangeAppropriate(pInfo))
5152 LOG((LF_CORDB,LL_INFO10000,"DS::IIR: no pInfo or range not appropriate -> not in range\n"));
5156 COR_DEBUG_STEP_RANGE *r = range;
5157 COR_DEBUG_STEP_RANGE *rEnd = r + rangeCount;
// endOffset == 0 means "to the end of the method" — treat as SIZE_T max.
5161 SIZE_T endOffset = r->endOffset ? r->endOffset : ~0;
5162 LOG((LF_CORDB,LL_INFO100000,"DS::IIR: so=0x%x, eo=0x%zx\n",
5163 r->startOffset, endOffset));
5165 if (ip >= r->startOffset && ip < endOffset)
// NOTE(review): the index printed below, (r-range)/sizeof(COR_DEBUG_STEP_RANGE *),
// divides an element count by a pointer size — the plain (r - range) is the
// correct index. Log-output-only defect; harmless to execution.
5167 LOG((LF_CORDB,LL_INFO1000,"DS::IIR: this:%p, Found native offset 0x%zx to be in the range [0x%x, 0x%zx), index 0x%zx\n",
5168 this, ip, r->startOffset, endOffset, ((r-range)/sizeof(COR_DEBUG_STEP_RANGE *)) ));
5175 LOG((LF_CORDB,LL_INFO10000,"DS::IIR: not in range\n"));
5179 // bool DebuggerStepper::DetectHandleInterceptors() Return true if
5180 // the current execution takes place within an interceptor (that is, either
5181 // the current frame, or the parent frame is a framed frame whose
5182 // GetInterception method returns something other than INTERCEPTION_NONE),
5183 // and this stepper doesn't want to stop in an interceptor, and we successfully
5184 // set a breakpoint after the top-most interceptor in the stack.
5185 bool DebuggerStepper::DetectHandleInterceptors(ControllerStackInfo *info)
5187 LOG((LF_CORDB,LL_INFO10000,"DS::DHI: Start DetectHandleInterceptors\n"));
5188 LOG((LF_CORDB,LL_INFO10000,"DS::DHI: active frame=0x%p, has return frame=%d, return frame=0x%p m_reason:%d\n",
5189 info->m_activeFrame.frame, info->HasReturnFrame(), info->GetReturnFrame().frame, m_reason));
5191 // If this is a normal step, then we want to continue stepping, even if we
5192 // are in an interceptor.
5193 if (m_reason == STEP_NORMAL || m_reason == STEP_RETURN || m_reason == STEP_EXCEPTION_HANDLER)
5195 LOG((LF_CORDB,LL_INFO1000,"DS::DHI: Returning false while stepping within function, finally!\n"));
5199 bool fAttemptStepOut = false;
// INTERCEPT_ALL means the user is willing to stop in any interceptor, so
// there is nothing to skip out of.
5201 if (m_rgfInterceptStop != INTERCEPT_ALL) // we may have to skip out of one
// Case 1: the active frame itself is an interceptor.
5203 if (info->m_activeFrame.frame != NULL &&
5204 info->m_activeFrame.frame != FRAME_TOP &&
5205 info->m_activeFrame.frame->GetInterception() != Frame::INTERCEPTION_NONE)
5207 if (!((CorDebugIntercept)info->m_activeFrame.frame->GetInterception() & Frame::Interception(m_rgfInterceptStop)))
5209 LOG((LF_CORDB,LL_INFO10000,"DS::DHI: Stepping out b/c of excluded frame type:0x%x\n",
5210 info->m_activeFrame.frame->GetInterception()));
5212 fAttemptStepOut = true;
5216 LOG((LF_CORDB,LL_INFO10000,"DS::DHI: 0x%p set to STEP_INTERCEPT\n", this));
5218 m_reason = STEP_INTERCEPT; //remember why we've stopped
// Case 2: we are inside an exception filter, or the return frame is an
// interceptor.
5222 if ((m_reason == STEP_EXCEPTION_FILTER) ||
5223 (info->HasReturnFrame() &&
5224 info->GetReturnFrame().frame != NULL &&
5225 info->GetReturnFrame().frame != FRAME_TOP &&
5226 info->GetReturnFrame().frame->GetInterception() != Frame::INTERCEPTION_NONE))
5228 if (m_reason == STEP_EXCEPTION_FILTER)
5230 // Exceptions raised inside of the EE by COMPlusThrow, FCThrow, etc will not
5231 // insert an ExceptionFrame, and hence info->GetReturnFrame().frame->GetInterception()
5232 // will not be accurate. Hence we use m_reason instead
5234 if (!(Frame::INTERCEPTION_EXCEPTION & Frame::Interception(m_rgfInterceptStop)))
5236 LOG((LF_CORDB,LL_INFO10000,"DS::DHI: Stepping out b/c of excluded INTERCEPTION_EXCEPTION\n"));
5237 fAttemptStepOut = true;
5240 else if (!(info->GetReturnFrame().frame->GetInterception() & Frame::Interception(m_rgfInterceptStop)))
5242 LOG((LF_CORDB,LL_INFO10000,"DS::DHI: Stepping out b/c of excluded return frame type:0x%x\n",
5243 info->GetReturnFrame().frame->GetInterception()));
5245 fAttemptStepOut = true;
5248 if (!fAttemptStepOut)
5250 LOG((LF_CORDB,LL_INFO10000,"DS::DHI 0x%x set to STEP_INTERCEPT\n", this));
5252 m_reason = STEP_INTERCEPT; //remember why we've stopped
// Case 3: a special chain (e.g. a runtime-inserted transition) applies.
5255 else if (info->m_specialChainReason != CHAIN_NONE)
5257 if(!(info->m_specialChainReason & CorDebugChainReason(m_rgfInterceptStop)) )
5259 LOG((LF_CORDB,LL_INFO10000, "DS::DHI: (special) Stepping out b/c of excluded return frame type:0x%x\n",
5260 info->m_specialChainReason));
5262 fAttemptStepOut = true;
5266 LOG((LF_CORDB,LL_INFO10000,"DS::DHI 0x%x set to STEP_INTERCEPT\n", this));
5268 m_reason = STEP_INTERCEPT; //remember why we've stopped
// Case 4: no explicit Frame — check for implicit interception such as a
// class constructor being run on the user's behalf.
5271 else if (info->m_activeFrame.frame == NULL)
5273 // Make sure we are not dealing with a chain here.
5274 if (info->m_activeFrame.HasMethodFrame())
5276 // Check whether we are executing in a class constructor.
5277 _ASSERTE(info->m_activeFrame.md != NULL);
5278 if (info->m_activeFrame.md->IsClassConstructor())
5280 // We are in a class constructor. Check whether we want to stop in it.
5281 if (!(CHAIN_CLASS_INIT & CorDebugChainReason(m_rgfInterceptStop)))
5283 LOG((LF_CORDB, LL_INFO10000, "DS::DHI: Stepping out b/c of excluded cctor:0x%x\n",
5286 fAttemptStepOut = true;
5290 LOG((LF_CORDB, LL_INFO10000,"DS::DHI 0x%x set to STEP_INTERCEPT\n", this));
5292 m_reason = STEP_INTERCEPT; //remember why we've stopped
5299 if (fAttemptStepOut)
5301 LOG((LF_CORDB,LL_INFO1000,"DS::DHI: Doing TSO!\n"));
5303 // TrapStepOut could alter the step reason if we're stepping out of an inteceptor and it looks like we're
5304 // running off the top of the program. So hold onto it here, and if our step reason becomes STEP_EXIT, then
5305 // reset it to what it was.
5306 CorDebugStepReason holdReason = m_reason;
5308 // @todo - should this be TrapStepNext??? But that may stop in a child...
5312 if (m_reason == STEP_EXIT)
5314 m_reason = holdReason;
5320 // We're not in a special area of code, so we don't want to continue unless some other part of the code decides that
5322 LOG((LF_CORDB,LL_INFO1000,"DS::DHI: Returning false, finally!\n"));
5328 //---------------------------------------------------------------------------------------
5330 // This function checks whether the given IP is in an LCG method. If so, it enables
5331 // JMC and does a step out. This effectively makes sure that we never stop in an LCG method.
5333 // There are two common scnearios here:
5334 // 1) We single-step into an LCG method from a managed method.
5335 // 2) We single-step off the end of a method called by an LCG method and end up in the calling LCG method.
5337 // In both cases, we don't want to stop in the LCG method. If the LCG method directly or indirectly calls
5338 // another user method, we want to stop there. Otherwise, we just want to step out back to the caller of
5339 // LCG method. In other words, what we want is exactly the JMC behaviour.
5342 // ip - the current IP where the thread is stopped at
5343 // pMD - This is the MethodDesc for the specified ip. This can be NULL, but if it's not,
5344 // then it has to match the specified IP.
5345 // pInfo - the ControllerStackInfo taken at the specified IP (see Notes below)
5348 // Returns TRUE if the specified IP is indeed in an LCG method, in which case this function has already
5349 // enabled all the traps to catch the thread, including turning on JMC, enabling unwind callback, and
5350 // putting a patch in the caller.
5353 // LCG methods don't show up in stackwalks done by the ControllerStackInfo. So even if the specified IP
5354 // is in an LCG method, the LCG method won't show up in the call strack. That's why we need to call
5355 // ControllerStackInfo::SetReturnFrameWithActiveFrame() in this function before calling TrapStepOut().
5356 // Otherwise TrapStepOut() will put a patch in the caller's caller (if there is one).
5359 BOOL DebuggerStepper::DetectHandleLCGMethods(const PCODE ip, MethodDesc * pMD, ControllerStackInfo * pInfo)
5361 // If a MethodDesc is specified, it has to match the given IP.
5362 _ASSERTE(pMD == NULL || pMD == g_pEEInterface->GetNativeCodeMethodDesc(ip));
5364 // Look up the MethodDesc for the given IP.
5367 // If the given IP is in unmanaged code, then it isn't an LCG method
5368 if (!g_pEEInterface->IsManagedNativeCode((const BYTE *)ip))
5371 pMD = g_pEEInterface->GetNativeCodeMethodDesc(ip);
5374 _ASSERTE(pMD != NULL);
5375 LOG((LF_CORDB, LL_INFO10000, "DS::DHLCGM: ip:%zx pMD:%p (%s::%s)\n",
5378 pMD->m_pszDebugClassName,
5379 pMD->m_pszDebugMethodName));
// Not an LCG method: the caller is free to stop here.
5381 if (!pMD->IsLCGMethod())
5384 // Enable all the traps to catch the thread.
5386 EnableJMCBackStop(pMD);
// Make the LCG frame the "return" frame so TrapStepOut patches the LCG
// method's caller rather than the caller's caller (LCG frames are invisible
// to the stackwalk — see Notes above).
5388 pInfo->SetReturnFrameWithActiveFrame();
5394 // Steppers override these so that they can skip func-evals. Note that steppers can
5395 // be created & used inside of func-evals (nested-break states).
5396 // On enter, we check for freezing the stepper.
5397 void DebuggerStepper::TriggerFuncEvalEnter(Thread * thread)
// Called when a func-eval begins on this stepper's thread. Bumps the nesting count; on
// the first (outermost) func-eval we "freeze" the stepper by recording and disabling its
// triggers (currently just MethodEnter) so it won't fire while the func-eval runs.
// TriggerFuncEvalExit re-enables whatever is recorded in m_bvFrozenTriggers.
5399 LOG((LF_CORDB, LL_INFO10000, "DS::TFEEnter, this=0x%p, old nest=%d\n", this, m_cFuncEvalNesting));
5401 // Since this is always called on the hijacking thread, we should be thread-safe
5402 _ASSERTE(thread == this->GetThread());
5407 m_cFuncEvalNesting++;
5409 if (m_cFuncEvalNesting == 1)
5411 // We're entering our 1st funceval, so freeze us.
5412 LOG((LF_CORDB, LL_INFO100000, "DS::TFEEnter - freezing stepper\n"));
5414 // Freeze the stepper by disabling all triggers
5415 m_bvFrozenTriggers = 0;
5418 // We don't explicitly disable single-stepping because the OS
5419 // gives us a new thread context during an exception. Since
5420 // all func-evals are done inside exceptions, we should never
5421 // have this problem.
5423 // Note, however, that if func-evals were no longer done in
5424 // exceptions, this would have to change.
// Record that MethodEnter was on so the thaw in TriggerFuncEvalExit can restore it.
5428 if (IsMethodEnterEnabled())
5430 m_bvFrozenTriggers |= kMethodEnter;
5431 DisableMethodEnter();
5437 LOG((LF_CORDB, LL_INFO100000, "DS::TFEEnter - new nest=%d\n", m_cFuncEvalNesting));
5441 // On Func-EvalExit, we check if the stepper is trying to step-out of a func-eval
5442 // (in which case we kill it)
5443 // or if we previously entered this func-eval and should thaw it now.
5444 void DebuggerStepper::TriggerFuncEvalExit(Thread * thread)
// Called when a func-eval completes on this stepper's thread. Decrements the nesting
// count: reaching -1 means we're stepping out of the func-eval we were created inside
// (so the stepper disables itself completely); returning to 0 means the func-eval we
// froze for in TriggerFuncEvalEnter is done, so we thaw the recorded triggers.
5446 LOG((LF_CORDB, LL_INFO10000, "DS::TFEExit, this=0x%p, old nest=%d\n", this, m_cFuncEvalNesting));
5448 // Since this is always called on the hijacking thread, we should be thread-safe
5449 _ASSERTE(thread == this->GetThread());
5454 m_cFuncEvalNesting--;
5456 if (m_cFuncEvalNesting == -1)
5458 LOG((LF_CORDB, LL_INFO100000, "DS::TFEExit - disabling stepper\n"));
5460 // we're exiting the func-eval session we were created in. So we just completely
5461 // disable ourselves so that we don't fire anything anymore.
5462 // The RS still has to free the stepper though.
5464 // This prevents us from stepping-out of a func-eval. For traditional steppers,
5465 // this is overkill since it won't have any outstanding triggers. (trap-step-out
5466 // won't patch if it crosses a func-eval frame).
5467 // But JMC-steppers have Method-Enter; and so this is the only place we have to
5471 else if (m_cFuncEvalNesting == 0)
5473 // We're back to our starting Func-eval session, we should have been frozen,
5475 LOG((LF_CORDB, LL_INFO100000, "DS::TFEExit - thawing stepper\n"));
5477 // Thaw the stepper (reenable triggers)
5478 if ((m_bvFrozenTriggers & kMethodEnter) != 0)
5480 EnableMethodEnter();
5482 m_bvFrozenTriggers = 0;
5487 LOG((LF_CORDB, LL_INFO100000, "DS::TFEExit - new nest=%d\n", m_cFuncEvalNesting));
5492 // Return true iff we set a patch (which implies to caller that we should
5493 // let controller run free and hit that patch)
5494 bool DebuggerStepper::TrapStepInto(ControllerStackInfo *info,
5496 TraceDestination *pTD)
// Attempt a step-in: trace through any stubs at 'ip' and, if the destination can be
// determined, place a patch there. Returns true iff a patch was set (caller should then
// let the controller run free and hit that patch); *pTD receives the trace destination.
// Also arms trace-call at the leaf-most frame as a fallback for when the trace fails.
5498 _ASSERTE( pTD != NULL );
5499 _ASSERTE(this->GetDCType() == DEBUGGER_CONTROLLER_STEPPER);
5501 EnableTraceCall(LEAF_MOST_FRAME);
// Track the root-most frame we've attempted a step-in from.
5502 if (IsCloserToRoot(info->m_activeFrame.fp, m_fpStepInto))
5503 m_fpStepInto = info->m_activeFrame.fp;
5505 LOG((LF_CORDB, LL_INFO1000, "DS::TSI this:%p m_fpStepInto:%p ip:%p\n",
5506 this, m_fpStepInto.GetSPValue(), ip));
5508 TraceDestination trace;
5510 // Trace through the stubs.
5511 // If we're calling from managed code, this should either succeed
5512 // or become an ecall into coreclr.
5513 // @todo - if this fails, we want to provide as much info as possible.
5514 if (!g_pEEInterface->TraceStub(ip, &trace)
5515 || !g_pEEInterface->FollowTrace(&trace))
5517 LOG((LF_CORDB, LL_INFO1000, "DS::TSI Failed to step into\n"));
5522 (*pTD) = trace; //bitwise copy
5524 // Step-in always operates at the leaf-most frame. Thus the frame pointer for any
5525 // patch for step-in should be LEAF_MOST_FRAME, regardless of whatever our current fp
5526 // is before the step-in.
5527 // Note that step-in may skip 'internal' frames (FrameInfo w/ internal=true) since
5528 // such frames may really just be a marker for an internal EE Frame on the stack.
5529 // However, step-out uses these frames b/c it may call frame->TraceFrame() on them.
5530 return PatchTrace(&trace,
5531 LEAF_MOST_FRAME, // step-in is always leaf-most frame.
5532 (m_rgfMappingStop&STOP_UNMANAGED)?(true):(false));
5535 // Enable the JMC backstop for stepping on Step-In.
5536 // This activates the JMC probes, which will provide a safety net
5537 // to stop a stepper if the StubManagers don't predict the call properly.
5538 // Ideally, this should never be necessary (because the SMs would do their job).
5539 void DebuggerStepper::EnableJMCBackStop(MethodDesc * pStartMethod)
// Arm the JMC "backstop" for a step-in: remember the method the step started in (so the
// debug-only assert on hitting the backstop isn't over-aggressive), then enable the
// MethodEnter trigger as a last-resort net in case the stub managers mispredict the call.
5541 // JMC steppers should not need the JMC backstop unless a thread inadvertently stops in an LCG method.
5542 //_ASSERTE(DEBUGGER_CONTROLLER_JMC_STEPPER != this->GetDCType());
5544 // Since we should never hit the JMC backstop (since it's really a SM issue), we'll assert if we actually do.
5545 // However, there's 1 corner case here. If we trace calls at the start of the method before the JMC-probe,
5546 // then we'll still hit the JMC backstop in our own method.
5547 // Record that starting method. That way, if we end up hitting our JMC backstop in our own method,
5548 // we don't over aggressively fire the assert. (This won't work for recursive cases, but since this is just
5549 // changing an assert, we don't care).
5552 // May be NULL if we didn't start in a method.
5553 m_StepInStartMethod = pStartMethod;
5556 // We don't want traditional steppers to rely on MethodEnter (b/c it's not guaranteed to be correct),
5557 // but it may be a useful last resort.
5558 this->EnableMethodEnter();
5561 // Return true if the stepper can run free.
5562 bool DebuggerStepper::TrapStepInHelper(
5563 ControllerStackInfo * pInfo,
5564 const BYTE * ipCallTarget,
5565 const BYTE * ipNext,
5566 bool fCallingIntoFunclet,
// Helper for TrapStep when stepping into a call whose target is ipCallTarget.
// Returns true if the stepper can run free (TrapStepInto placed a patch); returns false
// to tell the caller (TrapStep) to set the patch at ipNext itself.
5569 TraceDestination td;
5572 // Begin logging the step-in activity in debug builds.
5573 StubManager::DbgBeginLog((TADDR) ipNext, (TADDR) ipCallTarget);
5576 if (TrapStepInto(pInfo, ipCallTarget, &td))
5578 // If we placed a patch, see if we need to update our step-reason
5579 if (td.GetTraceType() == TRACE_MANAGED )
5581 // Possible optimization: Roll all of g_pEEInterface calls into
5582 // one function so we don't repeatedly get the CodeMan,etc
5583 MethodDesc *md = NULL;
5584 _ASSERTE( g_pEEInterface->IsManagedNativeCode((const BYTE *)td.GetAddress()) );
5585 md = g_pEEInterface->GetNativeCodeMethodDesc(td.GetAddress());
// Only a patch at the very start of the target method (native offset 0) counts as a
// step-in call for the step-reason; anywhere else is left for other triggers.
5587 DebuggerJitInfo* pDJI = g_pDebugger->GetJitInfoFromAddr(td.GetAddress());
5588 CodeRegionInfo code = CodeRegionInfo::GetCodeRegionInfo(pDJI, md);
5589 if (code.AddressToOffset((const BYTE *)td.GetAddress()) == 0)
5591 LOG((LF_CORDB,LL_INFO1000,"DS::TS %p m_reason = STEP_CALL @ip%p\n",
5592 this, (BYTE*)GetControlPC(&(pInfo->m_activeFrame.registers))));
5593 m_reason = STEP_CALL;
5597 LOG((LF_CORDB, LL_INFO1000, "Didn't step: md:%p td.type:%s td.address:%p, hot code address:%p\n",
5598 md, GetTType(td.GetTraceType()), td.GetAddress(), code.getAddrOfHotCode()));
5603 LOG((LF_CORDB,LL_INFO10000,"DS::TS else %p m_reason = STEP_CALL\n",
5605 m_reason = STEP_CALL;
5613 // If we can't figure out where the stepper should call into (likely because we can't find a stub-manager),
5614 // then enable the JMC backstop.
5615 EnableJMCBackStop(pInfo->m_activeFrame.md);
5619 // We ignore ipNext here. Instead we'll return false and let the caller (TrapStep)
5620 // set the patch for us.
5624 static bool IsTailCallJitHelper(const BYTE * ip)
// Returns true if 'ip' is the JIT's tail-call helper; simply defers to the tailcall
// stub manager. TrapStep uses this to turn a step-over of a tail call into a step-out.
5626 return TailCallStubManager::IsTailCallJitHelper(reinterpret_cast<PCODE>(ip));
5629 // Check whether a call to an IP will be a tailcall dispatched by first
5630 // returning. When a tailcall cannot be performed just with a jump instruction,
5631 // the code will be doing a regular call to a managed function called the
5632 // tailcall dispatcher. This function dispatches tailcalls in a special way: if
5633 // there is a previous "tailcall aware" frame, then it will simply record the
5634 // next tailcall to perform and immediately return. Otherwise it will set up
5635 // such a tailcall aware frame and dispatch tailcalls. In the former case the
5636 // control flow will be a little peculiar in that the function will return
5637 // immediately, so we need special handling in the debugger for it. This
5638 // function detects that case to be used for those scenarios.
5639 static bool IsTailCallThatReturns(const BYTE * ip, ControllerStackInfo* info)
// Returns true iff a call to 'ip' resolves (through stubs) to the tailcall dispatcher
// AND the current return address matches the recorded tailcall-aware return address -
// i.e. the dispatcher will merely record the tailcall and immediately return (see the
// comment block above for the control-flow details).
5641 MethodDesc* pTailCallDispatcherMD = TailCallHelp::GetTailCallDispatcherMD();
// No dispatcher created yet means nothing can be a dispatched tailcall.
5642 if (pTailCallDispatcherMD == NULL)
5647 TraceDestination trace;
5648 if (!g_pEEInterface->TraceStub(ip, &trace) || !g_pEEInterface->FollowTrace(&trace))
// The trace may land on not-yet-jitted code; fetch the MethodDesc either way.
5653 MethodDesc* pTargetMD =
5654 trace.GetTraceType() == TRACE_UNJITTED_METHOD
5655 ? trace.GetMethodDesc()
5656 : g_pEEInterface->GetNativeCodeMethodDesc(trace.GetAddress());
5658 if (pTargetMD != pTailCallDispatcherMD)
5663 LOG((LF_CORDB, LL_INFO1000, "ITCTR: target %p is the tailcall dispatcher\n", ip));
5665 _ASSERTE(info->HasReturnFrame());
5666 LPVOID retAddr = (LPVOID)GetControlPC(&info->GetReturnFrame().registers);
5667 TailCallTls* tls = GetThread()->GetTailCallTls();
5668 LPVOID tailCallAwareRetAddr = tls->GetFrame()->TailCallAwareReturnAddress;
5670 LOG((LF_CORDB,LL_INFO1000, "ITCTR: ret addr is %p, tailcall aware ret addr is %p\n",
5671 retAddr, tailCallAwareRetAddr));
// Matching addresses => a tailcall-aware frame already exists up the stack, so the
// dispatcher call will record the tailcall and return rather than run user code.
5673 return retAddr == tailCallAwareRetAddr;
5676 // bool DebuggerStepper::TrapStep() TrapStep attempts to set a
5677 // patch at the next IL instruction to be executed. If we're stepping in &
5678 // the next IL instruction is a call, then this'll set a breakpoint inside
5679 // the code that will be called.
5680 // How: There are a number of cases, depending on where the IP
5682 // Unmanaged code: EnableTraceCall() & return false - try and get
5683 // it when it returns.
5684 // In a frame: if the <p in> param is true, then do an
5685 // EnableTraceCall(). If the frame isn't the top frame, also do
5686 // g_pEEInterface->TraceFrame(), g_pEEInterface->FollowTrace, and
5688 // Normal managed frame: create a Walker and walk the instructions until either
5689 // leave the provided range (AddPatch there, return true), or we don't know what the
5690 // next instruction is (say, after a call, or return, or branch - return false).
5691 // Returns a boolean indicating if we were able to set a patch successfully
5692 // in either this method, or (if in == true & the next instruction is a call)
5693 // inside a callee method.
5694 // true: Patch successfully placed either in this method or a callee,
5695 // so the stepping is taken care of.
5696 // false: Unable to place patch in either this method or any
5697 // applicable callee methods, so the only option the caller has to put
5698 // patch to control flow is to call TrapStepOut & try and place a patch
5699 // on the method that called the current frame's method.
5700 bool DebuggerStepper::TrapStep(ControllerStackInfo *info, bool in)
// See the large header comment above: returns true iff a patch was successfully placed
// (in this method or, for step-in, in a callee); false means the caller must fall back
// to TrapStepOut. 'in' selects step-in vs step-over behavior.
5702 LOG((LF_CORDB,LL_INFO10000,"DS::TS: this:%p\n", this));
// Case 1: stopped in unmanaged code - arm trace-call on all paths back into managed code.
5703 if (!info->m_activeFrame.managed)
5706 // We're not in managed code. Patch up all paths back in.
5709 LOG((LF_CORDB,LL_INFO10000, "DS::TS: not in managed code\n"));
5713 EnablePolyTraceCall();
// Case 2: stopped at an EE Frame (stub/marker) rather than a real managed frame.
5719 if (info->m_activeFrame.frame != NULL)
5723 // We're in some kind of weird frame. Patch further entry to the frame.
5724 // or if we can't, patch return from the frame
5727 LOG((LF_CORDB,LL_INFO10000, "DS::TS: in a weird frame\n"));
5731 EnablePolyTraceCall();
5733 // Only traditional steppers should patch a frame. JMC steppers will
5734 // just rely on TriggerMethodEnter.
5735 if (DEBUGGER_CONTROLLER_STEPPER == this->GetDCType())
5737 if (info->m_activeFrame.frame != FRAME_TOP)
5739 TraceDestination trace;
5741 CONTRACT_VIOLATION(GCViolation); // TraceFrame GC-triggers
5743 // This could be anywhere, especially b/c step could be on non-leaf frame.
5744 if (g_pEEInterface->TraceFrame(this->GetThread(),
5745 info->m_activeFrame.frame,
5747 &(info->m_activeFrame.registers))
5748 && g_pEEInterface->FollowTrace(&trace)
5749 && PatchTrace(&trace, info->m_activeFrame.fp,
5750 (m_rgfMappingStop&STOP_UNMANAGED)?
5764 LOG((LF_CORDB,LL_INFO1000, "GetJitInfo for pc = 0x%x (addr of "
5765 "that value:0x%x)\n", (const BYTE*)(GetControlPC(&info->m_activeFrame.registers)),
5766 info->m_activeFrame.registers.PCTAddr));
5769 // Note: we used to pass in the IP from the active frame to GetJitInfo, but there seems to be no value in that, and
5770 // it was causing problems creating a stepper while sitting in ndirect stubs after we'd returned from the unmanaged
5771 // function that had been called.
5772 DebuggerJitInfo *ji = info->m_activeFrame.GetJitInfoFromFrame();
5775 LOG((LF_CORDB,LL_INFO10000,"DS::TS: For code 0x%p, got DJI 0x%p, "
5776 "from 0x%p to 0x%p\n",
5777 (const BYTE*)(GetControlPC(&info->m_activeFrame.registers)),
5778 ji, ji->m_addrOfCode, ji->m_addrOfCode+ji->m_sizeOfCode));
5782 LOG((LF_CORDB,LL_INFO10000,"DS::TS: For code 0x%p, "
5783 "didn't get a DJI \n",
5784 (const BYTE*)(GetControlPC(&info->m_activeFrame.registers))));
// Case 3: a normal managed frame - disassemble forward from the current PC.
5788 // We're in a normal managed frame - walk the code
5791 NativeWalker walker;
5793 LOG((LF_CORDB,LL_INFO1000, "DS::TS: &info->m_activeFrame.registers 0x%p\n", &info->m_activeFrame.registers));
5795 // !!! Eventually when using the fjit, we'll want
5796 // to walk the IL to get the next location, & then map
5797 // it back to native.
5798 walker.Init((BYTE*)GetControlPC(&(info->m_activeFrame.registers)), &info->m_activeFrame.registers);
5801 // Is the active frame really the active frame?
5802 // What if the thread is stopped at a managed debug event outside of a filter ctx? Eg, stopped
5803 // somewhere directly in mscorwks (like sending a LogMsg or ClsLoad event) or even at WaitForSingleObject.
5804 // ActiveFrame is either the stepper's initial frame or the frame of a filterctx.
5805 bool fIsActiveFrameLive = (info->m_activeFrame.fp == info->m_bottomFP);
5807 // If this thread isn't stopped in managed code, it can't be at the active frame.
5808 if (GetManagedStoppedCtx(this->GetThread()) == NULL)
5810 fIsActiveFrameLive = false;
5813 bool fIsJump = false;
5814 bool fCallingIntoFunclet = false;
5816 // If m_activeFrame is not the actual active frame,
5817 // we should skip this first switch - never single step, and
5818 // assume our context is bogus.
5819 if (fIsActiveFrameLive)
5821 // Note that by definition our walker must always be able to step
5822 // through a single instruction, so any return
5823 // of NULL IP's from those cases on the first step
5824 // means that an exception is going to be generated.
5826 // (On future steps, it can also mean that the destination
5827 // simply can't be computed.)
5828 WALK_TYPE wt = walker.GetOpcodeWalkType();
5834 LOG((LF_CORDB,LL_INFO10000, "DC::TS:Imm:WALK_RETURN\n"));
5836 // Normally a 'ret' opcode means we're at the end of a function and doing a step-out.
5837 // But the jit is free to use a 'ret' opcode to implement various goofy constructs like
5838 // managed filters, in which case we may ret to the same function or we may ret to some
5839 // internal CLR stub code.
5840 // So we'll just ignore this and tell the Stepper to enable every notification it has
5841 // and let the thread run free. This will include TrapStepOut() and EnableUnwind()
5842 // to catch any potential filters.
5845 // Go ahead and enable the single-step flag too. We know it's safe.
5846 // If this lands in random code, then TriggerSingleStep will just ignore it.
5849 // Don't set step-reason yet. If another trigger gets hit, it will set the reason.
5854 LOG((LF_CORDB,LL_INFO10000, "DC::TS:Imm:WALK_BRANCH\n"));
5855 // A branch can be handled just like a call. If the branch is within the current method, then we just
5856 // fall down to WALK_UNKNOWN, otherwise we handle it just like a call. Note: we need to force in=true
5857 // because for a jmp, in or over is the same thing, we're still going there, and the in==true case is
5858 // the case we want to use...
5864 LOG((LF_CORDB,LL_INFO10000, "DC::TS:Imm:WALK_CALL ip=%p nextip=%p skipip=%p\n", walker.GetIP(), walker.GetNextIP(), walker.GetSkipIP()));
5866 // If we're doing some sort of intra-method jump (usually, to get EIP in a clever way, via the CALL
5867 // instruction), then put the bp where we're going, NOT at the instruction following the call
5868 if (IsAddrWithinFrame(ji, info->m_activeFrame.md, walker.GetIP(), walker.GetNextIP()))
5870 LOG((LF_CORDB, LL_INFO1000, "Walk call within method!" ));
5874 if (walker.GetNextIP() != NULL)
5876 #ifdef FEATURE_EH_FUNCLETS
5877 // There are 4 places we could be jumping:
5878 // 1) to the beginning of the same method (recursive call)
5879 // 2) somewhere in the same funclet, that isn't the method start
5880 // 3) somewhere in the same method but different funclet
5881 // 4) somewhere in a different method
5883 // IsAddrWithinFrame ruled out option 2, IsAddrWithinMethodIncludingFunclet rules out option 4,
5884 // and checking the IP against the start address rules out option 1. That leaves only what we
5885 // wanted, option #3
5886 fCallingIntoFunclet = IsAddrWithinMethodIncludingFunclet(ji, info->m_activeFrame.md, walker.GetNextIP()) &&
5887 ((CORDB_ADDRESS)(SIZE_T)walker.GetNextIP() != ji->m_addrOfCode);
5889 // At this point, we know that the call/branch target is not
5890 // in the current method. The possible cases is that this is
5891 // a jump or a tailcall-via-helper. There are two separate
5892 // tailcalling mechanisms: on x86 we use a JIT helper which
5893 // will look like a regular call and which won't return, so
5894 // a step over becomes a step out. On other platforms we use
5895 // a separate mechanism that will perform a tailcall by
5896 // returning to an IL stub first. A step over in this case
5897 // is done by stepping out to the previous user function
5899 if ((fIsJump && !fCallingIntoFunclet) || IsTailCallJitHelper(walker.GetNextIP()) ||
5900 IsTailCallThatReturns(walker.GetNextIP(), info))
5902 // A step-over becomes a step-out for a tail call.
5910 // To preserve the old behaviour, if this is not a tail call, then we assume we want to
5911 // follow the call/jump.
5918 // There are two cases where we need to perform a step-in. One, if the step operation is
5919 // a step-in. Two, if the target address of the call is in a funclet of the current method.
5920 // In this case, we want to step into the funclet even if the step operation is a step-over.
5921 if (in || fCallingIntoFunclet)
5923 if (TrapStepInHelper(info, walker.GetNextIP(), walker.GetSkipIP(), fCallingIntoFunclet, fIsJump))
// Can't skip over the call (no next instruction address) - treat it as a step-in call.
5930 if (walker.GetSkipIP() == NULL)
5932 LOG((LF_CORDB,LL_INFO10000,"DS::TS 0x%p m_reason = STEP_CALL (skip)\n",
5934 m_reason = STEP_CALL;
5940 LOG((LF_CORDB,LL_INFO100000, "DC::TS:Imm:WALK_CALL Skip instruction\n"));
5946 LOG((LF_CORDB,LL_INFO10000,"DS::TS:WALK_UNKNOWN - curIP:%p "
5947 "nextIP:%p skipIP:%p 1st byte of opcode:0x%x\n", (BYTE*)GetControlPC(&(info->m_activeFrame.
5948 registers)), walker.GetNextIP(),walker.GetSkipIP(),
5949 *(BYTE*)GetControlPC(&(info->m_activeFrame.registers))));
5956 if (walker.GetNextIP() == NULL)
5964 } // if (fIsActiveFrameLive)
// Determine the step ranges to honor: only meaningful if we're still in the original frame.
5967 // Use our range, if we're in the original
5971 COR_DEBUG_STEP_RANGE *range;
5974 if (info->m_activeFrame.fp == m_fp)
5977 rangeCount = m_rangeCount;
// Main disassembly loop: advance instruction by instruction until we leave the step
// range (place a patch and return) or hit an instruction we can't predict past.
5986 // Keep walking until either we're out of range, or
5987 // else we can't predict ahead any more.
5992 const BYTE *ip = walker.GetIP();
5994 SIZE_T offset = CodeRegionInfo::GetCodeRegionInfo(ji, info->m_activeFrame.md).AddressToOffset(ip);
5996 LOG((LF_CORDB, LL_INFO1000, "Walking to ip 0x%p (natOff:0x%x)\n",ip,offset));
5998 if (!IsInRange(offset, range, rangeCount)
5999 && !ShouldContinueStep( info, offset ))
6001 AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
6004 info->GetReturnFrame().fp,
6009 switch (walker.GetOpcodeWalkType())
6013 LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_RETURN Adding Patch.\n"));
6015 // In the loop above, if we're at the return address, we'll check & see
6016 // if we're returning to elsewhere within the same method, and if so,
6017 // we'll single step rather than TrapStepOut. If we see a return in the
6018 // code stream, then we'll set a breakpoint there, so that we can
6019 // examine the return address, and decide whether to SS or TSO then
6020 AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
6023 info->GetReturnFrame().fp,
6029 LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL.\n"));
6031 // If we're doing some sort of intra-method jump (usually, to get EIP in a clever way, via the CALL
6032 // instruction), then put the bp where we're going, NOT at the instruction following the call
6033 if (IsAddrWithinFrame(ji, info->m_activeFrame.md, walker.GetIP(), walker.GetNextIP()))
6035 LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL IsAddrWithinFrame, Adding Patch.\n"));
6037 // How else to detect this?
6038 AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
6040 CodeRegionInfo::GetCodeRegionInfo(ji, info->m_activeFrame.md).AddressToOffset(walker.GetNextIP()),
6041 info->GetReturnFrame().fp,
// Tail calls dispatched via helper/return: a step-over becomes a step-out (see above).
6046 if (IsTailCallJitHelper(walker.GetNextIP()) || IsTailCallThatReturns(walker.GetNextIP(), info))
6050 AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
6053 info->GetReturnFrame().fp,
6059 #ifdef FEATURE_EH_FUNCLETS
6060 fCallingIntoFunclet = IsAddrWithinMethodIncludingFunclet(ji, info->m_activeFrame.md, walker.GetNextIP());
6062 if (in || fCallingIntoFunclet)
6064 LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL step in is true\n"));
6065 if (walker.GetNextIP() == NULL)
6067 LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL NextIP == NULL\n"));
6068 AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
6071 info->GetReturnFrame().fp,
6074 LOG((LF_CORDB,LL_INFO10000,"DS0x%x m_reason=STEP_CALL 2\n",
6076 m_reason = STEP_CALL;
6081 if (TrapStepInHelper(info, walker.GetNextIP(), walker.GetSkipIP(), fCallingIntoFunclet, false))
6088 LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL Calling GetSkipIP\n"));
6089 if (walker.GetSkipIP() == NULL)
6091 AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
6094 info->GetReturnFrame().fp,
6097 LOG((LF_CORDB,LL_INFO10000,"DS 0x%p m_reason=STEP_CALL4\n",this));
6098 m_reason = STEP_CALL;
6104 LOG((LF_CORDB, LL_INFO10000, "DS::TS: skipping over call.\n"));
// Unknown opcode with no predictable next IP: patch here and stop walking.
6108 if (walker.GetNextIP() == NULL)
6110 AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
6113 info->GetReturnFrame().fp,
6121 LOG((LF_CORDB,LL_INFO1000,"Ending TrapStep\n"));
6124 bool DebuggerStepper::IsAddrWithinFrame(DebuggerJitInfo *dji,
6126 const BYTE* currentAddr,
6127 const BYTE* targetAddr)
// Returns whether targetAddr lies within the "frame" we're stepping in: inside the
// method's code region, not at the method start (that's a recursive call), and - with
// funclets - in the same funclet as currentAddr.
6129 _ASSERTE(dji != NULL);
6131 bool result = IsAddrWithinMethodIncludingFunclet(dji, pMD, targetAddr);
6133 // We need to check if this is a recursive call. In RTM we should see if this method is really necessary,
6134 // since it looks like the X86 JIT doesn't emit intra-method jumps anymore.
6137 if ((CORDB_ADDRESS)(SIZE_T)targetAddr == dji->m_addrOfCode)
6143 #if defined(FEATURE_EH_FUNCLETS)
6144 // On WIN64, we also check whether targetAddr and currentAddr are in the same funclet.
6145 _ASSERTE(currentAddr != NULL);
6148 int currentFuncletIndex = dji->GetFuncletIndex((CORDB_ADDRESS)currentAddr, DebuggerJitInfo::GFIM_BYADDRESS);
6149 int targetFuncletIndex = dji->GetFuncletIndex((CORDB_ADDRESS)targetAddr, DebuggerJitInfo::GFIM_BYADDRESS);
6150 result = (currentFuncletIndex == targetFuncletIndex);
6152 #endif // FEATURE_EH_FUNCLETS
6157 // x86 shouldn't need to call this method directly. We should call IsAddrWithinFrame() on x86 instead.
6158 // That's why I use a name with the word "funclet" in it to scare people off.
6159 bool DebuggerStepper::IsAddrWithinMethodIncludingFunclet(DebuggerJitInfo *dji,
6161 const BYTE* targetAddr)
// Returns whether targetAddr is anywhere inside the method's code region (any funclet).
// See the note above: x86 callers should go through IsAddrWithinFrame() instead.
6163 _ASSERTE(dji != NULL);
6164 return CodeRegionInfo::GetCodeRegionInfo(dji, pMD).IsMethodAddress(targetAddr);
6167 void DebuggerStepper::TrapStepNext(ControllerStackInfo *info)
// Handle the case where TrapStep couldn't place a patch in the current method.
5169 LOG((LF_CORDB, LL_INFO10000, "DS::TrapStepNext, this=%p\n", this));
6170 // StepNext for a Normal stepper is just a step-out
6173 // @todo -should we also EnableTraceCall??
6176 // Is this frame interesting?
6177 // For a traditional stepper, all frames are interesting.
6178 bool DebuggerStepper::IsInterestingFrame(FrameInfo * pFrame)
// Polymorphic filter used by TrapStepOut; the base (traditional) stepper treats
// every frame as interesting. (JMC steppers override this elsewhere.)
6180 LIMITED_METHOD_CONTRACT;
6185 // Place a single patch somewhere up the stack to do a step-out
6186 void DebuggerStepper::TrapStepOut(ControllerStackInfo *info, bool fForceTraditional)
6188 ControllerStackInfo returnInfo;
6189 DebuggerJitInfo *dji;
6191 LOG((LF_CORDB, LL_INFO10000, "DS::TSO this:0x%p\n", this));
6193 bool fReturningFromFinallyFunclet = false;
6195 #if defined(FEATURE_EH_FUNCLETS)
6196 // When we step out of a funclet, we should do one of two things, depending
6197 // on the original stepping intention:
6198 // 1) If we originally want to step out, then we should skip the parent method.
6199 // 2) If we originally want to step in/over but we step off the end of the funclet,
6200 // then we should resume in the parent, if possible.
6201 if (info->m_activeFrame.IsNonFilterFuncletFrame())
6203 // There should always be a frame for the parent method.
6204 _ASSERTE(info->HasReturnFrame());
6207 while (info->HasReturnFrame() && info->m_activeFrame.md != info->GetReturnFrame().md)
6209 StackTraceTicket ticket(info);
6210 returnInfo.GetStackInfo(ticket, GetThread(), info->GetReturnFrame().fp, NULL);
6214 _ASSERTE(info->HasReturnFrame());
6217 _ASSERTE(info->m_activeFrame.md == info->GetReturnFrame().md);
6219 if (m_eMode == cStepOut)
6221 StackTraceTicket ticket(info);
6222 returnInfo.GetStackInfo(ticket, GetThread(), info->GetReturnFrame().fp, NULL);
6227 _ASSERTE(info->GetReturnFrame().managed);
6228 _ASSERTE(info->GetReturnFrame().frame == NULL);
6230 MethodDesc *md = info->GetReturnFrame().md;
6231 dji = info->GetReturnFrame().GetJitInfoFromFrame();
6233 // The return value of a catch funclet is the control PC to resume to.
6234 // The return value of a finally funclet has no meaning, so we need to check
6235 // if the return value is in the main method.
6236 LPVOID resumePC = GetRegdisplayReturnValue(&(info->m_activeFrame.registers));
6238 // For finally funclet, there are two possible situations. Either the finally is
6239 // called normally (i.e. no exception), in which case we simply fall through and
6240 // let the normal loop do its work below, or the finally is called by the EH
6241 // routines, in which case we need the unwind notification.
6242 if (IsAddrWithinMethodIncludingFunclet(dji, md, (const BYTE *)resumePC))
6244 SIZE_T reloffset = dji->m_codeRegionInfo.AddressToOffset((BYTE*)resumePC);
6246 AddBindAndActivateNativeManagedPatch(info->GetReturnFrame().md,
6249 info->GetReturnFrame().fp,
6252 LOG((LF_CORDB, LL_INFO10000,
6253 "DS::TSO:normally managed code AddPatch"
6254 " in %s::%s, offset 0x%x, m_reason=%d\n",
6255 info->GetReturnFrame().md->m_pszDebugClassName,
6256 info->GetReturnFrame().md->m_pszDebugMethodName,
6257 reloffset, m_reason));
6259 // Do not set m_reason to STEP_RETURN here. Logically, the funclet and the parent method are the
6260 // same method, so we should not "return" to the parent method.
6261 LOG((LF_CORDB, LL_INFO10000,"DS::TSO: done\n"));
6267 // This is the case where we step off the end of a finally funclet.
6268 fReturningFromFinallyFunclet = true;
6272 #endif // FEATURE_EH_FUNCLETS
6275 FramePointer dbgLastFP; // for debug, make sure we're making progress through the stack.
6278 while (info->HasReturnFrame())
6282 dbgLastFP = info->m_activeFrame.fp;
6285 // Continue walking up the stack & set a patch upon the next
6286 // frame up. We will eventually either hit managed code
6287 // (which we can set a definite patch in), or the top of the
6289 StackTraceTicket ticket(info);
6291 // The last parameter here is part of a really targeted (*cough* dirty) fix to
6292 // disable getting an unwanted UMChain to fix issue 650903 (See
6293 // code:ControllerStackInfo::WalkStack and code:TrackUMChain for the other
6294 // parts.) In the case of managed step out we know that we aren't interested in
6295 // unmanaged frames, and generating that unmanaged frame causes the stackwalker
6296 // not to report the managed frame that was at the same SP. However the unmanaged
6297 // frame might be used in the mixed-mode step out case so I don't suppress it
6299 returnInfo.GetStackInfo(ticket, GetThread(), info->GetReturnFrame().fp, NULL, !(m_rgfMappingStop & STOP_UNMANAGED));
6303 // If this assert fires, then it means that we're not making progress while
6304 // tracing up the towards the root of the stack. Likely an issue in the Left-Side's
6306 _ASSERTE(IsCloserToLeaf(dbgLastFP, info->m_activeFrame.fp));
6309 #ifdef FEATURE_MULTICASTSTUB_AS_IL
6310 if (info->m_activeFrame.md != nullptr && info->m_activeFrame.md->IsILStub() && info->m_activeFrame.md->AsDynamicMethodDesc()->IsMulticastStub())
6312 LOG((LF_CORDB, LL_INFO10000,
6313 "DS::TSO: multicast frame.\n"));
6315 // User break should always be called from managed code, so it should never actually hit this codepath.
6316 _ASSERTE(GetDCType() != DEBUGGER_CONTROLLER_USER_BREAKPOINT);
6318 // JMC steppers shouldn't be patching stubs.
6319 if (DEBUGGER_CONTROLLER_JMC_STEPPER == this->GetDCType())
6321 LOG((LF_CORDB, LL_INFO10000, "DS::TSO: JMC stepper skipping frame.\n"));
6325 TraceDestination trace;
6327 EnableTraceCall(info->m_activeFrame.fp);
6329 PCODE ip = GetControlPC(&(info->m_activeFrame.registers));
6330 if (g_pEEInterface->TraceStub((BYTE*)ip, &trace)
6331 && g_pEEInterface->FollowTrace(&trace)
6332 && PatchTrace(&trace, info->m_activeFrame.fp,
6337 #endif // FEATURE_MULTICASTSTUB_AS_IL
6338 if (info->m_activeFrame.md != nullptr && info->m_activeFrame.md->IsILStub() &&
6339 info->m_activeFrame.md->AsDynamicMethodDesc()->GetILStubType() == DynamicMethodDesc::StubTailCallCallTarget)
6341 // Normally the stack trace would not include IL stubs, but we
6342 // include this specific IL stub so that we can check if a call into
6343 // the tailcall dispatcher will result in any user code being
6344 // executed or will return and allow a previous tailcall dispatcher
6345 // to deal with the tailcall. Thus we just skip that frame here.
6346 LOG((LF_CORDB, LL_INFO10000,
6347 "DS::TSO: CallTailCallTarget frame.\n"));
6350 else if (info->m_activeFrame.managed)
6352 LOG((LF_CORDB, LL_INFO10000,
6353 "DS::TSO: return frame is managed.\n"));
6355 if (info->m_activeFrame.frame == NULL)
6357 // Returning normally to managed code.
6358 _ASSERTE(info->m_activeFrame.md != NULL);
6360 // Polymorphic check to skip over non-interesting frames.
6361 if (!fForceTraditional && !this->IsInterestingFrame(&info->m_activeFrame))
6364 dji = info->m_activeFrame.GetJitInfoFromFrame();
6365 _ASSERTE(dji != NULL);
6367 // Note: we used to pass in the IP from the active frame to GetJitInfo, but there seems to be no value
6368 // in that, and it was causing problems creating a stepper while sitting in ndirect stubs after we'd
6369 // returned from the unmanaged function that had been called.
6370 ULONG reloffset = info->m_activeFrame.relOffset;
6372 AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
6375 info->GetReturnFrame().fp,
6378 LOG((LF_CORDB, LL_INFO10000,
6379 "DS::TSO:normally managed code AddPatch"
6380 " in %s::%s, offset 0x%x, m_reason=%d\n",
6381 info->m_activeFrame.md->m_pszDebugClassName,
6382 info->m_activeFrame.md->m_pszDebugMethodName,
6383 reloffset, m_reason));
6386 // Do not set m_reason to STEP_RETURN here. Logically, the funclet and the parent method are the
6387 // same method, so we should not "return" to the parent method.
6388 if (!fReturningFromFinallyFunclet)
6390 m_reason = STEP_RETURN;
6394 else if (info->m_activeFrame.frame == FRAME_TOP)
6397 // Trad-stepper's step-out is actually like a step-next when we go off the top.
6398 // JMC-steppers do a true-step out. So for JMC-steppers, don't enable trace-call.
6399 if (DEBUGGER_CONTROLLER_JMC_STEPPER == this->GetDCType())
6401 LOG((LF_CORDB, LL_EVERYTHING, "DS::TSO: JMC stepper skipping exit-frame case.\n"));
6405 // User break should always be called from managed code, so it should never actually hit this codepath.
6406 _ASSERTE(GetDCType() != DEBUGGER_CONTROLLER_USER_BREAKPOINT);
6409 // We're walking off the top of the stack. Note that if we call managed code again,
6410 // this trace-call will cause our stepper to fire. So we'll actually do a
6411 // step-next; not a true-step out.
6412 EnableTraceCall(info->m_activeFrame.fp);
6414 LOG((LF_CORDB, LL_INFO1000, "DS::TSO: Off top of frame!\n"));
6416 m_reason = STEP_EXIT; //we're on the way out..
6418 // <REVISIT_TODO>@todo not that it matters since we don't send a
6419 // stepComplete message to the right side.</REVISIT_TODO>
6422 else if (info->m_activeFrame.frame->GetFrameType() == Frame::TYPE_FUNC_EVAL)
6424 // Note: we treat walking off the top of the stack and
6425 // walking off the top of a func eval the same way,
6426 // except that we don't enable trace call since we
6427 // know exactly where we're going.
6429 LOG((LF_CORDB, LL_INFO1000,
6430 "DS::TSO: Off top of func eval!\n"));
6432 m_reason = STEP_EXIT;
6435 else if (info->m_activeFrame.frame->GetFrameType() == Frame::TYPE_SECURITY &&
6436 info->m_activeFrame.frame->GetInterception() == Frame::INTERCEPTION_NONE)
6438 // If we're stepping out of something that was protected by (declarative) security,
6439 // the security subsystem may leave a frame on the stack to cache its computation.
6440 // HOWEVER, this isn't a real frame, and so we don't want to stop here. On the other
6441 // hand, if we're in the security goop (sec. executes managed code to do stuff), then
6442 // we'll want to use the "returning to stub case", below. GetInterception()==NONE
6443 // indicates that the frame is just a cache frame:
6444 // Skip it and keep on going
6446 LOG((LF_CORDB, LL_INFO10000,
6447 "DS::TSO: returning to a non-intercepting frame. Keep unwinding\n"));
6452 LOG((LF_CORDB, LL_INFO10000,
6453 "DS::TSO: returning to a stub frame.\n"));
6455 // User break should always be called from managed code, so it should never actually hit this codepath.
6456 _ASSERTE(GetDCType() != DEBUGGER_CONTROLLER_USER_BREAKPOINT);
6458 // JMC steppers shouldn't be patching stubs.
6459 if (DEBUGGER_CONTROLLER_JMC_STEPPER == this->GetDCType())
6461 LOG((LF_CORDB, LL_INFO10000, "DS::TSO: JMC stepper skipping frame.\n"));
6465 // We're returning to some funky frame.
6466 // (E.g. a security frame has called a native method.)
6468 // Patch the frame from entering other methods. This effectively gives the Step-out
6469 // a step-next behavior. For eg, this can be useful for step-out going between multicast delegates.
6470 // This step-next could actually land us leaf-more on the callstack than we currently are!
6471 // If we were a true-step out, we'd skip this and keep crawling.
6472 // up the callstack.
6474 // !!! For now, we assume that the TraceFrame entry
6475 // point is smart enough to tell where it is in the
6476 // calling sequence. We'll see how this holds up.
6477 TraceDestination trace;
6479 // We don't want notifications of trace-calls leaf-more than our current frame.
6480 // For eg, if our current frame calls out to unmanaged code and then back in,
6481 // we'll get a TraceCall notification. But since it's leaf-more than our current frame,
6482 // we don't care because we just want to step out of our current frame (and everything
6483 // our current frame may call).
6484 EnableTraceCall(info->m_activeFrame.fp);
6486 CONTRACT_VIOLATION(GCViolation); // TraceFrame GC-triggers
6488 if (g_pEEInterface->TraceFrame(GetThread(),
6489 info->m_activeFrame.frame, FALSE,
6490 &trace, &(info->m_activeFrame.registers))
6491 && g_pEEInterface->FollowTrace(&trace)
6492 && PatchTrace(&trace, info->m_activeFrame.fp,
6496 // !!! Problem: we don't know which return frame to use -
6497 // the TraceFrame patch may be in a frame below the return
6498 // frame, or in a frame parallel with it
6499 // (e.g. prestub popping itself & then calling.)
6501 // For now, I've tweaked the FP comparison in the
6502 // patch dispatching code to allow either case.
6507 LOG((LF_CORDB, LL_INFO10000,
6508 "DS::TSO: return frame is not managed.\n"));
6510 // Only step out to unmanaged code if we're actually
6511 // marked to stop in unmanaged code. Otherwise, just loop
6512 // to get us past the unmanaged frames.
6513 if (m_rgfMappingStop & STOP_UNMANAGED)
6515 LOG((LF_CORDB, LL_INFO10000,
6516 "DS::TSO: return to unmanaged code "
6517 "m_reason=STEP_RETURN\n"));
6519 // Do not set m_reason to STEP_RETURN here. Logically, the funclet and the parent method are the
6520 // same method, so we should not "return" to the parent method.
6521 if (!fReturningFromFinallyFunclet)
6523 m_reason = STEP_RETURN;
6526 // We're stepping out into unmanaged code
6527 LOG((LF_CORDB, LL_INFO10000,
6528 "DS::TSO: Setting unmanaged trace patch at 0x%x(%x)\n",
6529 GetControlPC(&(info->m_activeFrame.registers)),
6530 info->GetReturnFrame().fp.GetSPValue()));
6532 AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE *)GetControlPC(&(info->m_activeFrame.registers)),
6533 info->GetReturnFrame().fp,
6543 // <REVISIT_TODO>If we get here, we may be stepping out of the last frame. Our thread
6544 // exit logic should catch this case. (@todo)</REVISIT_TODO>
6545 LOG((LF_CORDB, LL_INFO10000,"DS::TSO: done\n"));
6549 // void DebuggerStepper::StepOut()
6550 // Called by Debugger::HandleIPCEvent to setup
6551 // everything so that the process will step over the range of IL
6553 // How: Converts the provided array of ranges from IL ranges to
6554 // native ranges (if they're not native already), and then calls
6555 // TrapStep or TrapStepOut, like so:
6556 // Get the appropriate MethodDesc & JitInfo
6557 // Iterate through array of IL ranges, use
6558 // JitInfo::MapILRangeToMapEntryRange to translate IL to native
6560 // Set member variables to remember that the DebuggerStepper now uses
6561 // the ranges: m_range, m_rangeCount, m_stepIn, m_fp
6562 // If (!TrapStep()) then {m_stepIn = true; TrapStepOut()}
6563 // EnableUnwind( m_fp );
6564 void DebuggerStepper::StepOut(FramePointer fp, StackTraceTicket ticket)
6566 LOG((LF_CORDB, LL_INFO10000, "Attempting to step out, fp:0x%x this:0x%x"
6567 "\n", fp.GetSPValue(), this ));
6569 Thread *thread = GetThread();
6570 CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread);
6571 ControllerStackInfo info;
6573 // We pass in the ticket b/c this is called both when we're live (via
6574 // DebuggerUserBreakpoint) and when we're stopped (via normal StepOut)
6575 info.GetStackInfo(ticket, thread, fp, context);
6582 m_fp = info.m_activeFrame.fp;
6583 #if defined(FEATURE_EH_FUNCLETS)
6584 // We need to remember the parent method frame pointer here so that we will recognize
6585 // the range of the stepper as being valid when we return to the parent method or stackalloc.
6586 if (info.HasReturnFrame(true))
6588 m_fpParentMethod = info.GetReturnFrame(true).fp;
6590 #endif // FEATURE_EH_FUNCLETS
6594 _ASSERTE((fp == LEAF_MOST_FRAME) || (info.m_activeFrame.md != NULL) || (info.GetReturnFrame().md != NULL));
6600 #define GROW_RANGES_IF_NECESSARY() \
6601 if (rTo == rToEnd) \
6603 ULONG NewSize, OldSize; \
6604 if (!ClrSafeInt<ULONG>::multiply(sizeof(COR_DEBUG_STEP_RANGE), (ULONG)(realRangeCount*2), NewSize) || \
6605 !ClrSafeInt<ULONG>::multiply(sizeof(COR_DEBUG_STEP_RANGE), (ULONG)realRangeCount, OldSize) || \
6606 NewSize < OldSize) \
6608 DeleteInteropSafe(m_range); \
6612 COR_DEBUG_STEP_RANGE *_pTmp = (COR_DEBUG_STEP_RANGE*) \
6613 g_pDebugger->GetInteropSafeHeap()->Realloc(m_range, \
6617 if (_pTmp == NULL) \
6619 DeleteInteropSafe(m_range); \
6625 rTo = m_range + realRangeCount; \
6626 rToEnd = m_range + (realRangeCount*2); \
6627 realRangeCount *= 2; \
6630 //-----------------------------------------------------------------------------
6631 // Given a set of IL ranges, convert them to native and cache them.
6632 // Return true on success, false on error.
6633 //-----------------------------------------------------------------------------
6634 bool DebuggerStepper::SetRangesFromIL(DebuggerJitInfo *dji, COR_DEBUG_STEP_RANGE *ranges, SIZE_T rangeCount)
6640 PRECONDITION(ThisIsHelperThreadWorker()); // Only help initializes a stepper.
6641 PRECONDITION(m_range == NULL); // shouldn't be set already.
6642 PRECONDITION(CheckPointer(ranges));
6643 PRECONDITION(CheckPointer(dji));
6647 // Note: we used to pass in the IP from the active frame to GetJitInfo, but there seems to be no value in that, and
6648 // it was causing problems creating a stepper while sitting in ndirect stubs after we'd returned from the unmanaged
6649 // function that had been called.
6650 MethodDesc *fd = dji->m_nativeCodeVersion.GetMethodDesc();
6652 // The "+1" is for internal use, when we need to
6653 // set an intermediate patch in pitched code. Isn't
6654 // used unless the method is pitched & a patch is set
6655 // inside it. Thus we still pass cRanges as the
6657 m_range = new (interopsafe) COR_DEBUG_STEP_RANGE[rangeCount+1];
6659 if (m_range == NULL)
6662 TRACE_ALLOC(m_range);
6664 SIZE_T realRangeCount = rangeCount;
6668 LOG((LF_CORDB,LL_INFO10000,"DeSt::St: For code md=%p, got DJI %p, from %p to %p\n",
6669 fd, dji, dji->m_addrOfCode, dji->m_addrOfCode + dji->m_sizeOfCode));
6672 // Map ranges to native offsets for jitted code
6674 COR_DEBUG_STEP_RANGE *r, *rEnd, *rTo, *rToEnd;
6677 rEnd = r + rangeCount;
6680 rToEnd = rTo + realRangeCount;
6683 // rTo may also be incremented in the middle of the loop on WIN64 platforms.
6685 for (/**/; r < rEnd; r++, rTo++)
6687 // If we are already at the end of our allocated array, but there are still
6688 // more ranges to copy over, then grow the array.
6689 GROW_RANGES_IF_NECESSARY();
6691 if (r->startOffset == 0 && r->endOffset == (ULONG) ~0)
6693 // {0...-1} means use the entire method as the range
6694 // Code dup'd from below case.
6695 LOG((LF_CORDB, LL_INFO10000, "DS::Step: Have DJI, special (0,-1) entry\n"));
6696 rTo->startOffset = 0;
6697 rTo->endOffset = (ULONG32)g_pEEInterface->GetFunctionSize(fd);
6702 // One IL range may consist of multiple
6706 DebuggerILToNativeMap *mStart, *mEnd;
6708 dji->MapILRangeToMapEntryRange(r->startOffset,
6713 // Either mStart and mEnd are both NULL (we don't have any sequence point),
6714 // or they are both non-NULL.
6715 _ASSERTE( ((mStart == NULL) && (mEnd == NULL)) ||
6716 ((mStart != NULL) && (mEnd != NULL)) );
6720 // <REVISIT_TODO>@todo Won't this result in us stepping across
6721 // the entire method?</REVISIT_TODO>
6722 rTo->startOffset = 0;
6725 else if (mStart == mEnd)
6727 rTo->startOffset = mStart->nativeStartOffset;
6728 rTo->endOffset = mStart->nativeEndOffset;
6732 // Account for more than one continuous range here.
6734 // Move the pointer back to work with the loop increment below.
6735 // Don't dereference this pointer now!
6738 for (DebuggerILToNativeMap* pMap = mStart;
6742 if ((pMap == mStart) ||
6743 (pMap->nativeStartOffset != (pMap-1)->nativeEndOffset))
6746 GROW_RANGES_IF_NECESSARY();
6748 rTo->startOffset = pMap->nativeStartOffset;
6749 rTo->endOffset = pMap->nativeEndOffset;
6753 // If we have continuous ranges, then lump them together.
6754 _ASSERTE(rTo->endOffset == pMap->nativeStartOffset);
6755 rTo->endOffset = pMap->nativeEndOffset;
6759 LOG((LF_CORDB, LL_INFO10000, "DS::Step: nat off:0x%x to 0x%x\n", rTo->startOffset, rTo->endOffset));
6764 rangeCount = (int)((BYTE*)rTo - (BYTE*)m_range) / sizeof(COR_DEBUG_STEP_RANGE);
6768 // Even if we don't have debug info, we'll be able to
6769 // step through the method
6770 SIZE_T functionSize = g_pEEInterface->GetFunctionSize(fd);
6772 COR_DEBUG_STEP_RANGE *r = ranges;
6773 COR_DEBUG_STEP_RANGE *rEnd = r + rangeCount;
6775 COR_DEBUG_STEP_RANGE *rTo = m_range;
6777 for(/**/; r < rEnd; r++, rTo++)
6779 if (r->startOffset == 0 && r->endOffset == (ULONG) ~0)
6781 LOG((LF_CORDB, LL_INFO10000, "DS::Step:No DJI, (0,-1) special entry\n"));
6782 // Code dup'd from above case.
6783 // {0...-1} means use the entire method as the range
6784 rTo->startOffset = 0;
6785 rTo->endOffset = (ULONG32)functionSize;
6789 LOG((LF_CORDB, LL_INFO10000, "DS::Step:No DJI, regular entry\n"));
6790 // We can't just leave ths IL entry - we have to
6792 // This will just be ignored
6793 rTo->startOffset = rTo->endOffset = (ULONG32)functionSize;
6799 m_rangeCount = rangeCount;
6800 m_realRangeCount = rangeCount;
6806 // void DebuggerStepper::Step() Tells the stepper to step over
6807 // the provided ranges.
6808 // void *fp: frame pointer.
6809 // bool in: true if we want to step into a function within the range,
6810 // false if we want to step over functions within the range.
6811 // COR_DEBUG_STEP_RANGE *ranges: Assumed to be nonNULL, it will
6812 // always hold at least one element.
6813 // SIZE_T rangeCount: One less than the true number of elements in
6814 // the ranges argument.
6815 // bool rangeIL: true if the ranges are provided in IL (they'll be
6816 // converted to native before the DebuggerStepper uses them,
6817 // false if they already are native.
6818 bool DebuggerStepper::Step(FramePointer fp, bool in,
6819 COR_DEBUG_STEP_RANGE *ranges, SIZE_T rangeCount,
6822 LOG((LF_CORDB, LL_INFO1000, "DeSt:Step this:%p ", this));
6824 LOG((LF_CORDB,LL_INFO10000," start,end[0]:(0x%x,0x%x)\n",
6825 ranges[0].startOffset, ranges[0].endOffset));
6827 LOG((LF_CORDB,LL_INFO10000," single step\n"));
6829 Thread *thread = GetThread();
6830 CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread);
6832 // ControllerStackInfo doesn't report IL stubs, so if we are in an IL stub, we need
6833 // to handle the single-step specially. There are probably other problems when we stop
6834 // in an IL stub. We need to revisit this later.
6835 bool fIsILStub = false;
6836 if ((context != NULL) &&
6837 g_pEEInterface->IsManagedNativeCode(reinterpret_cast<const BYTE *>(GetIP(context))))
6839 MethodDesc * pMD = g_pEEInterface->GetNativeCodeMethodDesc(GetIP(context));
6842 fIsILStub = pMD->IsILStub();
6845 LOG((LF_CORDB, LL_INFO10000, "DS::S - fIsILStub = %d\n", fIsILStub));
6847 ControllerStackInfo info;
6850 StackTraceTicket ticket(thread);
6851 info.GetStackInfo(ticket, thread, fp, context);
6853 _ASSERTE((fp == LEAF_MOST_FRAME) || (info.m_activeFrame.md != NULL) ||
6854 (info.GetReturnFrame().md != NULL));
6858 DebuggerJitInfo *dji = info.m_activeFrame.GetJitInfoFromFrame();
6862 // !!! ERROR range step in frame with no code
6868 if (m_range != NULL)
6870 TRACE_FREE(m_range);
6871 DeleteInteropSafe(m_range);
6874 m_realRangeCount = 0;
6881 // IL ranges supplied, we need to convert them to native ranges.
6882 bool fOk = SetRangesFromIL(dji, ranges, rangeCount);
6890 // Native ranges, already supplied. Just copy them over.
6891 m_range = new (interopsafe) COR_DEBUG_STEP_RANGE[rangeCount];
6893 if (m_range == NULL)
6898 memcpy(m_range, ranges, sizeof(COR_DEBUG_STEP_RANGE) * rangeCount);
6899 m_realRangeCount = m_rangeCount = rangeCount;
6901 _ASSERTE(m_range != NULL);
6902 _ASSERTE(m_rangeCount > 0);
6903 _ASSERTE(m_realRangeCount > 0);
6907 // !!! ERROR cannot map IL ranges
6914 // Don't use the ControllerStackInfo if we are in an IL stub.
6919 m_fp = info.m_activeFrame.fp;
6920 #if defined(FEATURE_EH_FUNCLETS)
6921 // We need to remember the parent method frame pointer here so that we will recognize
6922 // the range of the stepper as being valid when we return to the parent method or stackalloc.
6923 if (info.HasReturnFrame(true))
6925 m_fpParentMethod = info.GetReturnFrame(true).fp;
6927 #endif // FEATURE_EH_FUNCLETS
6929 m_eMode = m_stepIn ? cStepIn : cStepOver;
6931 LOG((LF_CORDB,LL_INFO10000,"DS::Step %p STEP_NORMAL\n",this));
6932 m_reason = STEP_NORMAL; //assume it'll be a normal step & set it to
6933 //something else if we walk over it
6936 LOG((LF_CORDB, LL_INFO10000, "DS::Step: stepping in an IL stub\n"));
6938 // Enable the right triggers if the user wants to step in.
6941 if (this->GetDCType() == DEBUGGER_CONTROLLER_STEPPER)
6943 EnableTraceCall(info.m_activeFrame.fp);
6945 else if (this->GetDCType() == DEBUGGER_CONTROLLER_JMC_STEPPER)
6947 EnableMethodEnter();
6951 // Also perform a step-out in case this IL stub is returning to managed code.
6952 // However, we must fix up the ControllerStackInfo first, since it doesn't
6953 // report IL stubs. The active frame reported by the ControllerStackInfo is
6954 // actually the return frame in this case.
6955 info.SetReturnFrameWithActiveFrame();
6958 else if (!TrapStep(&info, in))
6960 LOG((LF_CORDB,LL_INFO10000,"DS::Step: Did TS\n"));
6962 TrapStepNext(&info);
6965 LOG((LF_CORDB,LL_INFO10000,"DS::Step: Did TS,TSO\n"));
6972 // TP_RESULT DebuggerStepper::TriggerPatch()
6973 // What: Triggers patch if we're not in a stub, and we're
6974 // outside of the stepping range. Otherwise sets another patch so as to
6975 // step out of the stub, or in the next instruction within the range.
6976 // How: If module==NULL & managed==> we're in a stub:
6977 // TrapStepOut() and return false. Module==NULL&!managed==> return
6978 // true. If m_range != NULL & execution is currently in the range,
6979 // attempt a TrapStep (TrapStepOut otherwise) & return false. Otherwise,
6981 TP_RESULT DebuggerStepper::TriggerPatch(DebuggerControllerPatch *patch,
6985 LOG((LF_CORDB, LL_INFO10000, "DS::TP\n"));
6987 // If we're frozen, we may hit a patch but we just ignore it
6990 LOG((LF_CORDB, LL_INFO1000000, "DS::TP, ignoring patch at %p during frozen state\n", patch->address));
6994 Module *module = patch->key.module;
6995 BOOL managed = patch->IsManagedPatch();
6996 mdMethodDef md = patch->key.md;
6997 SIZE_T offset = patch->offset;
6999 _ASSERTE((this->GetThread() == thread) || !"Stepper should only get patches on its thread");
7001 // Note we can only run a stack trace if:
7002 // - the context is in managed code (eg, not a stub)
7003 // - OR we have a frame in place to prime the stackwalk.
7004 ControllerStackInfo info;
7005 CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread);
7007 _ASSERTE(!ISREDIRECTEDTHREAD(thread));
7009 // Context should always be from patch.
7010 _ASSERTE(context != NULL);
7012 bool fSafeToDoStackTrace = true;
7014 // If we're in a stub (module == NULL and still in managed code), then our context is off in lala-land
7015 // Then, it's only safe to do a stackwalk if the top frame is protecting us. That's only true for a
7016 // frame_push. If we're here on a manager_push, then we don't have any such protection, so don't do the
7019 fSafeToDoStackTrace = patch->IsSafeForStackTrace();
7022 if (fSafeToDoStackTrace)
7024 StackTraceTicket ticket(patch);
7025 info.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, context);
7027 LOG((LF_CORDB, LL_INFO10000, "DS::TP: this:0x%p in %s::%s (fp:0x%p, "
7028 "off:0x%p md:0x%p), \n\texception source:%s::%s (fp:0x%p)\n",
7030 info.m_activeFrame.md!=NULL?info.m_activeFrame.md->m_pszDebugClassName:"Unknown",
7031 info.m_activeFrame.md!=NULL?info.m_activeFrame.md->m_pszDebugMethodName:"Unknown",
7032 info.m_activeFrame.fp.GetSPValue(), patch->offset, patch->key.md,
7033 m_fdException!=NULL?m_fdException->m_pszDebugClassName:"None",
7034 m_fdException!=NULL?m_fdException->m_pszDebugMethodName:"None",
7035 m_fpException.GetSPValue()));
7040 if (DetectHandleLCGMethods(dac_cast<PCODE>(patch->address), NULL, &info))
7047 // JMC steppers should not be patching here...
7048 _ASSERTE(DEBUGGER_CONTROLLER_JMC_STEPPER != this->GetDCType());
7053 LOG((LF_CORDB, LL_INFO10000,
7054 "Frame (stub) patch hit at offset 0x%x\n", offset));
7056 // This is a stub patch. If it was a TRACE_FRAME_PUSH that
7057 // got us here, then the stub's frame is pushed now, so we
7058 // tell the frame to apply the real patch. If we got here
7059 // via a TRACE_MGR_PUSH, however, then there is no frame
7060 // and we tell the stub manager that generated the
7061 // TRACE_MGR_PUSH to apply the real patch.
7062 TraceDestination trace;
7064 FramePointer frameFP;
7065 PTR_BYTE traceManagerRetAddr = NULL;
7067 if (patch->trace.GetTraceType() == TRACE_MGR_PUSH)
7069 _ASSERTE(context != NULL);
7070 CONTRACT_VIOLATION(GCViolation);
7071 traceOk = g_pEEInterface->TraceManager(
7073 patch->trace.GetStubManager(),
7076 &traceManagerRetAddr);
7078 // We don't hae an active frame here, so patch with a
7079 // FP of NULL so anything will match.
7081 // <REVISIT_TODO>@todo: should we take Esp out of the context?</REVISIT_TODO>
7082 frameFP = LEAF_MOST_FRAME;
7086 _ASSERTE(fSafeToDoStackTrace);
7087 CONTRACT_VIOLATION(GCViolation); // TraceFrame GC-triggers
7088 traceOk = g_pEEInterface->TraceFrame(thread,
7092 &(info.m_activeFrame.registers));
7094 frameFP = info.m_activeFrame.fp;
7097 // Enable the JMC backstop for traditional steppers to catch us in case
7098 // we didn't predict the call target properly.
7099 EnableJMCBackStop(NULL);
7103 || !g_pEEInterface->FollowTrace(&trace)
7104 || !PatchTrace(&trace, frameFP,
7105 (m_rgfMappingStop&STOP_UNMANAGED)?
7109 // We can't set a patch in the frame -- we need
7110 // to trap returning from this frame instead.
7112 // Note: if we're in the TRACE_MGR_PUSH case from
7113 // above, then we must place a patch where the
7114 // TraceManager function told us to, since we can't
7115 // actually unwind from here.
7117 if (patch->trace.GetTraceType() != TRACE_MGR_PUSH)
7119 _ASSERTE(fSafeToDoStackTrace);
7120 LOG((LF_CORDB,LL_INFO10000,"TSO for non TRACE_MGR_PUSH case\n"));
7125 LOG((LF_CORDB, LL_INFO10000,
7126 "TSO for TRACE_MGR_PUSH case. RetAddr: 0x%p\n", traceManagerRetAddr));
7128 // We'd better have a valid return address.
7129 _ASSERTE(traceManagerRetAddr != NULL);
7131 if (g_pEEInterface->IsManagedNativeCode(traceManagerRetAddr))
7133 // Grab the jit info for the method.
7134 DebuggerJitInfo *dji;
7135 dji = g_pDebugger->GetJitInfoFromAddr((TADDR) traceManagerRetAddr);
7137 MethodDesc* mdNative = NULL;
7138 PCODE pcodeNative = NULL;
7141 mdNative = dji->m_nativeCodeVersion.GetMethodDesc();
7142 pcodeNative = dji->m_nativeCodeVersion.GetNativeCode();
7146 // Find the method that the return is to.
7147 mdNative = g_pEEInterface->GetNativeCodeMethodDesc(dac_cast<PCODE>(traceManagerRetAddr));
7148 _ASSERTE(g_pEEInterface->GetFunctionAddress(mdNative) != NULL);
7149 pcodeNative = g_pEEInterface->GetFunctionAddress(mdNative);
7152 _ASSERTE(mdNative != NULL && pcodeNative != NULL);
7153 SIZE_T offsetRet = dac_cast<TADDR>(traceManagerRetAddr - pcodeNative);
7154 LOG((LF_CORDB, LL_INFO10000,
7155 "DS::TP: Before normally managed code AddPatch"
7156 " in %s::%s \n\tmd=0x%p, offset 0x%x, pcode=0x%p, dji=0x%p\n",
7157 mdNative->m_pszDebugClassName,
7158 mdNative->m_pszDebugMethodName,
7165 AddBindAndActivateNativeManagedPatch(mdNative,
7173 // We're hitting this code path with MC++ assemblies
7174 // that have an unmanaged entry point so the stub returns to CallDescrWorker.
7175 _ASSERTE(g_pEEInterface->GetNativeCodeMethodDesc(dac_cast<PCODE>(patch->address))->IsILStub());
7180 m_reason = STEP_NORMAL; //we tried to do a STEP_CALL, but since it didn't
7181 //work, we're doing what amounts to a normal step.
7182 LOG((LF_CORDB,LL_INFO10000,"DS 0x%p m_reason = STEP_NORMAL"
7183 "(attempted call thru stub manager, SM didn't know where"
7184 " we're going, so did a step out to original call\n",this));
7188 m_reason = STEP_CALL;
7191 EnableTraceCall(LEAF_MOST_FRAME);
7198 // @todo - when would we hit this codepath?
7199 // If we're not in managed, then we should have pushed a frame onto the Thread's frame chain,
7200 // and thus we should still safely be able to do a stackwalk here.
7201 _ASSERTE(fSafeToDoStackTrace);
7202 if (DetectHandleInterceptors(&info) )
7204 return TPR_IGNORE; //don't actually want to stop
7207 LOG((LF_CORDB, LL_INFO10000,
7208 "Unmanaged step patch hit at 0x%x\n", offset));
7210 StackTraceTicket ticket(patch);
7211 PrepareForSendEvent(ticket);
7214 } // end (module == NULL)
7216 // If we're inside an interceptor but don't want to be,then we'll set a
7217 // patch outside the current function.
7218 _ASSERTE(fSafeToDoStackTrace);
7219 if (DetectHandleInterceptors(&info) )
7221 return TPR_IGNORE; //don't actually want to stop
7224 LOG((LF_CORDB,LL_INFO10000, "DS: m_fp:0x%p, activeFP:0x%p fpExc:0x%p\n",
7225 m_fp.GetSPValue(), info.m_activeFrame.fp.GetSPValue(), m_fpException.GetSPValue()));
7227 if (IsInRange(offset, m_range, m_rangeCount, &info) ||
7228 ShouldContinueStep( &info, offset))
7230 LOG((LF_CORDB, LL_INFO10000,
7231 "Intermediate step patch hit at 0x%x\n", offset));
7233 if (!TrapStep(&info, m_stepIn))
7234 TrapStepNext(&info);
7241 LOG((LF_CORDB, LL_INFO10000, "Step patch hit at 0x%x\n", offset));
7243 // For a JMC stepper, we have an additional constraint:
7244 // skip non-user code. So if we're still in non-user code, then
7245 // we've got to keep going
7246 DebuggerMethodInfo * dmi = g_pDebugger->GetOrCreateMethodInfo(module, md);
7248 if ((dmi != NULL) && DetectHandleNonUserCode(&info, dmi))
7253 StackTraceTicket ticket(patch);
7254 PrepareForSendEvent(ticket);
7259 // Return true if this should be skipped.
7260 // For a non-jmc stepper, we don't care about non-user code, so we
7261 // don't skip it and so we always return false.
7262 bool DebuggerStepper::DetectHandleNonUserCode(ControllerStackInfo *info, DebuggerMethodInfo * pInfo)
7264 LIMITED_METHOD_CONTRACT;
7269 // For regular steppers, trace-call is just a trace-call.
7270 void DebuggerStepper::EnablePolyTraceCall()
7272 this->EnableTraceCall(LEAF_MOST_FRAME);
7275 // Traditional steppers enable MethodEnter as a back-stop for step-in.
7276 // We hope that the stub-managers will predict the step-in for us,
7277 // but in case they don't the Method-Enter should catch us.
7278 // MethodEnter is not fully correct for traditional steppers for a few reasons:
7279 // - doesn't handle step-in to native
7280 // - stops us *after* the prolog (a traditional stepper can stop us before the prolog).
7281 // - only works for methods that have the JMC probe. That can exclude all optimized code.
7282 void DebuggerStepper::TriggerMethodEnter(Thread * thread,
7283 DebuggerJitInfo *dji,
7287 _ASSERTE(dji != NULL);
7288 _ASSERTE(thread != NULL);
7289 _ASSERTE(ip != NULL);
7293 _ASSERTE(this->GetDCType() == DEBUGGER_CONTROLLER_STEPPER);
7295 _ASSERTE(!IsFrozen());
7297 MethodDesc * pDesc = dji->m_nativeCodeVersion.GetMethodDesc();
7298 LOG((LF_CORDB, LL_INFO10000, "DS::TME, desc=%p, addr=%p\n",
7301 // JMC steppers won't stop in Lightweight codegen (LCG). Just return & keep executing.
7302 if (pDesc->IsNoMetadata())
7304 LOG((LF_CORDB, LL_INFO100000, "DS::TME, skipping b/c it's dynamic code (LCG)\n"));
7308 // This is really just a heuristic. We don't want to trigger a JMC probe when we are
7309 // executing in an IL stub, or in one of the marshaling methods called by the IL stub.
7310 // The problem is that the IL stub can call into arbitrary code, including custom marshalers.
7311 // In that case the user has to put a breakpoint to stop in the code.
7312 if (g_pEEInterface->DetectHandleILStubs(thread))
7318 // To help trace down if a problem is related to a stubmanager,
7319 // we add a knob that lets us skip the MethodEnter checks. This lets tests directly
7320 // go against the Stub-managers w/o the MethodEnter check backstops.
7321 int fSkip = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgSkipMEOnStep);
7327 // See EnableJMCBackStop() for details here. This check just makes sure that we don't fire
7328 // the assert if we end up in the method we started in (which could happen if we trace call
7329 // instructions before the JMC probe).
7330 // m_StepInStartMethod may be null (if this step-in didn't start from managed code).
7331 if ((m_StepInStartMethod != pDesc) &&
7332 (!m_StepInStartMethod->IsLCGMethod()))
7334 // Since normal step-in should stop us at the prolog, and TME is after the prolog,
7335 // if a stub-manager did successfully find the address, we should get a TriggerPatch first
7336 // at native offset 0 (before the prolog) and before we get the TME. That means if
7337 // we do get the TME, then there was no stub-manager to find us.
7340 StubManager::DbgGetLog(&sLog);
7342 // Assert b/c the Stub-manager should have caught us first.
7343 // We don't want people relying on TriggerMethodEnter as the real implementation for Traditional Step-in
7344 // (see above for reasons why). However, using TME will provide a bandage for the final retail product
7345 // in cases where we are missing a stub-manager.
7346 CONSISTENCY_CHECK_MSGF(false, (
7347 "\nThe Stubmanagers failed to identify and trace a stub on step-in. The stub-managers for this code-path path need to be fixed.\n"
7348 "See http://team/sites/clrdev/Devdocs/StubManagers.rtf for more information on StubManagers.\n"
7349 "Stepper this=0x%p, startMethod='%s::%s'\n"
7350 "---------------------------------\n"
7351 "Stub manager log:\n%s"
7353 "The thread is now in managed method '%s::%s'.\n"
7354 "---------------------------------\n",
7356 ((m_StepInStartMethod == NULL) ? "unknown" : m_StepInStartMethod->m_pszDebugClassName),
7357 ((m_StepInStartMethod == NULL) ? "unknown" : m_StepInStartMethod->m_pszDebugMethodName),
7359 pDesc->m_pszDebugClassName, pDesc->m_pszDebugMethodName
7364 // Place a patch to stop us.
7365 // Don't bind to a particular AppDomain so that we can do a Cross-Appdomain step.
7366 AddBindAndActivateNativeManagedPatch(pDesc,
7368 CodeRegionInfo::GetCodeRegionInfo(dji, pDesc).AddressToOffset(ip),
7373 LOG((LF_CORDB, LL_INFO10000, "DS::TME, after setting patch to stop\n"));
7375 // Once we resume, we'll go hit that patch since we patched our return address.
7376 // Furthermore, we know the step will complete with reason = call, so set that now.
7377 m_reason = STEP_CALL;
7381 // We may have single-stepped over a return statement to land us up a frame.
7382 // Or we may have single-stepped through a method.
7383 // We never single-step into calls (we place a patch at the call destination).
// Returns true iff the step is complete at this ip (a step-complete event will be
// prepared); on the "keep going" paths the stepper re-arms its traps/patches instead.
// NOTE(review): this listing omits some lines (braces/returns), so the exact control
// flow between the visible statements is only partially reconstructible here.
7384 bool DebuggerStepper::TriggerSingleStep(Thread *thread, const BYTE *ip)
7386 LOG((LF_CORDB,LL_INFO10000,"DS::TSS this:0x%p, @ ip:0x%p\n", this, ip));
7388 _ASSERTE(!IsFrozen());
7390 // User break should only do a step-out and never actually need a singlestep flag.
7391 _ASSERTE(GetDCType() != DEBUGGER_CONTROLLER_USER_BREAKPOINT);
7394 // there's one weird case here - if the last instruction generated
7395 // a hardware exception, we may be in lala land. If so, rely on the unwind
7396 // handler to figure out what happened.
7398 // <REVISIT_TODO>@todo this could be wrong when we have the incremental collector going</REVISIT_TODO>
// Bail out if the single-step landed outside managed native code (e.g. in a stub).
7401 if (!g_pEEInterface->IsManagedNativeCode(ip))
7403 LOG((LF_CORDB,LL_INFO10000, "DS::TSS: not in managed code, Returning false (case 0)!\n"));
7404 DisableSingleStep();
7408 // If we EnC the method, we'll blast the function address,
7409 // and so have to get it from the DJI that we'll have. If
7410 // we haven't gotten debugger info about a regular function, then
7411 // we'll have to get the info from the EE, which will be valid
7412 // since we're standing in the function at this point, and
7413 // EnC couldn't have happened yet.
7414 MethodDesc *fd = g_pEEInterface->GetNativeCodeMethodDesc((PCODE)ip);
7417 DebuggerJitInfo *dji = g_pDebugger->GetJitInfoFromAddr((TADDR) ip);
7418 offset = CodeRegionInfo::GetCodeRegionInfo(dji, fd).AddressToOffset(ip);
7420 ControllerStackInfo info;
7422 // Safe to stackwalk b/c we've already checked that our IP is in crawlable code.
7423 StackTraceTicket ticket(ip);
7424 info.GetStackInfo(ticket, GetThread(), LEAF_MOST_FRAME, NULL);
7426 // This is a special case where we return from a managed method back to an IL stub. This can
7427 // only happen if there's no more managed method frames closer to the root and we want to perform
7428 // a step out, or if we step-next off the end of a method called by an IL stub. In either case,
7429 // we'll get a single step in an IL stub, which we want to ignore. We also want to enable trace
7430 // call here, just in case this IL stub is about to call the managed target (in the reverse interop case).
7433 LOG((LF_CORDB,LL_INFO10000, "DS::TSS: not in managed code, Returning false (case 0)!\n"));
7434 if (this->GetDCType() == DEBUGGER_CONTROLLER_STEPPER)
7436 EnableTraceCall(info.m_activeFrame.fp);
7438 else if (this->GetDCType() == DEBUGGER_CONTROLLER_JMC_STEPPER)
7440 EnableMethodEnter();
7442 DisableSingleStep();
7448 LOG((LF_CORDB,LL_INFO10000, "DS::TSS m_fp:0x%p, activeFP:0x%p fpExc:0x%p\n",
7449 m_fp.GetSPValue(), info.m_activeFrame.fp.GetSPValue(), m_fpException.GetSPValue()));
// LCG (dynamic) methods get special handling; if detected, we don't complete the step here.
7451 if (DetectHandleLCGMethods((PCODE)ip, fd, &info))
// Still inside the active step range (or in code we should continue stepping through):
// re-arm step traps rather than completing the step.
7456 if (IsInRange(offset, m_range, m_rangeCount, &info) ||
7457 ShouldContinueStep( &info, offset))
7459 if (!TrapStep(&info, m_stepIn))
7460 TrapStepNext(&info);
7464 LOG((LF_CORDB,LL_INFO10000, "DS::TSS: Returning false Case 1!\n"));
7469 LOG((LF_CORDB,LL_INFO10000, "DS::TSS: Returning true Case 2 for reason STEP_%02x!\n", m_reason));
7471 // @todo - when would a single-step (not a patch) land us in user-code?
7472 // For a JMC stepper, we have an additional constraint:
7473 // skip non-user code. So if we're still in non-user code, then
7474 // we've got to keep going
7475 DebuggerMethodInfo * dmi = g_pDebugger->GetOrCreateMethodInfo(fd->GetModule(), fd->GetMemberDef());
7477 if ((dmi != NULL) && DetectHandleNonUserCode(&info, dmi))
// The step completes here: take the stack trace now, while the ticket says it's safe.
7480 PrepareForSendEvent(ticket);
// Called when a trace-call hook fires at ip. Follows the stub's trace and, if the
// trace can be followed, plants a patch at the destination so the step completes
// when the call actually lands in (or towards) managed code.
7485 void DebuggerStepper::TriggerTraceCall(Thread *thread, const BYTE *ip)
7487 LOG((LF_CORDB,LL_INFO10000,"DS::TTC this:0x%x, @ ip:0x%x\n",this,ip));
7488 TraceDestination trace;
// Frozen steppers (nested func-eval in progress) ignore all triggers.
7492 LOG((LF_CORDB,LL_INFO10000,"DS::TTC exit b/c of Frozen\n"));
7496 // This is really just a heuristic. We don't want to trigger a JMC probe when we are
7497 // executing in an IL stub, or in one of the marshaling methods called by the IL stub.
7498 // The problem is that the IL stub can call into arbitrary code, including custom marshalers.
7499 // In that case the user has to put a breakpoint to stop in the code.
7500 if (g_pEEInterface->DetectHandleILStubs(thread))
7505 if (g_pEEInterface->TraceStub(ip, &trace)
7506 && g_pEEInterface->FollowTrace(&trace)
7507 && PatchTrace(&trace, LEAF_MOST_FRAME,
7508 (m_rgfMappingStop&STOP_UNMANAGED)?(true):(false)))
7510 // !!! We really want to know ahead of time if PatchTrace will succeed.
7512 PatchTrace(&trace, LEAF_MOST_FRAME, (m_rgfMappingStop&STOP_UNMANAGED)?
7515 // If we're triggering a trace call, and we're following a trace into either managed code or unjitted managed
7516 // code, then we need to update our stepper's reason to STEP_CALL to reflect the fact that we're going to land
7517 // into a new function because of a call.
7518 if ((trace.GetTraceType() == TRACE_UNJITTED_METHOD) || (trace.GetTraceType() == TRACE_MANAGED))
7520 m_reason = STEP_CALL;
7525 LOG((LF_CORDB, LL_INFO10000, "DS::TTC potentially a step call!\n"));
// Called during exception unwind when control is about to resume in a filter or
// handler at (fd, offset). Either patches the handler so the step completes there,
// or (for a same-frame step-out) lets execution continue untouched.
7529 void DebuggerStepper::TriggerUnwind(Thread *thread,
7530 MethodDesc *fd, DebuggerJitInfo * pDJI, SIZE_T offset,
7532 CorDebugStepReason unwindReason)
7536 THROWS; // from GetJitInfo
7537 GC_NOTRIGGER; // don't send IPC events
7538 MODE_COOPERATIVE; // TriggerUnwind always is coop
7540 PRECONDITION(!IsDbgHelperSpecialThread());
7541 PRECONDITION(fd->IsDynamicMethod() || (pDJI != NULL));
7545 LOG((LF_CORDB,LL_INFO10000,"DS::TU this:0x%p, in %s::%s, offset 0x%p "
7546 "frame:0x%p unwindReason:0x%x\n", this, fd->m_pszDebugClassName,
7547 fd->m_pszDebugMethodName, offset, fp.GetSPValue(), unwindReason));
7549 _ASSERTE(unwindReason == STEP_EXCEPTION_FILTER || unwindReason == STEP_EXCEPTION_HANDLER);
// NOTE(review): the log tag below says "DS::TTC" but this is TriggerUnwind --
// the message text looks copy-pasted from TriggerTraceCall.
7553 LOG((LF_CORDB,LL_INFO10000,"DS::TTC exit b/c of Frozen\n"));
7557 if (IsCloserToRoot(fp, GetUnwind()))
7559 // Handler is in a parent frame . For all steps (in,out,over)
7560 // we want to stop in the handler.
7561 // This will be like a Step Out, so we don't need any range.
7566 // Handler/Filter is in the same frame as the stepper
7567 // For a step-in/over, we want to patch the handler/filter.
7568 // But for a step-out, we want to just continue executing (and don't change
7569 // the step-reason either).
7570 if (m_eMode == cStepOut)
7572 LOG((LF_CORDB, LL_INFO10000, "DS::TU Step-out, returning for same-frame case.\n"));
7578 // Remember the origin of the exception, so that if the step looks like
7579 // it's going to complete in a different frame, but the code comes from the
7580 // same frame as the one we're in, we won't stop twice in the "same" range
7585 // An exception is exiting the step region. Set a patch on
7586 // the filter/handler.
7592 fOk = AddBindAndActivateNativeManagedPatch(fd, pDJI, offset, LEAF_MOST_FRAME, NULL);
7594 // Since we're unwinding to an already executed method, the method should already
7595 // be jitted and placing the patch should work.
7596 CONSISTENCY_CHECK_MSGF(fOk, ("Failed to place patch at TriggerUnwind.\npThis=0x%p md=0x%p, native offset=0x%x\n", this, fd, offset));
7598 LOG((LF_CORDB,LL_INFO100000,"Step reason:%s\n", unwindReason==STEP_EXCEPTION_FILTER
7599 ? "STEP_EXCEPTION_FILTER":"STEP_EXCEPTION_HANDLER"));
7600 m_reason = unwindReason;
7604 // Prepare for sending an event.
7605 // This is called 1:1 w/ SendEvent, but this method can be called in a GC_TRIGGERABLE context
7606 // whereas SendEvent is pretty strict.
7607 // Caller ensures that it's safe to run a stack trace.
7608 void DebuggerStepper::PrepareForSendEvent(StackTraceTicket ticket)
7611 _ASSERTE(!m_fReadyToSend);
7612 m_fReadyToSend = true;
7615 LOG((LF_CORDB, LL_INFO10000, "DS::SE m_fpStepInto:0x%x\n", m_fpStepInto.GetSPValue()));
// If a step-into frame was recorded, walk the stack to decide whether the step
// actually completed inside a call (reason becomes STEP_CALL) or not.
7617 if (m_fpStepInto != LEAF_MOST_FRAME)
7619 ControllerStackInfo csi;
7620 csi.GetStackInfo(ticket, GetThread(), LEAF_MOST_FRAME, NULL);
7622 if (csi.m_targetFrameFound &&
7623 #if !defined(FEATURE_EH_FUNCLETS)
7624 IsCloserToRoot(m_fpStepInto, csi.m_activeFrame.fp)
// With funclets, a non-filter funclet frame stands in for its parent, so compare
// against the return frame in that case.
7626 IsCloserToRoot(m_fpStepInto, (csi.m_activeFrame.IsNonFilterFuncletFrame() ? csi.GetReturnFrame().fp : csi.m_activeFrame.fp))
7627 #endif // FEATURE_EH_FUNCLETS
7631 m_reason = STEP_CALL;
7632 LOG((LF_CORDB, LL_INFO10000, "DS::SE this:0x%x STEP_CALL!\n", this));
7637 LOG((LF_CORDB, LL_INFO10000, "DS::SE this:0x%x not a step call!\n", this));
7643 // Steppers should only stop in interesting code.
// Debug-only sanity check: a JMC stepper must be stopping inside a JMC ("user") method.
7644 if (this->GetDCType() == DEBUGGER_CONTROLLER_JMC_STEPPER)
7646 // If we're at either a patch or SS, we'll have a context.
7647 CONTEXT *context = g_pEEInterface->GetThreadFilterContext(GetThread());
7648 if (context == NULL)
7650 void * pIP = CORDbgGetIP(reinterpret_cast<DT_CONTEXT *>(context));
7652 DebuggerJitInfo * dji = g_pDebugger->GetJitInfoFromAddr((TADDR) pIP);
7653 DebuggerMethodInfo * dmi = NULL;
7656 dmi = dji->m_methodInfo;
7658 CONSISTENCY_CHECK_MSGF(dmi->IsJMCFunction(), ("JMC stepper %p stopping in non-jmc method, MD=%p, '%s::%s'",
7659 this, dji->m_nativeCodeVersion.GetMethodDesc(), dji->m_nativeCodeVersion.GetMethodDesc()->m_pszDebugClassName, dji->m_nativeCodeVersion.GetMethodDesc()->m_pszDebugMethodName));
// Send the step-complete event to the right side. Requires PrepareForSendEvent
// to have run first (m_fReadyToSend). Returns whether the controller sent an event.
7670 bool DebuggerStepper::SendEvent(Thread *thread, bool fIpChanged)
7675 SENDEVENT_CONTRACT_ITEMS;
7679 // We practically should never have a step interrupted by SetIp.
7680 // We'll still go ahead and send the Step-complete event because we've already
7681 // deactivated our triggers by now and we haven't placed any new patches to catch us.
7682 // We assert here because we don't believe we'll ever be able to hit this scenario.
7683 // This is technically an issue, but we consider it benign enough to leave in.
7684 _ASSERTE(!fIpChanged || !"Stepper interrupted by SetIp");
7686 LOG((LF_CORDB, LL_INFO10000, "DS::SE m_fpStepInto:0x%x\n", m_fpStepInto.GetSPValue()));
7688 _ASSERTE(m_fReadyToSend);
7689 _ASSERTE(GetThreadNULLOk() == thread);
7691 CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread);
7692 _ASSERTE(!ISREDIRECTEDTHREAD(thread));
7694 // We need to send the stepper and delete the controller because our stepper
7695 // no longer has any patches or other triggers that will let it send the step-complete event.
7696 g_pDebugger->SendStep(thread, context, this, m_reason);
7701 // Now that we've sent the event, we can stop recording information.
7702 StubManager::DbgFinishLog();
// Release the step-range array (allocated interop-safe) and reset range state.
7708 void DebuggerStepper::ResetRange()
7712 TRACE_FREE(m_range);
7713 DeleteInteropSafe(m_range);
7719 //-----------------------------------------------------------------------------
7720 // Return true if this stepper is alive, but frozen. (we freeze when the stepper
7721 // enters a nested func-eval).
7722 //-----------------------------------------------------------------------------
7723 bool DebuggerStepper::IsFrozen()
7725 return (m_cFuncEvalNesting > 0);
7728 //-----------------------------------------------------------------------------
7729 // Returns true if this stepper is 'dead' - which happens if a non-frozen stepper
7730 // gets a func-eval exit.
7731 //-----------------------------------------------------------------------------
7732 bool DebuggerStepper::IsDead()
7734 return (m_cFuncEvalNesting < 0);
7737 // * ------------------------------------------------------------------------
7738 // * DebuggerJMCStepper routines
7739 // * ------------------------------------------------------------------------
// Just-My-Code stepper: same construction as the base stepper; JMC-specific
// behavior is supplied by the virtual overrides below.
7740 DebuggerJMCStepper::DebuggerJMCStepper(Thread *thread,
7741 CorDebugUnmappedStop rgfMappingStop,
7742 CorDebugIntercept interceptStop,
7743 AppDomain *appDomain) :
7744 DebuggerStepper(thread, rgfMappingStop, interceptStop, appDomain)
7746 LOG((LF_CORDB, LL_INFO10000, "DJMCStepper ctor, this=%p\n", this));
// Destructor only logs; cleanup is handled by the base DebuggerStepper.
7749 DebuggerJMCStepper::~DebuggerJMCStepper()
7751 LOG((LF_CORDB, LL_INFO10000, "DJMCStepper dtor, this=%p\n", this));
7754 // If we're a JMC stepper, then don't stop in non-user code.
// A frame is "interesting" iff its method is marked JMC (user code).
7755 bool DebuggerJMCStepper::IsInterestingFrame(FrameInfo * pFrame)
7765 DebuggerMethodInfo *pInfo = pFrame->GetMethodInfoFromFrameOrThrow();
7766 _ASSERTE(pInfo != NULL); // throws on failure
7768 bool fIsUserCode = pInfo->IsJMCFunction();
7771 LOG((LF_CORDB, LL_INFO1000000, "DS::TSO, frame '%s::%s' is '%s' code\n",
7772 pFrame->DbgGetClassName(), pFrame->DbgGetMethodName(),
7773 fIsUserCode ? "user" : "non-user"));
7778 // A JMC stepper's step-next stops at the next thing of code run.
7779 // This may be a Step-Out, or any User code called before that.
7780 // A1 -> B1 -> { A2, B2 -> B3 -> A3}
7781 // So TrapStepNext at end of A2 should land us in A3.
7782 void DebuggerJMCStepper::TrapStepNext(ControllerStackInfo *info)
7784 LOG((LF_CORDB, LL_INFO10000, "DJMCStepper::TrapStepNext, this=%p\n", this));
// Arm the JMC method-enter probe so any user code hit first completes the step.
7785 EnableMethodEnter();
7787 // This will place a patch up the stack and set m_reason = STEP_RETURN.
7788 // If we end up hitting JMC before that patch, we'll hit TriggerMethodEnter
7789 // and that will set our reason to STEP_CALL.
7793 // ip - target address for call instruction
// JMC step-in: instead of patching the callee, patch the instruction after the
// call (ipNext) and rely on the method-enter probe to catch entry into user code.
// Funclet calls are the exception -- they have no JMC probes, so they get a
// traditional step-in patch at the call target as well.
7794 bool DebuggerJMCStepper::TrapStepInHelper(
7795 ControllerStackInfo * pInfo,
7796 const BYTE * ipCallTarget,
7797 const BYTE * ipNext,
7798 bool fCallingIntoFunclet,
7801 #ifndef FEATURE_EH_FUNCLETS
7802 // There are no funclets on x86.
7803 _ASSERTE(!fCallingIntoFunclet);
7806 // If we are calling into a funclet, then we can't rely on the JMC probe to stop us because there are no
7807 // JMC probes in funclets. Instead, we have to perform a traditional step-in here.
7808 if (fCallingIntoFunclet)
7810 TraceDestination td;
7811 td.InitForManaged(reinterpret_cast<PCODE>(ipCallTarget));
7812 PatchTrace(&td, LEAF_MOST_FRAME, false);
7814 // If this succeeds, then we still need to put a patch at the return address. This is done below.
7815 // If this fails, then we definitely need to put a patch at the return address to trap the thread.
7816 // So in either case, we have to execute the rest of this function.
7821 MethodDesc * pDesc = pInfo->m_activeFrame.md;
7822 DebuggerJitInfo *dji = NULL;
7824 // We may not have a DJI if we're in an attach case. We should still be able to do a JMC-step in though.
7825 // So NULL is ok here.
7826 dji = g_pDebugger->GetJitInfo(pDesc, (const BYTE*) ipNext);
7828 // Place patch after call, which is at ipNext. Note we don't need an IL->Native map here
7829 // since we disassembled native code to find the ip after the call.
7830 SIZE_T offset = CodeRegionInfo::GetCodeRegionInfo(dji, pDesc).AddressToOffset(ipNext);
7833 LOG((LF_CORDB, LL_INFO100000, "DJMCStepper::TSIH, at '%s::%s', calling=%p, next=%p, offset=%d\n",
7834 pDesc->m_pszDebugClassName,
7835 pDesc->m_pszDebugMethodName,
7836 ipCallTarget, ipNext,
7839 // Place a patch at the native address (inside the managed method).
7840 AddBindAndActivateNativeManagedPatch(pInfo->m_activeFrame.md,
7843 pInfo->GetReturnFrame().fp,
7847 EnableMethodEnter();
7849 // Return true means that we want to let the stepper run free. It will either
7850 // hit the patch after the call instruction or it will hit a TriggerMethodEnter.
7854 // For JMC-steppers, we don't enable trace-call; we enable Method-Enter.
7855 void DebuggerJMCStepper::EnablePolyTraceCall()
7857 _ASSERTE(!IsFrozen());
7859 this->EnableMethodEnter();
7862 // Return true if this is non-user code. This means we've setup the proper patches &
7863 // triggers, etc and so we expect the controller to just run free.
7864 // This is called when all other stepping criteria are met and we're about to
7865 // send a step-complete. For JMC, this is when we see if we're in non-user code
7866 // and if so, continue stepping instead of send the step complete.
7867 // Return false if this is user-code.
7868 bool DebuggerJMCStepper::DetectHandleNonUserCode(ControllerStackInfo *pInfo, DebuggerMethodInfo * dmi)
7870 _ASSERTE(dmi != NULL);
7871 bool fIsUserCode = dmi->IsJMCFunction();
7875 LOG((LF_CORDB, LL_INFO10000, "JMC stepper stopped in non-user code, continuing.\n"));
7876 // Not-user code, we want to skip through this.
7878 // We may be here while trying to step-out.
7879 // Step-out just means stop at the first interesting frame above us.
7880 // So JMC TrapStepOut won't patch a non-user frame.
7881 // But if we're skipping over other stuff (prolog, epilog, interceptors,
7882 // trace calls), then we may still be in the middle of non-user
7883 //_ASSERTE(m_eMode != cStepOut);
7885 if (m_eMode == cStepOut)
7891 EnableMethodEnter();
7893 // Run until we hit the next thing of managed code.
7895 // Do a traditional step-out since we just want to go up 1 frame.
7896 TrapStepOut(pInfo, true); // force trad step out.
7899 // If we're not in the original frame anymore, then
7900 // If we did a Step-over at the end of a method, and that did a single-step over the return
7901 // then we may already be in our parent frame. In that case, we also want to behave
7902 // like a step-in and TriggerMethodEnter.
7903 if (this->m_fp != pInfo->m_activeFrame.fp)
7905 // If we're a step-over, then we should only be stopped in a parent frame.
7906 _ASSERTE(m_stepIn || IsCloserToLeaf(this->m_fp, pInfo->m_activeFrame.fp));
7907 EnableMethodEnter();
7910 // Step-over shouldn't stop in a frame below us in the same callstack.
7911 // So we do a traditional step-out of our current frame, which guarantees
7912 // that. After that, we act just like a step-in.
7917 // Must keep going...
7924 // Dispatched right after the prolog of a JMC function.
7925 // We may be blocking the GC here, so let's be fast!
// If the entered method is user (JMC) code, patch the current ip so the stepper
// stops there with reason STEP_CALL; non-user and lightweight-codegen methods
// are skipped and execution continues.
7926 void DebuggerJMCStepper::TriggerMethodEnter(Thread * thread,
7927 DebuggerJitInfo *dji,
7931 _ASSERTE(dji != NULL);
7932 _ASSERTE(thread != NULL);
7933 _ASSERTE(ip != NULL);
7935 _ASSERTE(!IsFrozen());
7937 MethodDesc * pDesc = dji->m_nativeCodeVersion.GetMethodDesc();
7938 LOG((LF_CORDB, LL_INFO10000, "DJMCStepper::TME, desc=%p, addr=%p\n",
7941 // JMC steppers won't stop in Lightweight delegates. Just return & keep executing.
7942 if (pDesc->IsNoMetadata())
7944 LOG((LF_CORDB, LL_INFO100000, "DJMCStepper::TME, skipping b/c it's lw-codegen\n"));
7948 // Is this user code?
7949 DebuggerMethodInfo * dmi = dji->m_methodInfo;
7950 bool fIsUserCode = dmi->IsJMCFunction();
7953 LOG((LF_CORDB, LL_INFO100000, "DJMCStepper::TME, '%s::%s' is '%s' code\n",
7954 pDesc->m_pszDebugClassName,
7955 pDesc->m_pszDebugMethodName,
7956 fIsUserCode ? "user" : "non-user"
7959 // If this isn't user code, then just return and continue executing.
7963 // MethodEnter is only enabled when we want to stop in a JMC function.
7964 // And that's where we are now. So patch the ip and resume.
7965 // The stepper will hit the patch, and stop.
7967 // It's a good thing we have the fp passed in, because we have no other
7968 // way of getting it. We can't do a stack trace here (the stack trace
7969 // would start at the last pushed Frame, which misses a lot of managed
7972 // Don't bind to a particular AppDomain so that we can do a Cross-Appdomain step.
7973 AddBindAndActivateNativeManagedPatch(pDesc,
7975 CodeRegionInfo::GetCodeRegionInfo(dji, pDesc).AddressToOffset(ip),
7980 LOG((LF_CORDB, LL_INFO10000, "DJMCStepper::TME, after setting patch to stop\n"));
7982 // Once we resume, we'll go hit that patch (duh, we patched our return address)
7983 // Furthermore, we know the step will complete with reason = call, so set that now.
7984 m_reason = STEP_CALL;
7989 //-----------------------------------------------------------------------------
7990 // Helper to convert from an EE Frame's interception enum to a CorDebugIntercept
7992 // The intercept value in EE Frame's is a 0-based enumeration (not a bitfield).
7993 // The intercept value for ICorDebug is a bitfield.
7994 //-----------------------------------------------------------------------------
7995 CorDebugIntercept ConvertFrameBitsToDbg(Frame::Interception i)
7997 _ASSERTE(i >= 0 && i < Frame::INTERCEPTION_COUNT);
7999 // Since the ee frame is a 0-based enum, we can just use a map.
8000 const CorDebugIntercept map[Frame::INTERCEPTION_COUNT] =
8002 // ICorDebug EE Frame
8003 INTERCEPT_NONE, // INTERCEPTION_NONE,
8004 INTERCEPT_CLASS_INIT, // INTERCEPTION_CLASS_INIT
8005 INTERCEPT_EXCEPTION_FILTER, // INTERCEPTION_EXCEPTION
8006 INTERCEPT_CONTEXT_POLICY, // INTERCEPTION_CONTEXT
8007 INTERCEPT_SECURITY, // INTERCEPTION_SECURITY
8008 INTERCEPT_INTERCEPTION, // INTERCEPTION_OTHER
8014 //-----------------------------------------------------------------------------
8015 // This is a helper class to do a stack walk over a certain range and find all the interceptors.
8016 // This allows a JMC stepper to see if there are any interceptors it wants to skip over (though
8017 // there's nothing JMC-specific about this).
8018 // Note that we only want to walk the stack range that the stepper is operating in.
8019 // That's because we don't care about interceptors that happened _before_ the
8020 // stepper was created.
8021 //-----------------------------------------------------------------------------
8022 class InterceptorStackInfo
8026 InterceptorStackInfo()
8028 // since this ctor just nulls out fpTop (which is already done in Init), we
8029 // only need it in debug.
8030 m_fpTop = LEAF_MOST_FRAME;
8034 // Get a CorDebugIntercept bitfield that contains a bit for each type of interceptor
8035 // if that interceptor is present within our stack-range.
8036 // Stack range is from leaf-most up to and including fp
8037 CorDebugIntercept GetInterceptorsInRange()
8039 _ASSERTE(m_fpTop != LEAF_MOST_FRAME || !"Must call Init first");
8040 return (CorDebugIntercept) m_bits;
8043 // Prime the stackwalk.
// Walks the stack from the leaf up to fpTop, accumulating interceptor bits in m_bits.
8044 void Init(FramePointer fpTop, Thread *thread, CONTEXT *pContext, BOOL contextValid)
8046 _ASSERTE(fpTop != LEAF_MOST_FRAME);
8047 _ASSERTE(thread != NULL);
8052 LOG((LF_CORDB,LL_EVERYTHING, "ISI::Init - fpTop=%p, thread=%p, pContext=%p, contextValid=%d\n",
8053 fpTop.GetSPValue(), thread, pContext, contextValid));
8056 result = DebuggerWalkStack(
8069 // This is a bitfield of all the interceptors we encounter in our stack-range
8072 // This is the top of our stack range.
8073 FramePointer m_fpTop;
// Per-frame callback for DebuggerWalkStack; 'data' is the InterceptorStackInfo.
8075 static StackWalkAction WalkStack(FrameInfo *pInfo, void *data)
8077 _ASSERTE(pInfo != NULL);
8078 _ASSERTE(data != NULL);
8079 InterceptorStackInfo * pThis = (InterceptorStackInfo*) data;
8081 // If there's an interceptor frame here, then set those
8082 // bits in our bitfield.
8083 Frame::Interception i = Frame::INTERCEPTION_NONE;
8084 Frame * pFrame = pInfo->frame;
8085 if ((pFrame != NULL) && (pFrame != FRAME_TOP))
8087 i = pFrame->GetInterception();
8088 if (i != Frame::INTERCEPTION_NONE)
8090 pThis->m_bits |= (int) ConvertFrameBitsToDbg(i);
8093 else if (pInfo->HasMethodFrame())
8095 // Check whether we are executing in a class constructor.
8096 _ASSERTE(pInfo->md != NULL);
8098 // Need to be careful about an off-by-one error here! Imagine your stack looks like:
8099 // Foo.DoSomething()
8100 // Foo..cctor <--- step starts/ends in here
8103 // and your code looks like this:
8106 // Foo.DoSomething(); <-- JMC step started here
8107 // int x = 1; <-- step ends here
8109 // This stackwalk covers the inclusive range [Foo..cctor, Foo.DoSomething()] so we will see
8110 // the static cctor in this walk. However executing inside a static class constructor does not
8111 // count as an interceptor. You must start the step outside the static constructor and then call
8112 // into it to have an interceptor. Therefore only static constructors that aren't the outermost
8113 // frame should be treated as interceptors.
8114 if (pInfo->md->IsClassConstructor() && (pInfo->fp != pThis->m_fpTop))
8116 // We called a class constructor, add the appropriate flag
8117 pThis->m_bits |= (int) INTERCEPT_CLASS_INIT;
8120 LOG((LF_CORDB,LL_EVERYTHING,"ISI::WS- Frame=%p, fp=%p, Frame bits=%x, Cor bits=0x%x\n", pInfo->frame, pInfo->fp.GetSPValue(), i, pThis->m_bits));
8123 // We can stop once we hit the top frame.
8124 if (pInfo->fp == pThis->m_fpTop)
8130 return SWA_CONTINUE;
8138 // Skip interceptors for JMC steppers.
8139 // Return true if we patch something (and thus should keep stepping)
8140 // Return false if we're done.
8141 bool DebuggerJMCStepper::DetectHandleInterceptors(ControllerStackInfo * info)
8143 LOG((LF_CORDB,LL_INFO10000,"DJMCStepper::DHI: Start DetectHandleInterceptors\n"));
8145 // For JMC, we could stop very far way from an interceptor.
8146 // So we have to do a stack walk to search for interceptors...
8147 // If we find any in our stack range (from m_fp ... current fp), then we just do a trap-step-next.
8149 // Note that this logic should also work for regular steppers, but we've left that in
8150 // as to keep that code-path unchanged.
8152 // ControllerStackInfo only gives us the bottom 2 frames on the stack, so we ignore it and
8153 // have to do our own stack walk.
8155 // @todo - for us to properly skip filters, we need to make sure that filters show up in our chains.
8158 InterceptorStackInfo info2;
8159 CONTEXT *context = g_pEEInterface->GetThreadFilterContext(this->GetThread());
8160 CONTEXT tempContext;
8162 _ASSERTE(!ISREDIRECTEDTHREAD(this->GetThread()));
// No filter context available -> let the walker fill in tempContext itself.
8164 if (context == NULL)
8166 info2.Init(this->m_fp, this->GetThread(), &tempContext, FALSE);
8170 info2.Init(this->m_fp, this->GetThread(), context, TRUE);
8173 // The following casts are safe on WIN64 platforms.
8174 int iOnStack = (int) info2.GetInterceptorsInRange();
// m_rgfInterceptStop holds the interceptors the user wants to stop at; the
// complement is the set we must skip past.
8175 int iSkip = ~((int) m_rgfInterceptStop);
8177 LOG((LF_CORDB,LL_INFO10000,"DJMCStepper::DHI: iOnStack=%x, iSkip=%x\n", iOnStack, iSkip));
8179 // If the bits on the stack contain any interceptors we want to skip, then we need to keep going.
8180 if ((iOnStack & iSkip) != 0)
8182 LOG((LF_CORDB,LL_INFO10000,"DJMCStepper::DHI: keep going!\n"));
8188 LOG((LF_CORDB,LL_INFO10000,"DJMCStepper::DHI: Done!!\n"));
8193 // * ------------------------------------------------------------------------
8194 // * DebuggerThreadStarter routines
8195 // * ------------------------------------------------------------------------
// Controller that traces a newly started thread into managed code so a
// CreateThread event can be sent once it gets there.
8197 DebuggerThreadStarter::DebuggerThreadStarter(Thread *thread)
8198 : DebuggerController(thread, NULL)
8200 LOG((LF_CORDB, LL_INFO1000, "DTS::DTS: this: %p Thread: %p\n",
8203 // Check to make sure we only have 1 ThreadStarter on a given thread. (Inspired by NDPWhidbey issue 16888)
8205 EnsureUniqueThreadStarter(this);
8209 // TP_RESULT DebuggerThreadStarter::TriggerPatch() If we're in a
8210 // stub (module==NULL&&managed) then do a PatchTrace up the stack &
8211 // return false. Otherwise DisableAll & return
8213 TP_RESULT DebuggerThreadStarter::TriggerPatch(DebuggerControllerPatch *patch,
8217 Module *module = patch->key.module;
8218 BOOL managed = patch->IsManagedPatch();
8220 LOG((LF_CORDB,LL_INFO1000, "DebuggerThreadStarter::TriggerPatch for thread 0x%x\n", Debugger::GetThreadIdHelper(thread)));
8222 if (module == NULL && managed)
8224 // This is a stub patch. If it was a TRACE_FRAME_PUSH that got us here, then the stub's frame is pushed now, so
8225 // we tell the frame to apply the real patch. If we got here via a TRACE_MGR_PUSH, however, then there is no
8226 // frame and we go back to the stub manager that generated the stub for where to patch next.
8227 TraceDestination trace;
8229 if (patch->trace.GetTraceType() == TRACE_MGR_PUSH)
8232 CONTEXT *context = GetManagedLiveCtx(thread);
8233 CONTRACT_VIOLATION(GCViolation);
8234 traceOk = g_pEEInterface->TraceManager(thread, patch->trace.GetStubManager(), &trace, context, &dummy);
8236 else if ((patch->trace.GetTraceType() == TRACE_FRAME_PUSH) && (thread->GetFrame()->IsTransitionToNativeFrame()))
8238 // If we've got a frame that is transitioning to native, there's no reason to try to keep tracing. So we
8239 // bail early and save ourselves some effort. This also works around a problem where we deadlock trying to
8240 // do too much work to determine the destination of a ComPlusMethodFrame. (See issue 87103.)
8242 // Note: trace call is still enabled, so we can just ignore this patch and wait for trace call to fire
8248 // It's questionable whether Trace_Frame_Push is actually safe or not.
8249 ControllerStackInfo csi;
8250 StackTraceTicket ticket(patch);
8251 csi.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, NULL);
8253 CONTRACT_VIOLATION(GCViolation); // TraceFrame GC-triggers
8254 traceOk = g_pEEInterface->TraceFrame(thread, thread->GetFrame(), TRUE, &trace, &(csi.m_activeFrame.registers));
// If the trace could be followed, move the patch forward along the thread's path.
8257 if (traceOk && g_pEEInterface->FollowTrace(&trace))
8259 PatchTrace(&trace, LEAF_MOST_FRAME, TRUE);
8266 // We've hit user code; trigger our event.
8272 // Give the helper thread a chance to get ready. The temporary helper can't handle
8273 // execution control well, and the RS won't do any execution control until it gets a
8274 // create Thread event, which it won't get until here.
8275 // So now's our best time to wait for the real helper thread.
8276 g_pDebugger->PollWaitingForHelper();
// Trace-call hook for the thread starter: keep following stubs toward managed code,
// but only while a debugger is attached to the thread's domain.
8283 void DebuggerThreadStarter::TriggerTraceCall(Thread *thread, const BYTE *ip)
8285 LOG((LF_CORDB, LL_EVERYTHING, "DTS::TTC called\n"));
8286 #ifdef DEBUGGING_SUPPORTED
8287 if (thread->GetDomain()->IsDebuggerAttached())
8289 TraceDestination trace;
8291 if (g_pEEInterface->TraceStub(ip, &trace) && g_pEEInterface->FollowTrace(&trace))
8293 PatchTrace(&trace, LEAF_MOST_FRAME, true);
8296 #endif //DEBUGGING_SUPPORTED
// Sends the CreateThread (thread-started) event once the new thread reaches managed code.
8300 bool DebuggerThreadStarter::SendEvent(Thread *thread, bool fIpChanged)
8305 SENDEVENT_CONTRACT_ITEMS;
8309 // This SendEvent can't be interrupted by a SetIp because until the client
8310 // gets a ThreadStarter event, it doesn't even know the thread exists, so
8311 // it certainly can't change its ip.
8312 _ASSERTE(!fIpChanged);
8314 LOG((LF_CORDB, LL_INFO10000, "DTS::SE: in DebuggerThreadStarter's SendEvent\n"));
8316 // Send the thread started event.
8317 g_pDebugger->ThreadStarted(thread);
8319 // We delete this now because its no longer needed. We can call
8320 // delete here because the queued count is above 0. This object
8321 // will really be deleted when its dequeued shortly after this
8328 // * ------------------------------------------------------------------------
8329 // * DebuggerUserBreakpoint routines
8330 // * ------------------------------------------------------------------------
// Returns whether pFrame's method belongs to class System.Diagnostics.Debugger,
// i.e. is part of the Debugger.Break() machinery we want to step past.
8332 bool DebuggerUserBreakpoint::IsFrameInDebuggerNamespace(FrameInfo * pFrame)
8342 // Steppers ignore internal frames, so should only be called on real frames.
8343 _ASSERTE(pFrame->HasMethodFrame());
8345 // Now get the namespace of the active frame
8346 MethodDesc *pMD = pFrame->md;
8350 MethodTable * pMT = pMD->GetMethodTable();
8352 LPCUTF8 szNamespace = NULL;
8353 LPCUTF8 szClassName = pMT->GetFullyQualifiedNameInfo(&szNamespace);
8355 if (szClassName != NULL && szNamespace != NULL)
8357 MAKE_WIDEPTR_FROMUTF8(wszNamespace, szNamespace); // throw
8358 MAKE_WIDEPTR_FROMUTF8(wszClassName, szClassName);
8359 if (u16_strcmp(wszClassName, W("Debugger")) == 0 &&
8360 u16_strcmp(wszNamespace, W("System.Diagnostics")) == 0)
8362 // This will continue stepping
8370 // Helper check if we're directly in a dynamic method (ignoring any chain goo
8371 // or stuff in the Debugger namespace.
8372 class IsLeafFrameDynamic
8375 static StackWalkAction WalkStackWrapper(FrameInfo *pInfo, void *data)
8377 IsLeafFrameDynamic * pThis = reinterpret_cast<IsLeafFrameDynamic*> (data);
8378 return pThis->WalkStack(pInfo);
// Per-frame callback: skips chains and Debugger.Break frames, then records
// whether the first "real" leaf frame is a lightweight (dynamic) function.
8381 StackWalkAction WalkStack(FrameInfo *pInfo)
8383 _ASSERTE(pInfo != NULL);
8385 // A FrameInfo may have both Method + Chain rolled into one.
8386 if (!pInfo->HasMethodFrame() && !pInfo->HasStubFrame())
8388 // We're a chain. Ignore it and keep looking.
8389 return SWA_CONTINUE;
8392 // So now this is the first non-chain, non-Debugger namespace frame.
8393 // LW frames don't have a name, so we check if it's LW first.
8394 if (pInfo->eStubFrameType == STUBFRAME_LIGHTWEIGHT_FUNCTION)
8396 m_fInLightWeightMethod = true;
8400 // Ignore Debugger.Break() frames.
8401 // All Debugger.Break calls will have this on the stack.
8402 if (DebuggerUserBreakpoint::IsFrameInDebuggerNamespace(pInfo))
8404 return SWA_CONTINUE;
8407 // We've now determined leafmost thing, so stop stackwalking.
8408 _ASSERTE(m_fInLightWeightMethod == false);
8413 bool m_fInLightWeightMethod;
8415 // Need this context to do stack trace.
8416 CONTEXT m_tempContext;
8419 // On success, copies the leafmost non-chain frameinfo (including stubs) for the current thread into pInfo
8420 // and returns true.
8421 // On failure, returns false.
8422 // Return true on success.
8423 bool DoCheck(IN Thread * pThread)
8431 PRECONDITION(CheckPointer(pThread));
8435 m_fInLightWeightMethod = false;
8441 &m_tempContext, false,
8444 TRUE // includes everything
8447 // We don't care whether the stackwalk succeeds or not because the
8448 // callback sets our status via this field either way, so just return it.
8449 return m_fInLightWeightMethod;
8453 // Handle a Debug.Break() notification.
8454 // This may create a controller to step-out out the Debug.Break() call (so that
8455 // we appear stopped at the callsite).
8456 // If we can't step-out (eg, we're directly in a dynamic method), then send
8457 // the debug event immediately.
8458 void DebuggerUserBreakpoint::HandleDebugBreak(Thread * pThread)
8460 bool fDoStepOut = true;
8462 // If the leaf frame is not a LW method, then step-out.
8463 IsLeafFrameDynamic info;
8464 fDoStepOut = !info.DoCheck(pThread);
8468 // Create a controller that will step out for us.
// The controller is self-managing; it deletes itself after sending its event.
8469 new (interopsafe) DebuggerUserBreakpoint(pThread);
8473 // Send debug event immediately.
8474 g_pDebugger->SendUserBreakpointAndSynchronize(pThread);
// Stepper that steps out of the Debugger.Break() call so the debuggee appears
// stopped at the user's call site. Stops everywhere except unmanaged code, and
// intercepts everything.
8479 DebuggerUserBreakpoint::DebuggerUserBreakpoint(Thread *thread)
8480 : DebuggerStepper(thread, (CorDebugUnmappedStop) (STOP_ALL & ~STOP_UNMANAGED), INTERCEPT_ALL, NULL)
8482 // Setup a step out from the current frame (which we know is
8483 // unmanaged, actually...)
8486 // This happens to be safe, but it's a very special case (so we have a special case ticket)
8487 // This is called while we're live (so no filter context) and from the fcall,
8488 // and we pushed a HelperMethodFrame to protect us. We also happen to know that we haven't
8489 // done anything illegal or dangerous since then.
8491 StackTraceTicket ticket(this);
8492 StepOut(LEAF_MOST_FRAME, ticket);
8496 // Is this frame interesting?
8497 // Use this to skip all code in the namespace "Debugger.Diagnostics"
// Returns true for frames the stepper may stop in; frames inside the debugger's
// own namespace are filtered out so the user lands at their own call site.
8498 bool DebuggerUserBreakpoint::IsInterestingFrame(FrameInfo * pFrame)
8508 return !IsFrameInDebuggerNamespace(pFrame);
// Fires once the step-out completes: raises the user-breakpoint event to the
// right side. Returning from here, the controller is on the deleted queue.
8511 bool DebuggerUserBreakpoint::SendEvent(Thread *thread, bool fIpChanged)
8516 SENDEVENT_CONTRACT_ITEMS;
8520 // See DebuggerStepper::SendEvent for why we assert here.
8521 // This is technically an issue, but it's too benign to fix.
8522 _ASSERTE(!fIpChanged);
8524 LOG((LF_CORDB, LL_INFO10000,
8525 "DUB::SE: in DebuggerUserBreakpoint's SendEvent\n"));
8527 // Send the user breakpoint event.
8528 g_pDebugger->SendRawUserBreakpoint(thread);
8530 // We delete this now because it's no longer needed. We can call
8531 // delete here because the queued count is above 0. This object
8532 // will really be deleted when it's dequeued shortly after this
8539 // * ------------------------------------------------------------------------
8540 // * DebuggerFuncEvalComplete routines
8541 // * ------------------------------------------------------------------------
// Constructor: recovers the DebuggerEval associated with the func-eval
// completion address and arms an unmanaged patch there so we regain control
// when the func-eval finishes.
8543 DebuggerFuncEvalComplete::DebuggerFuncEvalComplete(Thread *thread,
8545 : DebuggerController(thread, NULL)
// NOTE(review): the surrounding #if arms are not visible in this excerpt; the first
// assignment presumably belongs to the ARM/Thumb build (Thumb code addresses carry the
// low bit set, so THUMB_CODE is masked off before dereferencing) — confirm in full source.
8548 m_pDE = reinterpret_cast<DebuggerEvalBreakpointInfoSegment*>(((DWORD)dest) & ~THUMB_CODE)->m_associatedDebuggerEval;
8550 m_pDE = reinterpret_cast<DebuggerEvalBreakpointInfoSegment*>(dest)->m_associatedDebuggerEval;
8553 // Add an unmanaged patch at the destination.
8554 AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE*)dest, LEAF_MOST_FRAME, FALSE, TRACE_UNMANAGED);
// Hit when the func-eval's completion patch fires: restores the thread's
// pre-hijack context (preserved in m_pDE->m_context) and triggers the event.
8557 TP_RESULT DebuggerFuncEvalComplete::TriggerPatch(DebuggerControllerPatch *patch,
8562 // It had better be an unmanaged patch...
8563 _ASSERTE((patch->key.module == NULL) && !patch->IsManagedPatch());
8565 // set ThreadFilterContext back here because we need make stack crawlable! In case,
8566 // GC got triggered.
8568 // Restore the thread's context to what it was before we hijacked it for this func eval.
8569 CONTEXT *pCtx = GetManagedLiveCtx(thread);
8570 #ifdef FEATURE_DATABREAKPOINT
8572 #error Not supported
8573 #endif // TARGET_UNIX
8574 #if defined(TARGET_X86) || defined(TARGET_AMD64)
8575 // If a data breakpoint is set while we hit a breakpoint inside a FuncEval, this will make sure the data breakpoint stays
// Carry the live hardware debug registers (DR0-DR3 addresses, DR6 status, DR7 control)
// into the saved context so the restore below doesn't wipe the data breakpoints.
8576 m_pDE->m_context.Dr0 = pCtx->Dr0;
8577 m_pDE->m_context.Dr1 = pCtx->Dr1;
8578 m_pDE->m_context.Dr2 = pCtx->Dr2;
8579 m_pDE->m_context.Dr3 = pCtx->Dr3;
8580 m_pDE->m_context.Dr6 = pCtx->Dr6;
8581 m_pDE->m_context.Dr7 = pCtx->Dr7;
8583 #error Not supported
// Overwrite the thread's live context with the saved pre-func-eval context.
8586 CORDbgCopyThreadContext(reinterpret_cast<DT_CONTEXT *>(pCtx),
8587 reinterpret_cast<DT_CONTEXT *>(&(m_pDE->m_context)));
8589 // We've hit our patch, so simply disable all (which removes the
8590 // patch) and trigger the event.
// Notifies the right side that the func-eval has completed (or faulted).
// The controller deletes itself after sending.
8595 bool DebuggerFuncEvalComplete::SendEvent(Thread *thread, bool fIpChanged)
8600 SENDEVENT_CONTRACT_ITEMS;
8605 // This should not ever be interrupted by a SetIp.
8606 // The BP will be off in random native code for which SetIp would be illegal.
8607 // However, func-eval controller will restore the context from when we're at the patch,
8608 // so that will look like the IP changed on us.
8609 _ASSERTE(fIpChanged);
8611 LOG((LF_CORDB, LL_INFO10000, "DFEC::SE: in DebuggerFuncEval's SendEvent\n"));
8613 _ASSERTE(!ISREDIRECTEDTHREAD(thread));
8615 // The DebuggerEval is at our faulting address.
8616 DebuggerEval *pDE = m_pDE;
8618 // Send the func eval complete (or exception) event.
8619 g_pDebugger->FuncEvalComplete(thread, pDE);
8621 // We delete this now because it's no longer needed. We can call
8622 // delete here because the queued count is above 0. This object
8623 // will really be deleted when it's dequeued shortly after this
8630 #ifdef EnC_SUPPORTED
8632 // * ------------------------------------------------------------------------ *
8633 // * DebuggerEnCBreakpoint routines
8634 // * ------------------------------------------------------------------------ *
8636 //---------------------------------------------------------------------------------------
8638 // DebuggerEnCBreakpoint constructor - creates and activates a new EnC breakpoint
8641 // offset - IL offset in the function to place the patch
8642 // jitInfo - identifies the function in which the breakpoint is being placed
8643 // fTriggerType - breakpoint type: either REMAP_PENDING or REMAP_COMPLETE
8644 // pAppDomain - the breakpoint applies to the specified AppDomain only
8647 DebuggerEnCBreakpoint::DebuggerEnCBreakpoint(SIZE_T offset,
8648 DebuggerJitInfo *jitInfo,
8649 DebuggerEnCBreakpoint::TriggerType fTriggerType,
8650 AppDomain *pAppDomain)
8651 : DebuggerController(NULL, pAppDomain),
8653 m_fTriggerType(fTriggerType)
8655 _ASSERTE( m_jitInfo != NULL );
8658 MethodDesc* pMD = m_jitInfo->m_nativeCodeVersion.GetMethodDesc();
8659 if (m_fTriggerType == DebuggerEnCBreakpoint::REMAP_COMPLETE)
// REMAP_COMPLETE: the new code version exists, so bind directly to a native offset.
8661 success = AddBindAndActivateNativeManagedPatch(pMD, m_jitInfo, offset, LEAF_MOST_FRAME, pAppDomain);
8665 _ASSERTE(m_fTriggerType == DebuggerEnCBreakpoint::REMAP_PENDING);
8667 // Add and activate the specified patch
// REMAP_PENDING: place an IL patch keyed by (module, methodDef, EnC version);
// it is bound to native code per-version when/if that version is JITted.
8668 Module* module = m_jitInfo->m_pLoaderModule;
8669 mdMethodDef tkMethod = pMD->GetMemberDef();
8670 SIZE_T encVersion = m_jitInfo->m_encVersion;
8671 success = AddILPatch(pAppDomain, module, tkMethod, NULL, encVersion, offset, TRUE);
8674 _ASSERTE(success != FALSE);
8676 LOG((LF_ENC,LL_INFO1000, "DEnCBP::DEnCBP adding %s patch to 0x%x encVersion: %zx\n",
8677 fTriggerType == REMAP_PENDING ? "ResumePending" : "ResumeComplete", pMD->GetMemberDef(), m_jitInfo->m_encVersion));
8680 //---------------------------------------------------------------------------------------
8682 // DebuggerEnCBreakpoint::TriggerPatch
8683 // called by the debugging infrastructure when the patch is hit.
8686 // patch - specifies the patch that was hit
8687 // thread - identifies the thread on which the patch was hit
8688 // tyWhy - TY_SHORT_CIRCUIT for normal REMAP_PENDING EnC patches
8691 // TPR_IGNORE if the debugger chooses not to take a remap opportunity
8692 // TPR_IGNORE_AND_STOP when a remap-complete event is sent
8693 // Doesn't return at all if the debugger remaps execution to the new version of the method
8695 TP_RESULT DebuggerEnCBreakpoint::TriggerPatch(DebuggerControllerPatch *patch,
8699 _ASSERTE(HasLock());
8701 LOG((LF_ENC, LL_ALWAYS,
8702 "DEnCBP::TP: Triggered EnC %s breakpoint: tid=%p, this=%p\n",
8703 m_fTriggerType == REMAP_PENDING ? "ResumePending" : "ResumeComplete", thread, this));
8705 // If this is a REMAP_COMPLETE patch, then dispatch the RemapComplete callback
8706 if (m_fTriggerType == REMAP_COMPLETE)
8708 return HandleRemapComplete(patch, thread, tyWhy);
8711 // This must be a REMAP_PENDING patch
8712 // unless we got here on an explicit short-circuit, don't do any work
8713 if (tyWhy != TY_SHORT_CIRCUIT)
8715 LOG((LF_ENC, LL_ALWAYS, "DEnCBP::TP: not short-circuit ... bailing\n"));
8719 // Map the current native offset back to the IL offset in the old
8720 // function. This will be mapped to the new native offset within
8721 // ResumeInUpdatedFunction
8722 CorDebugMappingResult map;
8724 SIZE_T currentIL = (SIZE_T)m_jitInfo->MapNativeOffsetToIL(patch->offset, &map, &which);
8726 // We only lay DebuggerEnCBreakpoints at sequence points
8727 _ASSERTE(map == MAPPING_EXACT);
8728 _ASSERTE(patch->IsManagedPatch());
8730 Module *module = patch->key.module;
8731 mdMethodDef md = patch->key.md;
8732 LOG((LF_ENC, LL_INFO10000,
8733 "DEnCBP::TP: methodDef=0x%08x, encVersion=%zx, IL offset=0x%zx\n",
8734 md, m_jitInfo->m_encVersion, currentIL));
8735 patch->LogInstance();
8737 // Grab the jit info for the original copy of the method, which is
8738 // what we are executing right now.
8739 DebuggerJitInfo* pJitInfo = m_jitInfo;
8740 // Grab the MethodDesc for this function.
8741 MethodDesc* pMD = pJitInfo->m_nativeCodeVersion.GetMethodDesc();
8743 _ASSERTE(pJitInfo != NULL);
8744 _ASSERTE(pMD != NULL);
8745 LOG((LF_ENC, LL_INFO10000,
8746 "DEnCBP::TP: DJI: %p pMD: %p (%s::%s)\n", pJitInfo, pMD, pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
8750 // Either the current method matches what we are executing or it is
8751 // the typical definition for a generic method (i.e., typical).
8752 MethodDesc* loadedMD = g_pEEInterface->FindLoadedMethodRefOrDef(module, md);
8753 _ASSERTE(loadedMD == pMD || loadedMD->IsTypicalMethodDefinition());
8757 // Grab the context for this thread. This is the context that was
8758 // passed to COMPlusFrameHandler.
8759 CONTEXT *pContext = GetManagedLiveCtx(thread);
8761 // We use the module the current function is in.
8762 _ASSERTE(module->IsEditAndContinueEnabled());
8763 EditAndContinueModule *pModule = (EditAndContinueModule*)module;
8765 // Release the controller lock for the rest of this method
// Inverse holder: *releases* g_criticalSection here and reacquires it when this
// scope exits — required because we call out to the right side below.
8766 CrstBase::UnsafeCrstInverseHolder inverseLock(&g_criticalSection);
8768 // resumeIL is the IL offset in the new version of the method the debugger wants
8769 // to resume to. We'll pass the address of this variable over to the right-side
8770 // and if it modifies the contents while we're stopped dispatching the RemapOpportunity,
8771 // then we know it wants a remap.
8772 // This form of side-channel communication seems like an error-prone workaround. Ideally the
8773 // remap IL (if any) would just be returned in a response event.
8774 SIZE_T resumeIL = (SIZE_T) -1;
8776 // Debugging code to enable a break after N RemapOpportunities
8778 static int breakOnRemapOpportunity = -1;
8779 if (breakOnRemapOpportunity == -1)
8780 breakOnRemapOpportunity = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EnCBreakOnRemapOpportunity);
8782 static int remapOpportunityCount = 0;
8784 ++remapOpportunityCount;
8785 if (breakOnRemapOpportunity == 1 || breakOnRemapOpportunity == remapOpportunityCount)
8787 _ASSERTE(!"BreakOnRemapOpportunity");
8791 // Send an event to the RS to call the RemapOpportunity callback, passing the address of resumeIL.
8792 // If the debugger responds with a call to RemapFunction, the supplied IP will be copied into resumeIL
8793 // and we will know to update the context and resume the function at the new IP. Otherwise we just do
8794 // nothing and try again on next RemapFunction breakpoint
8795 g_pDebugger->LockAndSendEnCRemapEvent(pJitInfo, currentIL, &resumeIL)
8797 LOG((LF_ENC, LL_ALWAYS,
8798 "DEnCBP::TP: resume IL is %zx\n", resumeIL));
8800 // Has the debugger requested a remap?
8801 if (resumeIL != (SIZE_T) -1)
8803 // This will jit the function, update the context, and resume execution at the new location.
// Note: does not return on success — execution continues in the updated method.
8804 g_pEEInterface->ResumeInUpdatedFunction(pModule,
8809 _ASSERTE(!"Returned from ResumeInUpdatedFunction!");
8812 LOG((LF_CORDB, LL_ALWAYS, "DEnCB::TP: We've returned from ResumeInUpdatedFunction"
8813 "we're going to skip the EnC patchId:0x%zx\n", patch->patchId));
8815 // We're returning then we'll have to re-get this lock. Be careful that we haven't kept any controller/patches
8816 // in the caller. They can move when we unlock, so when we release the lock and reget it here, things might have
8817 // changed underneath us.
8818 // inverseLock holder will reacquire lock.
8824 // HandleRemapComplete is called for an EnC patch in the newly updated function
8825 // so that we can notify the debugger that the remap has completed and they can
8826 // now remap their steppers or anything else that depends on the new code actually
8827 // being on the stack. We return TPR_IGNORE_AND_STOP because it's possible that the
8828 // function was edited after we handled remap complete and want to make sure we
8829 // start a fresh call to TriggerPatch
8831 TP_RESULT DebuggerEnCBreakpoint::HandleRemapComplete(DebuggerControllerPatch *patch,
8835 LOG((LF_ENC, LL_ALWAYS, "DEnCBP::HRC: HandleRemapComplete: %p\n", this));
8837 // Debugging code to enable a break after N RemapCompletes
8839 static int breakOnRemapComplete = -1;
8840 if (breakOnRemapComplete == -1)
8841 breakOnRemapComplete = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EnCBreakOnRemapComplete);
8843 static int remapCompleteCount = 0;
8844 ++remapCompleteCount;
8845 if (breakOnRemapComplete == 1 || breakOnRemapComplete == remapCompleteCount)
8847 _ASSERTE(!"BreakOnRemapComplete");
8850 _ASSERTE(HasLock());
// Snapshot everything we need from the DJI *before* this controller is deleted below.
8852 bool fApplied = m_jitInfo->m_encBreakpointsApplied;
8853 MethodDesc* pMD = m_jitInfo->m_nativeCodeVersion.GetMethodDesc();
8854 _ASSERTE(pMD != NULL);
8855 LOG((LF_ENC, LL_INFO10000, "DEnCBP::HRC: Applied: %s, pMD: %p (%s::%s)\n",
8856 (fApplied ? "true" : "false"), pMD, pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
8858 // Need to delete this before unlock below so if any other thread come in after the unlock
8859 // they won't handle this patch.
8863 // NOTE: We just deleted ourselves. Can't access anything any instances after this point.
8866 // if have somehow updated this function before we resume into it then just bail
8869 LOG((LF_ENC, LL_ALWAYS, "DEnCBP::HRC: function already updated, ignoring\n"));
8870 return TPR_IGNORE_AND_STOP;
8875 // Either the current method matches what we are executing or it is
8876 // the typical definition for a generic method (i.e., typical).
8877 MethodDesc* loadedMD = g_pEEInterface->FindLoadedMethodRefOrDef(patch->key.module, patch->key.md);
8878 _ASSERTE(loadedMD == pMD || loadedMD->IsTypicalMethodDefinition());
8882 LOG((LF_ENC, LL_ALWAYS, "DEnCBP::HRC: unlocking controller\n"));
8884 // Unlock the controller lock and dispatch the remap complete event
// Inverse holder: drops g_criticalSection for the cross-process send, reacquires on exit.
8885 CrstBase::UnsafeCrstInverseHolder inverseLock(&g_criticalSection);
8887 LOG((LF_ENC, LL_ALWAYS, "DEnCBP::HRC: sending RemapCompleteEvent\n"));
8889 g_pDebugger->LockAndSendEnCRemapCompleteEvent(pMD);
8891 // We're returning then we'll have to re-get this lock. Be careful that we haven't kept any controller/patches
8892 // in the caller. They can move when we unlock, so when we release the lock and reget it here, things might have
8893 // changed underneath us.
8894 // inverseLock holder will reacquire.
8896 return TPR_IGNORE_AND_STOP;
8898 #endif //EnC_SUPPORTED
8900 // continuable-exceptions
8901 // * ------------------------------------------------------------------------ *
8902 // * DebuggerContinuableExceptionBreakpoint routines
8903 // * ------------------------------------------------------------------------ *
8906 //---------------------------------------------------------------------------------------
8911 // pThread - the thread on which we are intercepting an exception
8912 // nativeOffset - This is the target native offset. It is where we are going to resume execution.
8913 // jitInfo - the DebuggerJitInfo of the method at which we are intercepting
8914 // pAppDomain - the AppDomain in which the thread is executing
// Constructor: arms a native patch at the interception target so we get control
// once execution resumes there (i.e., after the exception has been intercepted).
8917 DebuggerContinuableExceptionBreakpoint::DebuggerContinuableExceptionBreakpoint(Thread *pThread,
8918 SIZE_T nativeOffset,
8919 DebuggerJitInfo *jitInfo,
8920 AppDomain *pAppDomain)
8921 : DebuggerController(pThread, pAppDomain)
8923 _ASSERTE( jitInfo != NULL );
8924 // Add a native patch at the specified native offset, which is where we are going to resume execution.
8925 AddBindAndActivateNativeManagedPatch(jitInfo->m_nativeCodeVersion.GetMethodDesc(), jitInfo, nativeOffset, LEAF_MOST_FRAME, pAppDomain);
8928 //---------------------------------------------------------------------------------------
8930 // This function is called when the patch added in the constructor is hit. At this point,
8931 // we have already resumed execution, and the exception is no longer in flight.
8934 // patch - the patch added in the constructor; unused
8935 // thread - the thread in question; unused
8936 // tyWhy - a flag which is only useful for EnC; unused
8939 // This function always returns TPR_TRIGGER, meaning that it wants to send an event to notify the RS.
8942 TP_RESULT DebuggerContinuableExceptionBreakpoint::TriggerPatch(DebuggerControllerPatch *patch,
8946 LOG((LF_CORDB, LL_INFO10000, "DCEBP::TP\n"));
8949 // Disable the patch
8953 // We will send a notification to the RS when the patch is triggered.
8957 //---------------------------------------------------------------------------------------
8959 // This function is called when we want to notify the RS that an interception is complete.
8960 // At this point, we have already resumed execution, and the exception is no longer in flight.
8963 // thread - the thread in question
8964 // fIpChanged - whether the IP has changed by SetIP after the patch is hit but
8965 // before this function is called
8968 bool DebuggerContinuableExceptionBreakpoint::SendEvent(Thread *thread, bool fIpChanged)
8973 SENDEVENT_CONTRACT_ITEMS;
8979 LOG((LF_CORDB, LL_INFO10000,
8980 "DCEBP::SE: in DebuggerContinuableExceptionBreakpoint's SendEvent\n"));
8984 g_pDebugger->SendInterceptExceptionComplete(thread);
8987 // On WIN64, by the time we get here the DebuggerExState is gone already.
8988 // ExceptionTrackers are cleaned up before we resume execution for a handled exception.
// Clear the saved intercept context now that the interception is complete
// (only meaningful on the non-funclet EH model; see the comment above).
8989 #if !defined(FEATURE_EH_FUNCLETS)
8990 thread->GetExceptionState()->GetDebuggerState()->SetDebuggerInterceptContext(NULL);
8991 #endif // !FEATURE_EH_FUNCLETS
8995 // We delete this now because it's no longer needed. We can call
8996 // delete here because the queued count is above 0. This object
8997 // will really be deleted when it's dequeued shortly after this
9005 #ifdef FEATURE_DATABREAKPOINT
// Returns whether the given context indicates a hardware data breakpoint fired.
9007 /* static */ bool DebuggerDataBreakpoint::IsDataBreakpoint(Thread *thread, CONTEXT * pContext)
9009 bool hitDataBp = false;
9011 #error Not supported
9012 #endif // TARGET_UNIX
9013 #if defined(TARGET_X86) || defined(TARGET_AMD64)
// On x86/amd64, DR6 status bits B0-B3 record which of the four hardware
// breakpoint slots triggered; any set bit means a data breakpoint was hit.
9014 PDR6 pdr6 = (PDR6)&(pContext->Dr6);
9016 if (pdr6->B0 || pdr6->B1 || pdr6->B2 || pdr6->B3)
9020 #else // defined(TARGET_X86) || defined(TARGET_AMD64)
9021 #error Not supported
9022 #endif // defined(TARGET_X86) || defined(TARGET_AMD64)
// Handles a hardware data-breakpoint hit. If the thread is already at a safe
// place, the debugger can stop now; otherwise a controller is created to roll
// the thread forward (past a JIT write-barrier helper, or via single-stepping)
// until it reaches a stoppable point.
9026 /* static */ bool DebuggerDataBreakpoint::TriggerDataBreakpoint(Thread *thread, CONTEXT * pContext)
9028 LOG((LF_CORDB, LL_INFO10000, "D::DDBP: Doing TriggerDataBreakpoint...\n"));
9030 bool hitDataBp = IsDataBreakpoint(thread, pContext);
9031 bool result = false;
9034 if (g_pDebugger->IsThreadAtSafePlace(thread))
9036 LOG((LF_CORDB, LL_INFO10000, "D::DDBP: HIT DATA BREAKPOINT...\n"));
// Work on a copy of the context; AdjustContextForJITHelpersForDebugger tells us
// whether we're inside a JIT helper (e.g. write barrier) and rewrites the copy
// to the helper's return point if so.
9041 CONTEXT contextToAdjust;
9042 BOOL adjustedContext = FALSE;
9043 memcpy(&contextToAdjust, pContext, sizeof(CONTEXT));
9044 adjustedContext = g_pEEInterface->AdjustContextForJITHelpersForDebugger(&contextToAdjust);
9045 if (adjustedContext)
9047 LOG((LF_CORDB, LL_INFO10000, "D::DDBP: HIT DATA BREAKPOINT INSIDE WRITE BARRIER...\n"));
// Patch the helper's return address so we regain control once the barrier exits.
9048 DebuggerDataBreakpoint *pDataBreakpoint = new (interopsafe) DebuggerDataBreakpoint(thread);
9049 pDataBreakpoint->AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE*)GetIP(&contextToAdjust), FramePointer::MakeFramePointer(GetFP(&contextToAdjust)), true, DPT_DEFAULT_TRACE_TYPE);
9053 LOG((LF_CORDB, LL_INFO10000, "D::DDBP: HIT DATA BREAKPOINT BUT STILL NEED TO ROLL ...\n"));
// Not in a recognized helper: single-step until the thread reaches a safe place.
9054 DebuggerDataBreakpoint *pDataBreakpoint = new (interopsafe) DebuggerDataBreakpoint(thread);
9055 pDataBreakpoint->EnableSingleStep();
9062 LOG((LF_CORDB, LL_INFO10000, "D::DDBP: DIDN'T TRIGGER DATA BREAKPOINT...\n"));
// Hit at the return of a JIT helper (patch placed by TriggerDataBreakpoint).
// If still not at a safe place, keep single-stepping.
9068 TP_RESULT DebuggerDataBreakpoint::TriggerPatch(DebuggerControllerPatch *patch, Thread *thread, TRIGGER_WHY tyWhy)
9070 if (g_pDebugger->IsThreadAtSafePlace(thread))
9076 LOG((LF_CORDB, LL_INFO10000, "D::DDBP: REACH RETURN OF JIT HELPER BUT STILL NEED TO ROLL ...\n"));
9077 this->EnableSingleStep();
// Called on each single-step while rolling the thread forward after a data
// breakpoint; stops stepping once the thread reaches a safe place.
9082 bool DebuggerDataBreakpoint::TriggerSingleStep(Thread *thread, const BYTE *ip)
9084 if (g_pDebugger->IsThreadAtSafePlace(thread))
9086 LOG((LF_CORDB, LL_INFO10000, "D:DDBP: Finally safe for stopping, stop stepping\n"));
9087 this->DisableSingleStep();
9092 LOG((LF_CORDB, LL_INFO10000, "D:DDBP: Still not safe for stopping, continue stepping\n"));
9097 #endif // FEATURE_DATABREAKPOINT
9099 #endif // !DACCESS_COMPILE