// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

// ============================================================================
// File: stackwalktypes.h
//
// ============================================================================
// Contains types used by stackwalk.h.
//

#ifndef __STACKWALKTYPES_H__
#define __STACKWALKTYPES_H__

class CrawlFrame;
struct RangeSection;
struct StackwalkCacheEntry;

//
// This type should be used internally inside the code manager only. EECodeInfo should
// be used in general code instead. Ideally, we would replace all uses of METHODTOKEN
// with EECodeInfo.
//
struct METHODTOKEN
{
    METHODTOKEN(RangeSection * pRangeSection, TADDR pCodeHeader)
        : m_pRangeSection(pRangeSection), m_pCodeHeader(pCodeHeader)
    {
    }

    METHODTOKEN()
    {
    }

    // Cache of RangeSection containing the code to avoid redundant lookups.
    RangeSection * m_pRangeSection;

    // CodeHeader* for EEJitManager
    // PTR_RUNTIME_FUNCTION for managed native code
    TADDR m_pCodeHeader;

    BOOL IsNull() const
    {
        return m_pCodeHeader == NULL;
    }
};
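
// Illustrative sketch (hypothetical call site, not part of this header): code-manager
// internal code would wrap a range-section lookup result in a METHODTOKEN and bail out
// on a null token; general runtime code should go through EECodeInfo instead.
//
//   METHODTOKEN token(pRangeSection, pCodeHeader);   // both pointers obtained elsewhere
//   if (token.IsNull())
//       return;                                      // no code header => nothing to inspect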

//************************************************************************
// Stack walking
//************************************************************************

enum StackCrawlMark
{
    LookForMe               = 0,
    LookForMyCaller         = 1,
    LookForMyCallersCaller  = 2,
    LookForThread           = 3
};

enum StackWalkAction
{
    SWA_CONTINUE    = 0,    // continue walking
    SWA_ABORT       = 1,    // stop walking, early out in "failure case"
    SWA_FAILED      = 2     // couldn't walk stack
};

#define SWA_DONE SWA_CONTINUE

// Pointer to the StackWalk callback function.
typedef StackWalkAction (*PSTACKWALKFRAMESCALLBACK)(
    CrawlFrame   *pCF,      // current frame being examined
    VOID         *pData     // Caller's private data
    );
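
// Illustrative sketch (not from this header): a minimal PSTACKWALKFRAMESCALLBACK that
// counts frames through the pData cookie and never aborts the walk. The frame-walking
// machinery itself lives in stackwalk.h/.cpp; only the callback shape is shown here.
//
//   StackWalkAction CountFramesCallback(CrawlFrame *pCF, VOID *pData)
//   {
//       ++*static_cast<size_t *>(pData);   // pData is the caller's private counter
//       return SWA_CONTINUE;               // SWA_ABORT would stop the walk early
//   }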

/******************************************************************************
   StackwalkCache: new class implements stackwalk perf optimization features.
   StackwalkCacheEntry array: very simple per thread hash table, keeping cached data.
   StackwalkCacheUnwindInfo: used by EECodeManager::UnwindStackFrame to return
       stackwalk cache flags.
   Cf. Ilyakoz for any questions.
*/
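
// Illustrative sketch of the intended lookup/insert cycle (variables here are
// hypothetical; the real call sites live in the stack walker and code manager).
// The assumption that Lookup() returns TRUE on a hit and leaves the data in
// m_CacheEntry is inferred from the declarations below, not stated by this header:
//
//   StackwalkCache cache;                                   // per-thread instance
//   if (cache.Enabled() && cache.Lookup(controlPC))
//   {
//       // cache hit: cache.m_CacheEntry holds the cached frame data for controlPC
//   }
//   else
//   {
//       StackwalkCacheEntry entry;
//       StackwalkCacheUnwindInfo unwindInfo;                // filled in by the code manager
//       if (entry.Init(controlPC, spOffset, &unwindInfo, argSize) && entry.IsSafeToUseCache())
//           cache.Insert(&entry);                           // publish for the next walk of this IP
//   }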

struct StackwalkCacheUnwindInfo
{
#if defined(_TARGET_AMD64_)
    ULONG RBPOffset;
    ULONG RSPOffsetFromUnwindInfo;
#else  // !_TARGET_AMD64_
    size_t securityObjectOffset;    // offset of SecurityObject. 0 if there is no security object
    BOOL fUseEbp;                   // Is EBP modified by the method - either for a frame-pointer or for a scratch-register?
    BOOL fUseEbpAsFrameReg;         // use EBP as the frame pointer?
#endif // !_TARGET_AMD64_

    inline StackwalkCacheUnwindInfo() { SUPPORTS_DAC; ZeroMemory(this, sizeof(StackwalkCacheUnwindInfo)); }
    StackwalkCacheUnwindInfo(StackwalkCacheEntry * pCacheEntry);
};

//************************************************************************

// The cache entry is 16 bytes on 64-bit targets and 8 bytes on 32-bit ones
// (see the static_assert below).
#if defined(_WIN64)
    #define STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY 0x10
#else  // !_WIN64
    #define STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY 0x8
#endif // !_WIN64

struct DECLSPEC_ALIGN(STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY) StackwalkCacheEntry
{
    //
    //  don't rearrange the fields, so that the invalid value 0x8000000000000000 will never appear
    //  as a valid StackwalkCacheEntry; this is required for atomic MOVQ using FILD/FISTP instructions
    //
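    //  (Descriptive note, added: on x86 the 8-byte entry is copied as one aligned 64-bit
    //  quantity, and the FILD/FISTP pair is the classic way to get an atomic 64-bit
    //  load/store there; keeping IP first and never letting 0x8000000000000000 be a
    //  valid entry presumably lets that bit pattern serve as an always-invalid sentinel.)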
    UINT_PTR IP;                    // instruction pointer this entry describes
#if !defined(_TARGET_AMD64_)
    WORD ESPOffset:15;              // stack offset (frame size + pending arguments + etc)
    WORD securityObjectOffset:3;    // offset of SecurityObject. 0 if there is no security object
    WORD fUseEbp:1;                 // For ESP methods, is EBP touched at all?
    WORD fUseEbpAsFrameReg:1;       // use EBP as the frame register?
    WORD argSize:11;                // size of args pushed on stack
#else  // _TARGET_AMD64_
    DWORD RSPOffset;
    DWORD RBPOffset;
#endif // _TARGET_AMD64_

    inline BOOL Init(UINT_PTR IP,
                     UINT_PTR SPOffset,
                     StackwalkCacheUnwindInfo *pUnwindInfo,
                     UINT_PTR argSize)
    {
        LIMITED_METHOD_CONTRACT;

        this->IP = IP;

#if defined(_TARGET_X86_)
        this->ESPOffset         = SPOffset;
        this->argSize           = argSize;

        this->securityObjectOffset = (WORD)pUnwindInfo->securityObjectOffset;
        _ASSERTE(this->securityObjectOffset == pUnwindInfo->securityObjectOffset);

        this->fUseEbp           = pUnwindInfo->fUseEbp;
        this->fUseEbpAsFrameReg = pUnwindInfo->fUseEbpAsFrameReg;
        _ASSERTE(!fUseEbpAsFrameReg || fUseEbp);

        // return success only if SPOffset and argSize fit into the bitfields above
        return ((this->ESPOffset == SPOffset) &&
                (this->argSize == argSize));
#elif defined(_TARGET_AMD64_)
        // The size of a stack frame is guaranteed to fit in 4 bytes, so we don't need to check RSPOffset and RBPOffset.

        // The actual SP offset may be bigger than the offset we get from the unwind info because of stack allocations.
        _ASSERTE(SPOffset >= pUnwindInfo->RSPOffsetFromUnwindInfo);

        _ASSERTE(FitsIn<DWORD>(SPOffset));
        this->RSPOffset = static_cast<DWORD>(SPOffset);
        _ASSERTE(FitsIn<DWORD>(pUnwindInfo->RBPOffset + (SPOffset - pUnwindInfo->RSPOffsetFromUnwindInfo)));
        this->RBPOffset = static_cast<DWORD>(pUnwindInfo->RBPOffset + (SPOffset - pUnwindInfo->RSPOffsetFromUnwindInfo));
        return TRUE;
#else  // !_TARGET_X86_ && !_TARGET_AMD64_
        return FALSE;
#endif // !_TARGET_X86_ && !_TARGET_AMD64_
    }
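
    // Worked example for the AMD64 path above (numbers made up for illustration):
    // with RSPOffsetFromUnwindInfo = 0x40 and RBPOffset = 0x20 from the unwind info,
    // but an actual SPOffset of 0x60 (e.g. extra dynamic stack allocations), the cached
    // RBPOffset becomes 0x20 + (0x60 - 0x40) = 0x40, i.e. the saved RBP location is
    // rebased so that it remains expressed relative to the frame's actual SP.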

    inline BOOL HasSecurityObject()
    {
        LIMITED_METHOD_CONTRACT;

#if defined(_TARGET_X86_)
        return securityObjectOffset != 0;
#else  // !_TARGET_X86_
        // On AMD64 we don't save anything by grabbing the security object before it is needed. This is because
        // we need to crack the GC info in order to find the security object, and to unwind we only need to
        // crack the unwind info.
        return FALSE;
#endif // !_TARGET_X86_
    }

    inline BOOL IsSafeToUseCache()
    {
        LIMITED_METHOD_CONTRACT;

#if defined(_TARGET_X86_)
        return (!fUseEbp || fUseEbpAsFrameReg);
#elif defined(_TARGET_AMD64_)
        return TRUE;
#else  // !_TARGET_X86_ && !_TARGET_AMD64_
        return FALSE;
#endif // !_TARGET_X86_ && !_TARGET_AMD64_
    }
};

#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
static_assert_no_msg(sizeof(StackwalkCacheEntry) == 2 * sizeof(UINT_PTR));
#endif // _TARGET_X86_ || _TARGET_AMD64_
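
// Size check spelled out (descriptive note, added): on x86 the entry is a 4-byte IP plus
// 31 bits of WORD bitfields packed into 4 bytes (8 bytes total); on AMD64 it is an
// 8-byte IP plus two DWORDs (16 bytes total). Both equal 2 * sizeof(UINT_PTR) and match
// the STACKWALK_CACHE_ENTRY_ALIGN_BOUNDARY values above.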

//************************************************************************

class StackwalkCache
{
    friend struct _DacGlobals;

public:
    BOOL Lookup(UINT_PTR IP);
    void Insert(StackwalkCacheEntry *pCacheEntry);
    inline void ClearEntry () { LIMITED_METHOD_DAC_CONTRACT; m_CacheEntry.IP = 0; }
    inline BOOL Enabled() { LIMITED_METHOD_DAC_CONTRACT; return s_Enabled; }
    inline BOOL IsEmpty () { LIMITED_METHOD_CONTRACT; return m_CacheEntry.IP == 0; }

#ifndef DACCESS_COMPILE
    StackwalkCache();
#endif

    StackwalkCacheEntry m_CacheEntry; // local copy of Global Cache entry for current IP

    static void Invalidate(LoaderAllocator * pLoaderAllocator);

private:
    unsigned GetKey(UINT_PTR IP);

#ifdef DACCESS_COMPILE
    // DAC can't rely on the cache here
    const static BOOL s_Enabled;
#else
    static BOOL s_Enabled;
#endif
};

//************************************************************************

inline StackwalkCacheUnwindInfo::StackwalkCacheUnwindInfo(StackwalkCacheEntry * pCacheEntry)
{
    LIMITED_METHOD_CONTRACT;

#if defined(_TARGET_AMD64_)
    RBPOffset = pCacheEntry->RBPOffset;
#else  // !_TARGET_AMD64_
    securityObjectOffset = pCacheEntry->securityObjectOffset;
    fUseEbp = pCacheEntry->fUseEbp;
    fUseEbpAsFrameReg = pCacheEntry->fUseEbpAsFrameReg;
#endif // !_TARGET_AMD64_
}

#endif // __STACKWALKTYPES_H__