1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
5 #include "createdump.h"
6 #include <asm/ptrace.h>
// Architecture-specific register accessors and ptrace helpers.
// NOTE(review): the matching #endif/#elif branches for this #if fall on lines
// elided from this excerpt — confirm against the full file.
8 #if defined(__aarch64__)
9 // See src/pal/src/include/pal/context.h
// In the AArch64 ABI x29 is the frame pointer and x30 the link register;
// sp/pc/pstate live in dedicated fields of the kernel register struct.
10 #define MCREG_Fp(mc) ((mc).regs[29])
11 #define MCREG_Lr(mc) ((mc).regs[30])
12 #define MCREG_Sp(mc) ((mc).sp)
13 #define MCREG_Pc(mc) ((mc).pc)
14 #define MCREG_Cpsr(mc) ((mc).pstate)
// Used below to cast PTRACE_* request constants for libc's ptrace() signature
// (glibc declares the request as enum __ptrace_request; other libcs use int).
// NOTE(review): the preprocessor guard around this typedef is elided here.
22 typedef int __ptrace_request;
// The x86_64 kernel FP register struct packs the FPU last-error and last-data
// pointers into the 64-bit rip/rdp fields; these macros carve out the 32-bit
// offset and the 16-bit selector halves that the Windows-style CONTEXT's
// FltSave stores as separate fields.
25 #define FPREG_ErrorOffset(fpregs) *(DWORD*)&((fpregs).rip)
26 #define FPREG_ErrorSelector(fpregs) *(((WORD*)&((fpregs).rip)) + 2)
27 #define FPREG_DataOffset(fpregs) *(DWORD*)&((fpregs).rdp)
28 #define FPREG_DataSelector(fpregs) *(((WORD*)&((fpregs).rdp)) + 2)
// Global crash-state singleton defined elsewhere; used by ReadMemoryAdapter
// below to service unwinder memory reads.
30 extern CrashInfo* g_crashInfo;
// Constructs a ThreadInfo for the given OS thread id. The member initializer
// list and body continue on lines elided from this excerpt.
32 ThreadInfo::ThreadInfo(pid_t tid) :
// Destructor; body elided from this excerpt.
37 ThreadInfo::~ThreadInfo()
// Initializes this thread's state: reads the parent pid and thread-group id
// from /proc via CrashInfo::GetStatus, then captures the thread's registers —
// through the CLR data target when one is supplied, otherwise directly with
// ptrace. Return type and the early-return/brace lines are elided from this
// excerpt — presumably bool, returning false on any failure; confirm.
42 ThreadInfo::Initialize(ICLRDataTarget* pDataTarget)
// Bail if the thread's status can't be read (e.g. thread already exited).
44 if (!CrashInfo::GetStatus(m_tid, &m_ppid, &m_tgid, nullptr))
// Prefer the data target's view of the thread context when available.
48 if (pDataTarget != nullptr)
50 if (!GetRegistersWithDataTarget(pDataTarget))
// No data target: pull the register sets straight from the kernel.
56 if (!GetRegistersWithPTrace())
// Trace the initial PC/SP so a dump can be correlated to a thread per-arch.
62 #if defined(__aarch64__)
63 TRACE("Thread %04x PC %016llx SP %016llx\n", m_tid, (unsigned long long)MCREG_Pc(m_gpRegisters), (unsigned long long)MCREG_Sp(m_gpRegisters));
64 #elif defined(__arm__)
65 TRACE("Thread %04x PC %08lx SP %08lx\n", m_tid, (unsigned long)m_gpRegisters.ARM_pc, (unsigned long)m_gpRegisters.ARM_sp);
66 #elif defined(__x86_64__)
67 TRACE("Thread %04x RIP %016llx RSP %016llx\n", m_tid, (unsigned long long)m_gpRegisters.rip, (unsigned long long)m_gpRegisters.rsp);
// Fail the build on any architecture not handled above. (The #else line for
// this branch is elided from this excerpt.)
69 #error "Unsupported architecture"
// Detaches ptrace from the thread, letting it run again. On a successful
// detach, waitpid is called with __WALL (wait for any clone/thread child) —
// presumably to consume a pending stop notification; confirm against the
// attach path, which is not visible in this excerpt.
75 ThreadInfo::ResumeThread()
77 if (ptrace(PTRACE_DETACH, m_tid, nullptr, nullptr) != -1)
80 waitpid(m_tid, &waitStatus, __WALL);
84 // Helper for UnwindNativeFrames
// Extracts the instruction pointer and stack pointer from a Windows-style
// CONTEXT using the appropriate per-architecture fields. The assignment lines
// for the x86_64/i386/aarch64 branches are elided from this excerpt.
86 GetFrameLocation(CONTEXT* pContext, uint64_t* ip, uint64_t* sp)
88 #if defined(__x86_64__)
91 #elif defined(__i386__)
94 #elif defined(__aarch64__)
97 #elif defined(__arm__)
// ARM: strip the Thumb bit (bit 0) from the PC so it is a real code address.
98 *ip = pContext->Pc & ~THUMB_CODE;
103 // Helper for UnwindNativeFrames
// Read-memory callback passed to PAL_VirtualUnwindOutOfProc; forwards every
// unwinder memory read to the global CrashInfo's ReadMemory.
105 ReadMemoryAdapter(PVOID address, PVOID buffer, SIZE_T size)
107 return g_crashInfo->ReadMemory(address, buffer, size);
// Walks the native frames starting at *pContext, recording the memory each
// frame touches into the core dump. Loop structure (the enclosing for/while
// and the break/continue lines) is elided from this excerpt.
111 ThreadInfo::UnwindNativeFrames(CrashInfo& crashInfo, CONTEXT* pContext)
// SP of the previous frame; used below to detect a non-advancing unwind.
113 uint64_t previousSp = 0;
115 // For each native frame
118 uint64_t ip = 0, sp = 0;
119 GetFrameLocation(pContext, &ip, &sp);
121 TRACE("Unwind: sp %" PRIA PRIx64 " ip %" PRIA PRIx64 "\n", sp, ip);
// Stop when the unwind reaches the end (ip == 0) or stops making progress
// (stack pointers must strictly increase as we walk toward older frames).
122 if (ip == 0 || sp <= previousSp) {
126 // Add two pages around the instruction pointer to the core dump
127 crashInfo.InsertMemoryRegion(ip - PAGE_SIZE, PAGE_SIZE * 2);
129 // Look up the ip address to get the module base address
130 uint64_t baseAddress = crashInfo.GetBaseAddress(ip);
// Without a module base the unwinder can't find unwind info for this ip.
131 if (baseAddress == 0) {
132 TRACE("Unwind: module base not found ip %" PRIA PRIx64 "\n", ip);
136 // Unwind the native frame adding all the memory accessed to the
137 // core dump via the read memory adapter.
138 if (!PAL_VirtualUnwindOutOfProc(pContext, nullptr, baseAddress, ReadMemoryAdapter)) {
139 TRACE("Unwind: PAL_VirtualUnwindOutOfProc returned false\n");
// Unwinds this thread's full stack for the dump: first the native frames from
// the thread's live context, then — if a CLR data process is available — each
// managed frame's context is fetched from the managed stack walker and the
// native frames following it are unwound as well. Several brace/return lines
// are elided from this excerpt.
147 ThreadInfo::UnwindThread(CrashInfo& crashInfo, IXCLRDataProcess* pClrDataProcess)
149 TRACE("Unwind: thread %04x\n", Tid());
151 // Get starting native context for the thread
153 GetThreadContext(CONTEXT_ALL, &context);
155 // Unwind the native frames at the top of the stack
156 UnwindNativeFrames(crashInfo, &context);
// Only attempt a managed walk when the runtime's data interface exists.
158 if (pClrDataProcess != nullptr)
160 ReleaseHolder<IXCLRDataTask> pTask;
161 ReleaseHolder<IXCLRDataStackWalk> pStackwalk;
163 // Get the managed stack walker for this thread
164 if (SUCCEEDED(pClrDataProcess->GetTaskByOSThreadID(Tid(), &pTask)))
// Ask for all frame kinds so no managed transition is skipped.
// NOTE(review): the out-parameter argument of CreateStackWalk (presumably
// &pStackwalk) falls on a line elided from this excerpt.
166 pTask->CreateStackWalk(
167 CLRDATA_SIMPFRAME_UNRECOGNIZED |
168 CLRDATA_SIMPFRAME_MANAGED_METHOD |
169 CLRDATA_SIMPFRAME_RUNTIME_MANAGED_CODE |
170 CLRDATA_SIMPFRAME_RUNTIME_UNMANAGED_CODE,
174 // For each managed frame (if any)
175 if (pStackwalk != nullptr)
177 TRACE("Unwind: managed frames\n")\u003b
180 // Get the managed stack frame context
181 if (pStackwalk->GetContext(CONTEXT_ALL, sizeof(context), nullptr, (BYTE *)&context) != S_OK) {
182 TRACE("Unwind: stack walker GetContext FAILED\n");
186 // Unwind all the native frames after the managed frame
187 UnwindNativeFrames(crashInfo, &context);
// do/while over the walker: advance until no more managed frames.
189 } while (pStackwalk->Next() == S_OK);
// Reads this thread's general-purpose and floating-point register sets from
// the kernel with ptrace. aarch64 uses the regset API (PTRACE_GETREGSET with
// an iovec); the other architectures use the legacy GETREGS/GETFPREGS
// requests, plus GETFPXREGS on i386 and GETVFPREGS on hard-float ARM.
// Return statements and closing braces are elided from this excerpt —
// presumably returns false after each fprintf failure path; confirm.
197 ThreadInfo::GetRegistersWithPTrace()
199 #if defined(__aarch64__)
// iovec in/out: the kernel writes the regs and updates iov_len to the size
// actually filled, which the asserts below cross-check.
200 struct iovec gpRegsVec = { &m_gpRegisters, sizeof(m_gpRegisters) };
201 if (ptrace((__ptrace_request)PTRACE_GETREGSET, m_tid, NT_PRSTATUS, &gpRegsVec) == -1)
203 fprintf(stderr, "ptrace(PTRACE_GETREGSET, %d, NT_PRSTATUS) FAILED %d (%s)\n", m_tid, errno, strerror(errno));
206 assert(sizeof(m_gpRegisters) == gpRegsVec.iov_len);
208 struct iovec fpRegsVec = { &m_fpRegisters, sizeof(m_fpRegisters) };
209 if (ptrace((__ptrace_request)PTRACE_GETREGSET, m_tid, NT_FPREGSET, &fpRegsVec) == -1)
211 fprintf(stderr, "ptrace(PTRACE_GETREGSET, %d, NT_FPREGSET) FAILED %d (%s)\n", m_tid, errno, strerror(errno));
214 assert(sizeof(m_fpRegisters) == fpRegsVec.iov_len);
// Non-aarch64 path (the #else line is elided from this excerpt).
216 if (ptrace((__ptrace_request)PTRACE_GETREGS, m_tid, nullptr, &m_gpRegisters) == -1)
218 fprintf(stderr, "ptrace(GETREGS, %d) FAILED %d (%s)\n", m_tid, errno, strerror(errno));
221 if (ptrace((__ptrace_request)PTRACE_GETFPREGS, m_tid, nullptr, &m_fpRegisters) == -1)
223 fprintf(stderr, "ptrace(GETFPREGS, %d) FAILED %d (%s)\n", m_tid, errno, strerror(errno));
// i386 additionally has the extended (SSE) FP register set.
226 #if defined(__i386__)
227 if (ptrace((__ptrace_request)PTRACE_GETFPXREGS, m_tid, nullptr, &m_fpxRegisters) == -1)
229 fprintf(stderr, "ptrace(GETFPXREGS, %d) FAILED %d (%s)\n", m_tid, errno, strerror(errno));
// Hard-float ARM: fetch the VFP registers as well.
232 #elif defined(__arm__) && defined(__VFP_FP__) && !defined(__SOFTFP__)
234 #if defined(ARM_VFPREGS_SIZE)
235 assert(sizeof(m_vfpRegisters) == ARM_VFPREGS_SIZE);
238 if (ptrace((__ptrace_request)PTRACE_GETVFPREGS, m_tid, nullptr, &m_vfpRegisters) == -1)
240 fprintf(stderr, "ptrace(PTRACE_GETVFPREGS, %d) FAILED %d (%s)\n", m_tid, errno, strerror(errno));
// Captures this thread's registers through the CLR data target instead of
// ptrace: fetches a Windows-style CONTEXT from ICLRDataTarget and copies it
// field-by-field into the native Linux register structs (m_gpRegisters,
// m_fpRegisters, m_vfpRegisters) that the dump writer consumes. The inverse
// of GetThreadContext below. Return statements and braces are elided from
// this excerpt — presumably returns false when GetThreadContext fails.
249 ThreadInfo::GetRegistersWithDataTarget(ICLRDataTarget* pDataTarget)
252 context.ContextFlags = CONTEXT_ALL;
253 if (pDataTarget->GetThreadContext(m_tid, context.ContextFlags, sizeof(context), reinterpret_cast<PBYTE>(&context)) != S_OK)
// ---- x86_64: general-purpose, segment, and control registers ----
257 #if defined(__x86_64__)
258 m_gpRegisters.rbp = context.Rbp;
259 m_gpRegisters.rip = context.Rip;
260 m_gpRegisters.cs = context.SegCs;
261 m_gpRegisters.eflags = context.EFlags;
262 m_gpRegisters.ss = context.SegSs;
263 m_gpRegisters.rsp = context.Rsp;
264 m_gpRegisters.rdi = context.Rdi;
266 m_gpRegisters.rsi = context.Rsi;
267 m_gpRegisters.rbx = context.Rbx;
268 m_gpRegisters.rdx = context.Rdx;
269 m_gpRegisters.rcx = context.Rcx;
270 m_gpRegisters.rax = context.Rax;
// The CONTEXT has no orig_rax (syscall-restart) slot; mirror rax into it.
271 m_gpRegisters.orig_rax = context.Rax;
272 m_gpRegisters.r8 = context.R8;
273 m_gpRegisters.r9 = context.R9;
274 m_gpRegisters.r10 = context.R10;
275 m_gpRegisters.r11 = context.R11;
276 m_gpRegisters.r12 = context.R12;
277 m_gpRegisters.r13 = context.R13;
278 m_gpRegisters.r14 = context.R14;
279 m_gpRegisters.r15 = context.R15;
281 m_gpRegisters.ds = context.SegDs;
282 m_gpRegisters.es = context.SegEs;
283 m_gpRegisters.fs = context.SegFs;
284 m_gpRegisters.gs = context.SegGs;
// The CONTEXT carries no fs_base/gs_base values, so zero them here.
285 m_gpRegisters.fs_base = 0;
286 m_gpRegisters.gs_base = 0;
// ---- x86_64: x87/SSE state from FltSave ----
288 m_fpRegisters.cwd = context.FltSave.ControlWord;
289 m_fpRegisters.swd = context.FltSave.StatusWord;
290 m_fpRegisters.ftw = context.FltSave.TagWord;
291 m_fpRegisters.fop = context.FltSave.ErrorOpcode;
// Pack the separate offset/selector fields into the rip/rdp unions (see the
// FPREG_* macros at the top of the file).
293 FPREG_ErrorOffset(m_fpRegisters) = context.FltSave.ErrorOffset;
294 FPREG_ErrorSelector(m_fpRegisters) = context.FltSave.ErrorSelector;
295 FPREG_DataOffset(m_fpRegisters) = context.FltSave.DataOffset;
296 FPREG_DataSelector(m_fpRegisters) = context.FltSave.DataSelector;
298 m_fpRegisters.mxcsr = context.FltSave.MxCsr;
299 m_fpRegisters.mxcr_mask = context.FltSave.MxCsr_Mask;
301 assert(sizeof(context.FltSave.FloatRegisters) == sizeof(m_fpRegisters.st_space));
302 memcpy(m_fpRegisters.st_space, context.FltSave.FloatRegisters, sizeof(m_fpRegisters.st_space));
304 assert(sizeof(context.FltSave.XmmRegisters) == sizeof(m_fpRegisters.xmm_space));
305 memcpy(m_fpRegisters.xmm_space, context.FltSave.XmmRegisters, sizeof(m_fpRegisters.xmm_space));
// ---- aarch64: x0-x28 block-copied, then fp/lr/sp/pc/pstate via macros ----
306 #elif defined(__aarch64__)
307 // See MCREG maps in PAL's context.h
308 assert(sizeof(m_gpRegisters.regs) == (sizeof(context.X) + sizeof(context.Fp) + sizeof(context.Lr)));
309 memcpy(m_gpRegisters.regs, context.X, sizeof(context.X));
310 MCREG_Fp(m_gpRegisters) = context.Fp;
311 MCREG_Lr(m_gpRegisters) = context.Lr;
312 MCREG_Sp(m_gpRegisters) = context.Sp;
313 MCREG_Pc(m_gpRegisters) = context.Pc;
314 MCREG_Cpsr(m_gpRegisters) = context.Cpsr;
316 assert(sizeof(m_fpRegisters.vregs) == sizeof(context.V));
317 memcpy(m_fpRegisters.vregs, context.V, sizeof(context.V));
318 m_fpRegisters.fpcr = context.Fpcr;
319 m_fpRegisters.fpsr = context.Fpsr;
// ---- arm: individual registers, plus VFP state on hard-float builds ----
320 #elif defined(__arm__)
321 m_gpRegisters.ARM_sp = context.Sp;
322 m_gpRegisters.ARM_lr = context.Lr;
323 m_gpRegisters.ARM_pc = context.Pc;
324 m_gpRegisters.ARM_cpsr = context.Cpsr;
326 m_gpRegisters.ARM_r0 = context.R0;
// No syscall-restart value in the CONTEXT; mirror r0 (same as orig_rax above).
327 m_gpRegisters.ARM_ORIG_r0 = context.R0;
328 m_gpRegisters.ARM_r1 = context.R1;
329 m_gpRegisters.ARM_r2 = context.R2;
330 m_gpRegisters.ARM_r3 = context.R3;
331 m_gpRegisters.ARM_r4 = context.R4;
332 m_gpRegisters.ARM_r5 = context.R5;
333 m_gpRegisters.ARM_r6 = context.R6;
334 m_gpRegisters.ARM_r7 = context.R7;
335 m_gpRegisters.ARM_r8 = context.R8;
336 m_gpRegisters.ARM_r9 = context.R9;
337 m_gpRegisters.ARM_r10 = context.R10;
338 m_gpRegisters.ARM_fp = context.R11;
339 m_gpRegisters.ARM_ip = context.R12;
341 #if defined(__VFP_FP__) && !defined(__SOFTFP__)
342 m_vfpRegisters.fpscr = context.Fpscr;
344 assert(sizeof(context.D) == sizeof(m_vfpRegisters.fpregs));
345 memcpy(m_vfpRegisters.fpregs, context.D, sizeof(context.D));
348 #error Platform not supported
// Adds this thread's stack memory to the core dump. The start address is the
// current SP rounded down to a page boundary; the size defaults to four pages
// but is extended to the end of the matching mapping when the stack is found
// in the process's memory mappings. Declaration of `size` and some braces are
// elided from this excerpt.
354 ThreadInfo::GetThreadStack(CrashInfo& crashInfo)
356 uint64_t startAddress;
// Page-align the stack pointer for the architecture in use. (The #else for
// the x86_64 branch is elided from this excerpt.)
359 #if defined(__aarch64__)
360 startAddress = MCREG_Sp(m_gpRegisters) & PAGE_MASK;
361 #elif defined(__arm__)
362 startAddress = m_gpRegisters.ARM_sp & PAGE_MASK;
364 startAddress = m_gpRegisters.rsp & PAGE_MASK;
// Conservative fallback size when no mapping is found for the stack.
366 size = 4 * PAGE_SIZE;
// Probe the "other mappings" list for the region containing the stack page.
368 MemoryRegion search(0, startAddress, startAddress + PAGE_SIZE);
369 const MemoryRegion* region = CrashInfo::SearchMemoryRegions(crashInfo.OtherMappings(), search);
370 if (region != nullptr) {
372 // Use the mapping found for the size of the thread's stack
373 size = region->EndAddress() - startAddress;
377 TRACE("Thread %04x stack found in other mapping (size %08zx): ", m_tid, size);
381 crashInfo.InsertMemoryRegion(startAddress, size);
// Fills a Windows-style CONTEXT from the native Linux register structs
// captured earlier — the inverse of GetRegistersWithDataTarget above. Only
// the register groups selected by `flags` (CONTEXT_CONTROL, CONTEXT_INTEGER,
// CONTEXT_SEGMENTS, CONTEXT_FLOATING_POINT) are copied. Braces and any
// trailing lines are elided from this excerpt.
385 ThreadInfo::GetThreadContext(uint32_t flags, CONTEXT* context) const
387 context->ContextFlags = flags;
388 #if defined(__x86_64__)
// Control registers: frame/instruction/stack pointers, flags, cs/ss.
389 if ((flags & CONTEXT_CONTROL) == CONTEXT_CONTROL)
391 context->Rbp = m_gpRegisters.rbp;
392 context->Rip = m_gpRegisters.rip;
393 context->SegCs = m_gpRegisters.cs;
394 context->EFlags = m_gpRegisters.eflags;
395 context->SegSs = m_gpRegisters.ss;
396 context->Rsp = m_gpRegisters.rsp;
// General-purpose integer registers.
398 if ((flags & CONTEXT_INTEGER) == CONTEXT_INTEGER)
400 context->Rdi = m_gpRegisters.rdi;
401 context->Rsi = m_gpRegisters.rsi;
402 context->Rbx = m_gpRegisters.rbx;
403 context->Rdx = m_gpRegisters.rdx;
404 context->Rcx = m_gpRegisters.rcx;
405 context->Rax = m_gpRegisters.rax;
406 context->R8 = m_gpRegisters.r8;
407 context->R9 = m_gpRegisters.r9;
408 context->R10 = m_gpRegisters.r10;
409 context->R11 = m_gpRegisters.r11;
410 context->R12 = m_gpRegisters.r12;
411 context->R13 = m_gpRegisters.r13;
412 context->R14 = m_gpRegisters.r14;
413 context->R15 = m_gpRegisters.r15;
// Data/extra segment registers.
415 if ((flags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS)
417 context->SegDs = m_gpRegisters.ds;
418 context->SegEs = m_gpRegisters.es;
419 context->SegFs = m_gpRegisters.fs;
420 context->SegGs = m_gpRegisters.gs;
// x87/SSE state back into FltSave; the FPREG_* macros unpack the error/data
// offset+selector pairs from the kernel's packed rip/rdp fields.
422 if ((flags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT)
424 context->FltSave.ControlWord = m_fpRegisters.cwd;
425 context->FltSave.StatusWord = m_fpRegisters.swd;
426 context->FltSave.TagWord = m_fpRegisters.ftw;
427 context->FltSave.ErrorOpcode = m_fpRegisters.fop;
429 context->FltSave.ErrorOffset = FPREG_ErrorOffset(m_fpRegisters);
430 context->FltSave.ErrorSelector = FPREG_ErrorSelector(m_fpRegisters);
431 context->FltSave.DataOffset = FPREG_DataOffset(m_fpRegisters);
432 context->FltSave.DataSelector = FPREG_DataSelector(m_fpRegisters);
434 context->FltSave.MxCsr = m_fpRegisters.mxcsr;
435 context->FltSave.MxCsr_Mask = m_fpRegisters.mxcr_mask;
437 assert(sizeof(context->FltSave.FloatRegisters) == sizeof(m_fpRegisters.st_space));
438 memcpy(context->FltSave.FloatRegisters, m_fpRegisters.st_space, sizeof(context->FltSave.FloatRegisters));
440 assert(sizeof(context->FltSave.XmmRegisters) == sizeof(m_fpRegisters.xmm_space));
441 memcpy(context->FltSave.XmmRegisters, m_fpRegisters.xmm_space, sizeof(context->FltSave.XmmRegisters));
443 // TODO: debug registers?
444 #elif defined(__aarch64__)
// Control registers via the MCREG_* accessor macros defined above.
445 if ((flags & CONTEXT_CONTROL) == CONTEXT_CONTROL)
447 context->Fp = MCREG_Fp(m_gpRegisters);
448 context->Lr = MCREG_Lr(m_gpRegisters);
449 context->Sp = MCREG_Sp(m_gpRegisters);
450 context->Pc = MCREG_Pc(m_gpRegisters);
451 context->Cpsr = MCREG_Cpsr(m_gpRegisters);
// x0-x28 copied as one block; fp/lr live at the tail of regs[] (see assert).
453 if ((flags & CONTEXT_INTEGER) == CONTEXT_INTEGER)
455 assert(sizeof(m_gpRegisters.regs) == (sizeof(context->X) + sizeof(context->Fp) + sizeof(context->Lr)));
456 memcpy(context->X, m_gpRegisters.regs, sizeof(context->X));
458 if ((flags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT)
460 assert(sizeof(m_fpRegisters.vregs) == sizeof(context->V));
461 memcpy(context->V, m_fpRegisters.vregs, sizeof(context->V));
462 context->Fpcr = m_fpRegisters.fpcr;
463 context->Fpsr = m_fpRegisters.fpsr;
465 #elif defined(__arm__)
466 if ((flags & CONTEXT_CONTROL) == CONTEXT_CONTROL)
468 context->Sp = m_gpRegisters.ARM_sp;
469 context->Lr = m_gpRegisters.ARM_lr;
470 context->Pc = m_gpRegisters.ARM_pc;
471 context->Cpsr = m_gpRegisters.ARM_cpsr;
473 if ((flags & CONTEXT_INTEGER) == CONTEXT_INTEGER)
475 context->R0 = m_gpRegisters.ARM_r0;
476 context->R1 = m_gpRegisters.ARM_r1;
477 context->R2 = m_gpRegisters.ARM_r2;
478 context->R3 = m_gpRegisters.ARM_r3;
479 context->R4 = m_gpRegisters.ARM_r4;
480 context->R5 = m_gpRegisters.ARM_r5;
481 context->R6 = m_gpRegisters.ARM_r6;
482 context->R7 = m_gpRegisters.ARM_r7;
483 context->R8 = m_gpRegisters.ARM_r8;
484 context->R9 = m_gpRegisters.ARM_r9;
485 context->R10 = m_gpRegisters.ARM_r10;
// r11/r12 are named fp/ip in the kernel's ARM register struct.
486 context->R11 = m_gpRegisters.ARM_fp;
487 context->R12 = m_gpRegisters.ARM_ip;
// VFP state only exists on hard-float builds.
489 if ((flags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT)
491 #if defined(__VFP_FP__) && !defined(__SOFTFP__)
492 context->Fpscr = m_vfpRegisters.fpscr;
494 assert(sizeof(context->D) == sizeof(m_vfpRegisters.fpregs));
495 memcpy(context->D, m_vfpRegisters.fpregs, sizeof(context->D));
499 #error Platform not supported