Linux        i386, x86_64, ARM64  Clang, GCC   DWARF CFI
Mac OS X     i386, x86_64         Clang, GCC   DWARF CFI
NetBSD       x86_64               Clang, GCC   DWARF CFI
+Windows      i386, x86_64         Clang        DWARF CFI
============ ==================== ============ ========================
The following minimum compiler versions are strongly recommended.
# define _LIBUNWIND_HIGHEST_DWARF_REGISTER 8
# elif defined(__x86_64__)
# define _LIBUNWIND_TARGET_X86_64 1
-# define _LIBUNWIND_CONTEXT_SIZE 21
-# define _LIBUNWIND_CURSOR_SIZE 33
-# define _LIBUNWIND_HIGHEST_DWARF_REGISTER 16
+# if defined(_WIN64)
+# define _LIBUNWIND_CONTEXT_SIZE 54
+# define _LIBUNWIND_CURSOR_SIZE 66
+# define _LIBUNWIND_HIGHEST_DWARF_REGISTER 32
+# else
+# define _LIBUNWIND_CONTEXT_SIZE 21
+# define _LIBUNWIND_CURSOR_SIZE 33
+# define _LIBUNWIND_HIGHEST_DWARF_REGISTER 16
+# endif
# elif defined(__ppc__)
# define _LIBUNWIND_TARGET_PPC 1
# define _LIBUNWIND_CONTEXT_SIZE 117
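
For reference, some arithmetic of my own on the new x86_64 sizes (not spelled out in the patch): _LIBUNWIND_CONTEXT_SIZE counts 64-bit words, so the Win64 value of 54 is the existing 21 slots (rax through gs) plus one padding slot plus 16 XMM registers at two words each, and the cursor remains 12 words larger than the context (66 vs. 54, just as 33 vs. 21 before).

    // Illustrative arithmetic only; the constant names are mine.
    enum : int {
      kGprWords = 21,     // rax .. gs, as in the non-Windows layout
      kPadWords = 1,      // the _WIN64-only padding slot
      kXmmWords = 16 * 2  // 16 XMM registers, two 64-bit words each
    };
    static_assert(kGprWords + kPadWords + kXmmWords == 54,
                  "matches the _WIN64 _LIBUNWIND_CONTEXT_SIZE above");
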
UNW_X86_64_R12 = 12,
UNW_X86_64_R13 = 13,
UNW_X86_64_R14 = 14,
- UNW_X86_64_R15 = 15
+ UNW_X86_64_R15 = 15,
+ UNW_X86_64_RIP = 16,
+ UNW_X86_64_XMM0 = 17,
+ UNW_X86_64_XMM1 = 18,
+ UNW_X86_64_XMM2 = 19,
+ UNW_X86_64_XMM3 = 20,
+ UNW_X86_64_XMM4 = 21,
+ UNW_X86_64_XMM5 = 22,
+ UNW_X86_64_XMM6 = 23,
+ UNW_X86_64_XMM7 = 24,
+ UNW_X86_64_XMM8 = 25,
+ UNW_X86_64_XMM9 = 26,
+ UNW_X86_64_XMM10 = 27,
+ UNW_X86_64_XMM11 = 28,
+ UNW_X86_64_XMM12 = 29,
+ UNW_X86_64_XMM13 = 30,
+ UNW_X86_64_XMM14 = 31,
+ UNW_X86_64_XMM15 = 32,
};
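
These enumerators follow the System V AMD64 psABI DWARF numbering (return address/rip is 16, xmm0 through xmm15 are 17 through 32), so DWARF CFI register numbers can be used directly. A hypothetical helper (name and function are mine, not libunwind's) mapping such a number to an index into the XMM save area:

    // Hypothetical helper; assumes <libunwind.h> for the UNW_X86_64_XMM* enumerators.
    #include <libunwind.h>
    static int xmmIndexFromRegNum(int regnum) {
      if (regnum < UNW_X86_64_XMM0 || regnum > UNW_X86_64_XMM15)
        return -1;                     // not an XMM register
      return regnum - UNW_X86_64_XMM0; // 0 .. 15
    }
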
_Unwind_Exception *exc);
uintptr_t private_1; // non-zero means forced unwind
uintptr_t private_2; // holds sp that phase1 found for phase2 to use
-#ifndef __LP64__
+#if __SIZEOF_POINTER__ == 4
// The implementation of _Unwind_Exception uses an attribute mode on the
// above fields which has the side effect of causing this whole struct to
// round up to 32 bytes in size. To be more explicit, we add pad fields
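
The pointer-size check replaces __LP64__ because Windows uses the LLP64 data model: long stays 32 bits on 64-bit Windows, so __LP64__ is never defined there, while __SIZEOF_POINTER__ (a Clang/GCC predefine) reports the actual pointer width. A throwaway check of that assumption:

    // Sketch only; __SIZEOF_POINTER__ is a Clang/GCC predefine.
    #if defined(_WIN64)
    static_assert(sizeof(void *) == 8, "64-bit pointers");
    static_assert(sizeof(long) == 4, "LLP64: long stays 32-bit");
    #endif
    static_assert(__SIZEOF_POINTER__ == sizeof(void *),
                  "the macro tracks the actual pointer width");
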
/// making local unwinds fast.
class __attribute__((visibility("hidden"))) LocalAddressSpace {
public:
-#ifdef __LP64__
- typedef uint64_t pint_t;
- typedef int64_t sint_t;
-#else
- typedef uint32_t pint_t;
- typedef int32_t sint_t;
-#endif
+ typedef uintptr_t pint_t;
+ typedef intptr_t sint_t;
uint8_t get8(pint_t addr) {
uint8_t val;
memcpy(&val, (void *)addr, sizeof(val));
};
inline uintptr_t LocalAddressSpace::getP(pint_t addr) {
-#ifdef __LP64__
+#if __SIZEOF_POINTER__ == 8
return get64(addr);
#else
return get32(addr);
bool validFloatRegister(int) const { return false; }
double getFloatRegister(int num) const;
void setFloatRegister(int num, double value);
- bool validVectorRegister(int) const { return false; }
+ bool validVectorRegister(int) const;
v128 getVectorRegister(int num) const;
void setVectorRegister(int num, v128 value);
const char *getRegisterName(int num);
uint64_t __cs;
uint64_t __fs;
uint64_t __gs;
+#if defined(_WIN64)
+ uint64_t __padding; // 16-byte align
+#endif
};
GPRs _registers;
+#if defined(_WIN64)
+ v128 _xmm[16];
+#endif
};
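
Connecting the new fields to the assembly further down: with the extra padding slot, the XMM save area begins 176 bytes into the context, which is the 176(%rdi)/176(PTR) base offset used there. A mirror of the layout (type names are mine) to check that:

    // Illustrative mirror of the layout above; not libunwind's own types.
    #include <cstddef>
    #include <cstdint>
    struct V128Mirror { uint32_t v[4]; };   // stand-in for v128
    struct Win64ContextMirror {
      uint64_t gprs[21];    // rax .. gs, as in GPRs above
      uint64_t padding;     // the _WIN64-only __padding slot
      V128Mirror xmm[16];   // the _WIN64-only _xmm array
    };
    static_assert(offsetof(Win64ContextMirror, xmm) == 176,
                  "XMM save area starts at byte offset 176");
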
inline Registers_x86_64::Registers_x86_64(const void *registers) {
return "r14";
case UNW_X86_64_R15:
return "r15";
+ case UNW_X86_64_XMM0:
+ return "xmm0";
+ case UNW_X86_64_XMM1:
+ return "xmm1";
+ case UNW_X86_64_XMM2:
+ return "xmm2";
+ case UNW_X86_64_XMM3:
+ return "xmm3";
+ case UNW_X86_64_XMM4:
+ return "xmm4";
+ case UNW_X86_64_XMM5:
+ return "xmm5";
+ case UNW_X86_64_XMM6:
+ return "xmm6";
+ case UNW_X86_64_XMM7:
+ return "xmm7";
+ case UNW_X86_64_XMM8:
+ return "xmm8";
+ case UNW_X86_64_XMM9:
+ return "xmm9";
+ case UNW_X86_64_XMM10:
+ return "xmm10";
+ case UNW_X86_64_XMM11:
+ return "xmm11";
+ case UNW_X86_64_XMM12:
+ return "xmm12";
+ case UNW_X86_64_XMM13:
+ return "xmm13";
+ case UNW_X86_64_XMM14:
+ return "xmm14";
+ case UNW_X86_64_XMM15:
+ return "xmm15";
default:
return "unknown register";
}
_LIBUNWIND_ABORT("no x86_64 float registers");
}
-inline v128 Registers_x86_64::getVectorRegister(int) const {
+inline bool Registers_x86_64::validVectorRegister(int regNum) const {
+#if defined(_WIN64)
+ if (regNum < UNW_X86_64_XMM0)
+ return false;
+ if (regNum > UNW_X86_64_XMM15)
+ return false;
+ return true;
+#else
+ return false;
+#endif
+}
+
+inline v128 Registers_x86_64::getVectorRegister(int regNum) const {
+#if defined(_WIN64)
+ assert(validVectorRegister(regNum));
+ return _xmm[regNum - UNW_X86_64_XMM0];
+#else
_LIBUNWIND_ABORT("no x86_64 vector registers");
+#endif
}
-inline void Registers_x86_64::setVectorRegister(int, v128) {
+inline void Registers_x86_64::setVectorRegister(int regNum, v128 value) {
+#if defined(_WIN64)
+ assert(validVectorRegister(regNum));
+ _xmm[regNum - UNW_X86_64_XMM0] = value;
+#else
_LIBUNWIND_ABORT("no x86_64 vector registers");
+#endif
}
#endif // _LIBUNWIND_TARGET_X86_64
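
A sketch of how code inside libunwind might use the new accessors when restoring a callee-saved XMM register (on Win64, xmm6 through xmm15 are callee-saved, so CFI can describe saves of them); the function below is illustrative, not from the patch:

    // Illustrative only; assumes the libunwind-internal Registers_x86_64 and
    // v128 types from Registers.hpp are visible.
    static void restoreXmmIfSupported(libunwind::Registers_x86_64 &regs,
                                      int unwRegNum, libunwind::v128 value) {
      if (regs.validVectorRegister(unwRegNum)) // true only in _WIN64 builds
        regs.setVectorRegister(unwRegNum, value);
    }
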
// this frame.
if (frameInfo.handler != 0) {
__personality_routine p =
- (__personality_routine)(long)(frameInfo.handler);
+ (__personality_routine)(uintptr_t)(frameInfo.handler);
_LIBUNWIND_TRACE_UNWINDING(
"unwind_phase1(ex_ojb=%p): calling personality function %p",
(void *)exception_object, (void *)(uintptr_t)p);
// If there is a personality routine, tell it we are unwinding.
if (frameInfo.handler != 0) {
__personality_routine p =
- (__personality_routine)(long)(frameInfo.handler);
+ (__personality_routine)(uintptr_t)(frameInfo.handler);
_Unwind_Action action = _UA_CLEANUP_PHASE;
if (sp == exception_object->private_2) {
// Tell personality this was the frame it marked in phase 1.
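
Both casts move from (long) to (uintptr_t) because, as noted above for LLP64, long is only 32 bits wide on Win64, so routing a 64-bit handler address through it could truncate the pointer. A standalone demonstration of the hazard (the address is invented):

    // Demonstration only; the address value is made up.
    #include <cstdint>
    #include <cstdio>
    int main() {
      std::uintptr_t handler = 0x00007ff6deadbeefULL;
      std::uintptr_t via_long = (std::uintptr_t)(long)handler; // drops the upper half when long is 32-bit
      std::printf("%llx -> %llx\n", (unsigned long long)handler,
                  (unsigned long long)via_long);
    }
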
#
# void libunwind::Registers_x86_64::jumpto()
#
+#if defined(_WIN64)
+# On entry, thread_state pointer is in rcx; move it into rdi
+# to share restore code below. Since this routine restores and
+# overwrites all registers, we can use the same registers for
+# pointers and temporaries as on Unix, even though the Win64 calling
+# convention normally requires some of them to be preserved.
+ movq %rcx, %rdi
+#else
# On entry, thread_state pointer is in rdi
+#endif
movq 56(%rdi), %rax # rax holds new stack pointer
subq $16, %rax
# skip cs
# skip fs
# skip gs
+
+#if defined(_WIN64)
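+# The XMM save area begins at byte offset 176 (22 eight-byte slots: the GPR
+# fields plus the padding word); movdqu is safe here since unw_context_t is
+# only guaranteed 8-byte alignment.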
+ movdqu 176(%rdi),%xmm0
+ movdqu 192(%rdi),%xmm1
+ movdqu 208(%rdi),%xmm2
+ movdqu 224(%rdi),%xmm3
+ movdqu 240(%rdi),%xmm4
+ movdqu 256(%rdi),%xmm5
+ movdqu 272(%rdi),%xmm6
+ movdqu 288(%rdi),%xmm7
+ movdqu 304(%rdi),%xmm8
+ movdqu 320(%rdi),%xmm9
+ movdqu 336(%rdi),%xmm10
+ movdqu 352(%rdi),%xmm11
+ movdqu 368(%rdi),%xmm12
+ movdqu 384(%rdi),%xmm13
+ movdqu 400(%rdi),%xmm14
+ movdqu 416(%rdi),%xmm15
+#endif
movq 56(%rdi), %rsp # cut back rsp to new location
pop %rdi # rdi was saved here earlier
ret # rip was saved here
# thread_state pointer is in rdi
#
DEFINE_LIBUNWIND_FUNCTION(unw_getcontext)
- movq %rax, (%rdi)
- movq %rbx, 8(%rdi)
- movq %rcx, 16(%rdi)
- movq %rdx, 24(%rdi)
- movq %rdi, 32(%rdi)
- movq %rsi, 40(%rdi)
- movq %rbp, 48(%rdi)
- movq %rsp, 56(%rdi)
- addq $8, 56(%rdi)
- movq %r8, 64(%rdi)
- movq %r9, 72(%rdi)
- movq %r10, 80(%rdi)
- movq %r11, 88(%rdi)
- movq %r12, 96(%rdi)
- movq %r13,104(%rdi)
- movq %r14,112(%rdi)
- movq %r15,120(%rdi)
- movq (%rsp),%rsi
- movq %rsi,128(%rdi) # store return address as rip
+#if defined(_WIN64)
+#define PTR %rcx
+#define TMP %rdx
+#else
+#define PTR %rdi
+#define TMP %rsi
+#endif
+
+ movq %rax, (PTR)
+ movq %rbx, 8(PTR)
+ movq %rcx, 16(PTR)
+ movq %rdx, 24(PTR)
+ movq %rdi, 32(PTR)
+ movq %rsi, 40(PTR)
+ movq %rbp, 48(PTR)
+ movq %rsp, 56(PTR)
+ addq $8, 56(PTR)
+ movq %r8, 64(PTR)
+ movq %r9, 72(PTR)
+ movq %r10, 80(PTR)
+ movq %r11, 88(PTR)
+ movq %r12, 96(PTR)
+ movq %r13,104(PTR)
+ movq %r14,112(PTR)
+ movq %r15,120(PTR)
+ movq (%rsp),TMP
+ movq TMP,128(PTR) # store return address as rip
# skip rflags
# skip cs
# skip fs
# skip gs
+
+#if defined(_WIN64)
+ movdqu %xmm0,176(PTR)
+ movdqu %xmm1,192(PTR)
+ movdqu %xmm2,208(PTR)
+ movdqu %xmm3,224(PTR)
+ movdqu %xmm4,240(PTR)
+ movdqu %xmm5,256(PTR)
+ movdqu %xmm6,272(PTR)
+ movdqu %xmm7,288(PTR)
+ movdqu %xmm8,304(PTR)
+ movdqu %xmm9,320(PTR)
+ movdqu %xmm10,336(PTR)
+ movdqu %xmm11,352(PTR)
+ movdqu %xmm12,368(PTR)
+ movdqu %xmm13,384(PTR)
+ movdqu %xmm14,400(PTR)
+ movdqu %xmm15,416(PTR)
+#endif
xorl %eax, %eax # return UNW_ESUCCESS
ret
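
With both routines extended, the public API is unchanged; a minimal caller (assuming a build of this libunwind) still captures a context and walks the stack the usual way, and on Win64 the captured context now also carries xmm0 through xmm15:

    // Minimal sketch of the unchanged public API; link against this libunwind.
    #include <libunwind.h>
    #include <cstdio>
    int main() {
      unw_context_t ctx;
      unw_cursor_t cursor;
      unw_getcontext(&ctx);           // the routine defined above
      unw_init_local(&cursor, &ctx);
      while (unw_step(&cursor) > 0) {
        unw_word_t ip;
        unw_get_reg(&cursor, UNW_REG_IP, &ip);
        std::printf("ip = 0x%llx\n", (unsigned long long)ip);
      }
    }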