1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "linux-low.h"
27 #include "i386-xstate.h"
28 #include "elf/common.h"
30 #include "gdb_proc_service.h"
32 /* Defined in auto-generated file i386-linux.c. */
33 void init_registers_i386_linux (void);
34 /* Defined in auto-generated file amd64-linux.c. */
35 void init_registers_amd64_linux (void);
36 /* Defined in auto-generated file i386-avx-linux.c. */
37 void init_registers_i386_avx_linux (void);
38 /* Defined in auto-generated file amd64-avx-linux.c. */
39 void init_registers_amd64_avx_linux (void);
40 /* Defined in auto-generated file i386-mmx-linux.c. */
41 void init_registers_i386_mmx_linux (void);
/* Template for a 5-byte jump instruction: opcode 0xe9 (jmp rel32)
   followed by a 32-bit displacement that the jump-pad builders below
   patch in with memcpy (buf + 1, &offset, 4).  */
43 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
45 /* Backward compatibility for gdb without XML support. */
/* NOTE(review): the closing portion of these target-description string
   literals (e.g. the "</target>" terminator) is not visible here --
   lines appear to have been elided from this copy of the file.  */
47 static const char *xmltarget_i386_linux_no_xml = "@<target>\
48 <architecture>i386</architecture>\
49 <osabi>GNU/Linux</osabi>\
53 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
54 <architecture>i386:x86-64</architecture>\
55 <osabi>GNU/Linux</osabi>\
60 #include <sys/procfs.h>
61 #include <sys/ptrace.h>
/* Fallback definitions for ptrace requests and arch_prctl codes that
   older system headers may lack.  NOTE(review): the matching #endif
   lines are not visible in this copy.  */
64 #ifndef PTRACE_GETREGSET
65 #define PTRACE_GETREGSET 0x4204
68 #ifndef PTRACE_SETREGSET
69 #define PTRACE_SETREGSET 0x4205
73 #ifndef PTRACE_GET_THREAD_AREA
74 #define PTRACE_GET_THREAD_AREA 25
77 /* This definition comes from prctl.h, but some kernels may not have it. */
78 #ifndef PTRACE_ARCH_PRCTL
79 #define PTRACE_ARCH_PRCTL 30
82 /* The following definitions come from prctl.h, but may be absent
83 for certain configurations. */
/* arch_prctl sub-function codes for reading/writing the FS and GS
   segment bases on amd64.  */
85 #define ARCH_SET_GS 0x1001
86 #define ARCH_SET_FS 0x1002
87 #define ARCH_GET_FS 0x1003
88 #define ARCH_GET_GS 0x1004
91 /* Per-process arch-specific data we want to keep. */
93 struct arch_process_info
/* Mirror of the i386/amd64 hardware debug registers for this process,
   used to manage hardware watchpoints.  */
95 struct i386_debug_reg_state debug_reg_state;
98 /* Per-thread arch-specific data we want to keep. */
/* NOTE(review): the `struct arch_lwp_info` opening line and braces
   appear elided from this copy.  */
102 /* Non-zero if our copy differs from what's recorded in the thread. */
103 int debug_registers_changed;
108 /* Mapping between the general-purpose registers in `struct user'
109 format and GDB's register array layout.
110 Note that the transfer layout uses 64-bit regs. */
/* amd64 build: i386 registers mapped onto 8-byte slots of the 64-bit
   `struct user' layout (biarch debugging of 32-bit inferiors).  */
111 static /*const*/ int i386_regmap[] =
113 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
114 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
115 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
116 DS * 8, ES * 8, FS * 8, GS * 8
119 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
121 /* So code below doesn't have to care, i386 or amd64. */
122 #define ORIG_EAX ORIG_RAX
/* Native amd64 register map; -1 entries mark registers not present in
   the `struct user' general-register area (skipped by the gregset
   fill/store loops below).  */
124 static const int x86_64_regmap[] =
126 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
127 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
128 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
129 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
130 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
131 DS * 8, ES * 8, FS * 8, GS * 8,
132 -1, -1, -1, -1, -1, -1, -1, -1,
133 -1, -1, -1, -1, -1, -1, -1, -1,
134 -1, -1, -1, -1, -1, -1, -1, -1,
135 -1, -1, -1, -1, -1, -1, -1, -1, -1,
139 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
141 #else /* ! __x86_64__ */
143 /* Mapping between the general-purpose registers in `struct user'
144 format and GDB's register array layout. */
/* Native i386 build: 4-byte slots.  */
145 static /*const*/ int i386_regmap[] =
147 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
148 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
149 EIP * 4, EFL * 4, CS * 4, SS * 4,
150 DS * 4, ES * 4, FS * 4, GS * 4
153 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
157 /* Called by libthread_db. */
160 ps_get_thread_area (const struct ps_prochandle *ph,
161 lwpid_t lwpid, int idx, void **base)
/* libthread_db callback: store the thread-area (TLS) base address of
   LWPID in *BASE.  NOTE(review): several lines of this function are
   elided in this copy; the control flow below is fragmentary.  */
164 int use_64bit = register_size (0) == 8;
/* 64-bit inferior: read the FS or GS segment base via arch_prctl.  */
171 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
175 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
/* 32-bit inferior: fetch the GDT entry for IDX; entry word 1 holds
   the segment base address.  */
186 unsigned int desc[4];
188 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
189 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
192 *(int *)base = desc[1];
197 /* Get the thread area address. This is used to recognize which
198 thread is which when tracing with the in-process agent library. We
199 don't read anything from the address, and treat it as opaque; it's
200 the address itself that we assume is unique per-thread. */
203 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
/* Return in *ADDR an opaque per-thread address (the thread area base)
   for LWPID, used by the in-process agent to identify threads.
   NOTE(review): lines are elided from this function in this copy.  */
206 int use_64bit = register_size (0) == 8;
/* 64-bit path: the FS base is the thread pointer.  */
211 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
213 *addr = (CORE_ADDR) (uintptr_t) base;
/* 32-bit path: derive the GDT descriptor index from the %gs selector
   (selector = index << 3 | flags), then query the descriptor.  */
222 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
223 struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
224 unsigned int desc[4];
226 const int reg_thread_area = 3; /* bits to scale down register value. */
229 collect_register_by_name (regcache, "gs", &gs);
231 idx = gs >> reg_thread_area;
233 if (ptrace (PTRACE_GET_THREAD_AREA,
234 lwpid_of (lwp), (void *) (long) idx, (unsigned long) &desc) < 0)
/* Registers at or beyond I386_NUM_REGS are not accessible through the
   usrregs (regmap) interface.  */
245 i386_cannot_store_register (int regno)
247 return regno >= I386_NUM_REGS;
/* Likewise for fetching.  */
251 i386_cannot_fetch_register (int regno)
253 return regno >= I386_NUM_REGS;
257 x86_fill_gregset (struct regcache *regcache, void *buf)
/* Copy the general-purpose registers from REGCACHE into BUF, which is
   laid out per the kernel's elf_gregset_t / `struct user' format.  */
262 if (register_size (0) == 8)
264 for (i = 0; i < X86_64_NUM_REGS; i++)
/* -1 marks registers with no slot in the gregset.  */
265 if (x86_64_regmap[i] != -1)
266 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
271 for (i = 0; i < I386_NUM_REGS; i++)
272 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
/* orig_eax lives past the area covered by the regmap.  */
274 collect_register_by_name (regcache, "orig_eax",
275 ((char *) buf) + ORIG_EAX * 4);
279 x86_store_gregset (struct regcache *regcache, const void *buf)
/* Inverse of x86_fill_gregset: copy general-purpose registers from
   the ptrace-layout buffer BUF into REGCACHE.  */
284 if (register_size (0) == 8)
286 for (i = 0; i < X86_64_NUM_REGS; i++)
287 if (x86_64_regmap[i] != -1)
288 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
293 for (i = 0; i < I386_NUM_REGS; i++)
294 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
296 supply_register_by_name (regcache, "orig_eax",
297 ((char *) buf) + ORIG_EAX * 4);
301 x86_fill_fpregset (struct regcache *regcache, void *buf)
/* FP registers: fxsave layout vs. legacy fsave layout.  NOTE(review):
   the selector between the two calls (presumably an __x86_64__
   conditional) is elided in this copy.  */
304 i387_cache_to_fxsave (regcache, buf);
306 i387_cache_to_fsave (regcache, buf);
311 x86_store_fpregset (struct regcache *regcache, const void *buf)
314 i387_fxsave_to_cache (regcache, buf);
316 i387_fsave_to_cache (regcache, buf);
/* FPX (fxsave-format) regset accessors.  */
323 x86_fill_fpxregset (struct regcache *regcache, void *buf)
325 i387_cache_to_fxsave (regcache, buf);
329 x86_store_fpxregset (struct regcache *regcache, const void *buf)
331 i387_fxsave_to_cache (regcache, buf);
/* XSAVE extended-state regset accessors.  */
337 x86_fill_xstateregset (struct regcache *regcache, void *buf)
339 i387_cache_to_xsave (regcache, buf);
343 x86_store_xstateregset (struct regcache *regcache, const void *buf)
345 i387_xsave_to_cache (regcache, buf);
348 /* ??? The non-biarch i386 case stores all the i387 regs twice.
349 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
350 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
351 doesn't work. IWBN to avoid the duplication in the case where it
352 does work. Maybe the arch_setup routine could check whether it works
353 and update target_regsets accordingly, maybe by moving target_regsets
354 to linux_target_ops and set the right one there, rather than having to
355 modify the target_regsets global. */
/* Table of register sets and the ptrace requests / conversion
   routines used to transfer each one.  */
357 struct regset_info target_regsets[] =
359 #ifdef HAVE_PTRACE_GETREGS
360 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
362 x86_fill_gregset, x86_store_gregset },
/* Size 0 is a placeholder: x86_linux_update_xmltarget fills in the
   real XSAVE buffer size at runtime once XCR0 is known.  */
363 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
364 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
366 # ifdef HAVE_PTRACE_GETFPXREGS
367 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
369 x86_fill_fpxregset, x86_store_fpxregset },
372 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
374 x86_fill_fpregset, x86_store_fpregset },
375 #endif /* HAVE_PTRACE_GETREGS */
/* Terminator entry.  */
376 { 0, 0, 0, -1, -1, NULL, NULL }
380 x86_get_pc (struct regcache *regcache)
/* Read the program counter from REGCACHE -- "rip" for 64-bit
   inferiors, "eip" for 32-bit ones.  */
382 int use_64bit = register_size (0) == 8;
387 collect_register_by_name (regcache, "rip", &pc);
388 return (CORE_ADDR) pc;
393 collect_register_by_name (regcache, "eip", &pc);
394 return (CORE_ADDR) pc;
/* Write PC back into REGCACHE, using the width-appropriate
   register.  */
399 x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
401 int use_64bit = register_size (0) == 8;
405 unsigned long newpc = pc;
406 supply_register_by_name (regcache, "rip", &newpc);
410 unsigned int newpc = pc;
411 supply_register_by_name (regcache, "eip", &newpc);
/* The x86 software breakpoint instruction: the one-byte int3
   (0xCC).  */
415 static const unsigned char x86_breakpoint[] = { 0xCC };
416 #define x86_breakpoint_len 1
/* Return whether an int3 is present at PC.  NOTE(review): the byte
   comparison and return statements are elided in this copy.  */
419 x86_breakpoint_at (CORE_ADDR pc)
423 (*the_target->read_memory) (pc, &c, 1);
430 /* Support for debug registers. */
433 x86_linux_dr_get (ptid_t ptid, int regnum)
/* Read debug register REGNUM of the LWP denoted by PTID via
   PTRACE_PEEKUSER at its offset in `struct user'.  */
438 tid = ptid_get_lwp (ptid);
441 value = ptrace (PTRACE_PEEKUSER, tid,
442 offsetof (struct user, u_debugreg[regnum]), 0);
444 error ("Couldn't read debug register");
/* Write VALUE into debug register REGNUM of the LWP denoted by
   PTID.  */
450 x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
454 tid = ptid_get_lwp (ptid);
457 ptrace (PTRACE_POKEUSER, tid,
458 offsetof (struct user, u_debugreg[regnum]), value);
460 error ("Couldn't write debug register");
463 /* Update the inferior's debug register REGNUM from STATE. */
/* Mark every LWP of the current process as needing a debug-register
   refresh; the actual write happens in x86_linux_prepare_to_resume.  */
466 i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
468 struct inferior_list_entry *lp;
470 /* Only need to update the threads of this process. */
471 int pid = pid_of (get_thread_lwp (current_inferior));
473 if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
474 fatal ("Invalid debug register %d", regnum);
476 addr = state->dr_mirror[regnum];
478 for (lp = all_lwps.head; lp; lp = lp->next)
480 struct lwp_info *lwp = (struct lwp_info *) lp;
482 /* The actual update is done later, we just mark that the register
   needs to be updated.  */
484 if (pid_of (lwp) == pid)
485 lwp->arch_private->debug_registers_changed = 1;
489 /* Update the inferior's DR7 debug control register from STATE. */
/* Flag all LWPs of the current process so that DR7 is rewritten the
   next time each one is resumed.  */
492 i386_dr_low_set_control (const struct i386_debug_reg_state *state)
494 struct inferior_list_entry *lp;
495 /* Only need to update the threads of this process. */
496 int pid = pid_of (get_thread_lwp (current_inferior));
498 for (lp = all_lwps.head; lp; lp = lp->next)
500 struct lwp_info *lwp = (struct lwp_info *) lp;
502 /* The actual update is done later, we just mark that the register
   needs to be updated.  */
504 if (pid_of (lwp) == pid)
505 lwp->arch_private->debug_registers_changed = 1;
509 /* Get the value of the DR6 debug status register from the inferior
510 and record it in STATE. */
513 i386_dr_low_get_status (struct i386_debug_reg_state *state)
/* Read DR6 (the debug status register) from the current thread and
   mirror it into STATE.  */
515 struct lwp_info *lwp = get_thread_lwp (current_inferior);
516 ptid_t ptid = ptid_of (lwp);
518 state->dr_status_mirror = x86_linux_dr_get (ptid, DR_STATUS);
521 /* Watchpoint support. */
524 x86_insert_point (char type, CORE_ADDR addr, int len)
/* Insert a break-/watchpoint of TYPE at ADDR with length LEN.
   Software breakpoints go through set_gdb_breakpoint_at; hardware
   watchpoints go through the shared i386 debug-register layer.  */
526 struct process_info *proc = current_process ();
530 return set_gdb_breakpoint_at (addr);
534 return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
/* Remove a point previously inserted by x86_insert_point.  */
543 x86_remove_point (char type, CORE_ADDR addr, int len)
545 struct process_info *proc = current_process ();
549 return delete_gdb_breakpoint_at (addr);
553 return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
/* Return non-zero if the current thread stopped for a hardware
   watchpoint.  */
562 x86_stopped_by_watchpoint (void)
564 struct process_info *proc = current_process ();
565 return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
/* Return the data address whose access triggered the watchpoint.
   NOTE(review): the tail of this function is elided in this copy.  */
569 x86_stopped_data_address (void)
571 struct process_info *proc = current_process ();
573 if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
579 /* Called when a new process is created. */
581 static struct arch_process_info *
582 x86_linux_new_process (void)
/* Allocate zeroed per-process state and initialize the debug-register
   mirror.  */
584 struct arch_process_info *info = xcalloc (1, sizeof (*info));
586 i386_low_init_dregs (&info->debug_reg_state);
591 /* Called when a new thread is detected. */
593 static struct arch_lwp_info *
594 x86_linux_new_thread (void)
/* New threads start with unsynchronized debug registers, so force a
   write-out on the thread's first resume.  */
596 struct arch_lwp_info *info = xcalloc (1, sizeof (*info));
598 info->debug_registers_changed = 1;
603 /* Called when resuming a thread.
604 If the debug regs have changed, update the thread's copies. */
607 x86_linux_prepare_to_resume (struct lwp_info *lwp)
609 ptid_t ptid = ptid_of (lwp);
611 if (lwp->arch_private->debug_registers_changed)
/* Push the process's mirrored debug registers (addresses and DR7)
   into this thread before it runs.  */
614 int pid = ptid_get_pid (ptid);
615 struct process_info *proc = find_process_pid (pid);
616 struct i386_debug_reg_state *state = &proc->private->arch_private->debug_reg_state;
618 for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
619 x86_linux_dr_set (ptid, i, state->dr_mirror[i]);
621 x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);
623 lwp->arch_private->debug_registers_changed = 0;
/* Clear DR6 so a stale watchpoint hit is not reported again.  */
626 if (lwp->stopped_by_watchpoint)
627 x86_linux_dr_set (ptid, DR_STATUS, 0);
630 /* When GDBSERVER is built as a 64-bit application on linux, the
631 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
632 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
633 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
634 conversion in-place ourselves. */
636 /* These types below (compat_*) define a siginfo type that is layout
637 compatible with the siginfo type exported by the 32-bit userspace
/* Types layout-compatible with the 32-bit userspace siginfo, used to
   convert between 32-bit and 64-bit siginfo representations.
   NOTE(review): braces, several members, and typedef names of these
   aggregates are elided in this copy.  */
642 typedef int compat_int_t;
643 typedef unsigned int compat_uptr_t;
645 typedef int compat_time_t;
646 typedef int compat_timer_t;
647 typedef int compat_clock_t;
649 struct compat_timeval
651 compat_time_t tv_sec;
655 typedef union compat_sigval
657 compat_int_t sival_int;
658 compat_uptr_t sival_ptr;
661 typedef struct compat_siginfo
/* Pad the union out to the kernel's fixed 128-byte siginfo size.  */
669 int _pad[((128 / sizeof (int)) - 3)];
678 /* POSIX.1b timers */
683 compat_sigval_t _sigval;
686 /* POSIX.1b signals */
691 compat_sigval_t _sigval;
700 compat_clock_t _utime;
701 compat_clock_t _stime;
704 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
/* Shorthand accessors into the compat union members.  */
719 #define cpt_si_pid _sifields._kill._pid
720 #define cpt_si_uid _sifields._kill._uid
721 #define cpt_si_timerid _sifields._timer._tid
722 #define cpt_si_overrun _sifields._timer._overrun
723 #define cpt_si_status _sifields._sigchld._status
724 #define cpt_si_utime _sifields._sigchld._utime
725 #define cpt_si_stime _sifields._sigchld._stime
726 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
727 #define cpt_si_addr _sifields._sigfault._addr
728 #define cpt_si_band _sifields._sigpoll._band
729 #define cpt_si_fd _sifields._sigpoll._fd
731 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
732 In their place is si_timer1,si_timer2. */
734 #define si_timerid si_timer1
737 #define si_overrun si_timer2
741 compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
/* Convert the native (64-bit) siginfo FROM into the 32-bit compat
   layout TO.  NOTE(review): the si_code dispatch conditions and
   switch-case labels are partially elided in this copy.  */
743 memset (to, 0, sizeof (*to));
745 to->si_signo = from->si_signo;
746 to->si_errno = from->si_errno;
747 to->si_code = from->si_code;
751 to->cpt_si_ptr = (intptr_t) from->si_ptr;
753 else if (to->si_code == SI_USER)
755 to->cpt_si_pid = from->si_pid;
756 to->cpt_si_uid = from->si_uid;
758 else if (to->si_code == SI_TIMER)
760 to->cpt_si_timerid = from->si_timerid;
761 to->cpt_si_overrun = from->si_overrun;
762 to->cpt_si_ptr = (intptr_t) from->si_ptr;
/* Otherwise the relevant fields depend on the signal number.  */
766 switch (to->si_signo)
769 to->cpt_si_pid = from->si_pid;
770 to->cpt_si_uid = from->si_uid;
771 to->cpt_si_status = from->si_status;
772 to->cpt_si_utime = from->si_utime;
773 to->cpt_si_stime = from->si_stime;
779 to->cpt_si_addr = (intptr_t) from->si_addr;
782 to->cpt_si_band = from->si_band;
783 to->cpt_si_fd = from->si_fd;
786 to->cpt_si_pid = from->si_pid;
787 to->cpt_si_uid = from->si_uid;
788 to->cpt_si_ptr = (intptr_t) from->si_ptr;
795 siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
/* Inverse of compat_siginfo_from_siginfo: expand the 32-bit compat
   siginfo FROM into the native (64-bit) layout TO.  NOTE(review):
   dispatch conditions and case labels are partially elided here.  */
797 memset (to, 0, sizeof (*to));
799 to->si_signo = from->si_signo;
800 to->si_errno = from->si_errno;
801 to->si_code = from->si_code;
805 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
807 else if (to->si_code == SI_USER)
809 to->si_pid = from->cpt_si_pid;
810 to->si_uid = from->cpt_si_uid;
812 else if (to->si_code == SI_TIMER)
814 to->si_timerid = from->cpt_si_timerid;
815 to->si_overrun = from->cpt_si_overrun;
816 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
/* Otherwise the relevant fields depend on the signal number.  */
820 switch (to->si_signo)
823 to->si_pid = from->cpt_si_pid;
824 to->si_uid = from->cpt_si_uid;
825 to->si_status = from->cpt_si_status;
826 to->si_utime = from->cpt_si_utime;
827 to->si_stime = from->cpt_si_stime;
833 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
836 to->si_band = from->cpt_si_band;
837 to->si_fd = from->cpt_si_fd;
840 to->si_pid = from->cpt_si_pid;
841 to->si_uid = from->cpt_si_uid;
842 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
848 #endif /* __x86_64__ */
850 /* Convert a native/host siginfo object, into/from the siginfo in the
851 layout of the inferiors' architecture. Returns true if any
852 conversion was done; false otherwise. If DIRECTION is 1, then copy
853 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
857 x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
860 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
861 if (register_size (0) == 4)
/* Sanity check: the compat layout must occupy the same space as the
   native siginfo or the in-place conversion would corrupt memory.  */
863 if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
864 fatal ("unexpected difference in siginfo");
/* DIRECTION == 1 copies NATIVE into the inferior layout INF;
   otherwise copy INF back into NATIVE.  */
867 compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
869 siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);
880 /* Update gdbserver_xmltarget. */
/* Re-select the target description (register layout, XML target) and
   fix up target_regsets based on what ptrace actually supports for
   the current inferior.  NOTE(review): many control-flow lines are
   elided in this copy.  */
883 x86_linux_update_xmltarget (void)
886 struct regset_info *regset;
887 static unsigned long long xcr0;
888 static int have_ptrace_getregset = -1;
889 #if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
890 static int have_ptrace_getfpxregs = -1;
/* Nothing to probe without a live inferior.  */
893 if (!current_inferior)
896 /* Before changing the register cache internal layout or the target
897 regsets, flush the contents of the current valid caches back to
899 regcache_invalidate ();
901 pid = pid_of (get_thread_lwp (current_inferior));
/* 8 xmm registers means a 32-bit inferior, 16 means amd64.  */
903 if (num_xmm_registers == 8)
904 init_registers_i386_linux ();
906 init_registers_amd64_linux ();
/* One-time probe: does this kernel support PTRACE_GETFPXREGS?  */
909 # ifdef HAVE_PTRACE_GETFPXREGS
910 if (have_ptrace_getfpxregs == -1)
912 elf_fpxregset_t fpxregs;
914 if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
916 have_ptrace_getfpxregs = 0;
917 x86_xcr0 = I386_XSTATE_X87_MASK;
919 /* Disable PTRACE_GETFPXREGS. */
920 for (regset = target_regsets;
921 regset->fill_function != NULL; regset++)
922 if (regset->get_request == PTRACE_GETFPXREGS)
929 have_ptrace_getfpxregs = 1;
/* Without FPX regs only the MMX (x87-only) description applies.  */
932 if (!have_ptrace_getfpxregs)
934 init_registers_i386_mmx_linux ();
938 init_registers_i386_linux ();
/* No XML support in gdb: fall back to canned target strings.  */
946 if (num_xmm_registers == 8)
947 gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
949 gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
951 gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
954 x86_xcr0 = I386_XSTATE_SSE_MASK;
959 /* Check if XSAVE extended state is supported. */
960 if (have_ptrace_getregset == -1)
962 unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
965 iov.iov_base = xstateregs;
966 iov.iov_len = sizeof (xstateregs);
968 /* Check if PTRACE_GETREGSET works. */
969 if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
972 have_ptrace_getregset = 0;
976 have_ptrace_getregset = 1;
978 /* Get XCR0 from XSAVE extended state at byte 464. */
979 xcr0 = xstateregs[464 / sizeof (long long)];
981 /* Use PTRACE_GETREGSET if it is available. */
982 for (regset = target_regsets;
983 regset->fill_function != NULL; regset++)
984 if (regset->get_request == PTRACE_GETREGSET)
985 regset->size = I386_XSTATE_SIZE (xcr0);
986 else if (regset->type != GENERAL_REGS)
990 if (have_ptrace_getregset)
992 /* AVX is the highest feature we support. */
993 if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
998 /* I386 has 8 xmm regs. */
999 if (num_xmm_registers == 8)
1000 init_registers_i386_avx_linux ();
1002 init_registers_amd64_avx_linux ();
1004 init_registers_i386_avx_linux ();
1010 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1011 PTRACE_GETREGSET. */
1014 x86_linux_process_qsupported (const char *query)
1016 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1017 with "i386" in qSupported query, it supports x86 XML target
   descriptions.  */
/* Parse the comma-separated feature list after "xmlRegisters=".  */
1020 if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
1022 char *copy = xstrdup (query + 13);
1025 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1027 if (strcmp (p, "i386") == 0)
/* Re-pick the target description now that gdb's XML capability is
   known.  */
1037 x86_linux_update_xmltarget ();
1040 /* Initialize gdbserver for the architecture of the inferior. */
/* Determine whether the inferior is 32- or 64-bit (by inspecting its
   executable) and configure the_low_target accordingly.  */
1043 x86_arch_setup (void)
1046 int pid = pid_of (get_thread_lwp (current_inferior));
1047 char *file = linux_child_pid_to_exec_file (pid);
1048 int use_64bit = elf_64_file_p (file);
1054 /* This can only happen if /proc/<pid>/exe is unreadable,
1055 but "that can't happen" if we've gotten this far.
1056 Fall through and assume this is a 32-bit program. */
/* 64-bit inferior: register access goes through regsets only.  */
1060 /* Amd64 doesn't have HAVE_LINUX_USRREGS. */
1061 the_low_target.num_regs = -1;
1062 the_low_target.regmap = NULL;
1063 the_low_target.cannot_fetch_register = NULL;
1064 the_low_target.cannot_store_register = NULL;
1066 /* Amd64 has 16 xmm regs. */
1067 num_xmm_registers = 16;
1069 x86_linux_update_xmltarget ();
1074 /* Ok we have a 32-bit inferior. */
1076 the_low_target.num_regs = I386_NUM_REGS;
1077 the_low_target.regmap = i386_regmap;
1078 the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
1079 the_low_target.cannot_store_register = i386_cannot_store_register;
1081 /* I386 has 8 xmm regs. */
1082 num_xmm_registers = 8;
1084 x86_linux_update_xmltarget ();
/* Report fast-tracepoint support to the core (body elided in this
   copy).  */
1088 x86_supports_tracepoints (void)
/* Write LEN bytes from BUF into the inferior at *TO.  NOTE(review):
   the line advancing *TO past the written bytes is elided here.  */
1094 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1096 write_inferior_memory (*to, buf, len);
/* Decode OP, a string of space-separated hex byte values, into BUF
   and return the number of bytes emitted.  */
1101 push_opcode (unsigned char *buf, char *op)
1103 unsigned char *buf_org = buf;
1108 unsigned long ul = strtoul (op, &endptr, 16);
1117 return buf - buf_org;
1122 /* Build a jump pad that saves registers and calls a collection
1123 function. Writes a jump instruction to the jump pad to
1124 JJUMPAD_INSN. The caller is responsible to write it in at the
1125 tracepoint address. */
/* Emit the amd64 jump pad at *JUMP_ENTRY: save all GPRs and flags,
   take a spin-lock, call the collector, restore state, re-execute the
   displaced original instruction, and jump back.  NOTE(review): a few
   lines (braces, some assignments) are elided in this copy.  */
1128 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1129 CORE_ADDR collector,
1132 CORE_ADDR *jump_entry,
1133 unsigned char *jjump_pad_insn,
1134 ULONGEST *jjump_pad_insn_size,
1135 CORE_ADDR *adjusted_insn_addr,
1136 CORE_ADDR *adjusted_insn_addr_end)
1138 unsigned char buf[40];
1140 CORE_ADDR buildaddr = *jump_entry;
1142 /* Build the jump pad. */
1144 /* First, do tracepoint data collection. Save registers. */
1146 /* Need to ensure stack pointer saved first. */
1147 buf[i++] = 0x54; /* push %rsp */
1148 buf[i++] = 0x55; /* push %rbp */
1149 buf[i++] = 0x57; /* push %rdi */
1150 buf[i++] = 0x56; /* push %rsi */
1151 buf[i++] = 0x52; /* push %rdx */
1152 buf[i++] = 0x51; /* push %rcx */
1153 buf[i++] = 0x53; /* push %rbx */
1154 buf[i++] = 0x50; /* push %rax */
1155 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1156 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1157 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1158 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1159 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1160 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1161 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1162 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1163 buf[i++] = 0x9c; /* pushfq */
/* Load the tracepoint address into %rdi and push it, so the saved
   block records the PC of the tracepoint.  */
1164 buf[i++] = 0x48; /* movl <addr>,%rdi */
1166 *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
1167 i += sizeof (unsigned long);
1168 buf[i++] = 0x57; /* push %rdi */
1169 append_insns (&buildaddr, i, buf);
1171 /* Stack space for the collecting_t object. */
1173 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1174 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1175 memcpy (buf + i, &tpoint, 8);
1177 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1178 i += push_opcode (&buf[i],
1179 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1180 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1181 append_insns (&buildaddr, i, buf);
/* Spin until we own the collector lock (cmpxchg on LOCKADDR).  */
1185 i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
1186 memcpy (&buf[i], (void *) &lockaddr, 8);
1188 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1189 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1190 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1191 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1192 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1193 append_insns (&buildaddr, i, buf);
1195 /* Set up the gdb_collect call. */
1196 /* At this point, (stack pointer + 0x18) is the base of our saved
   register block.  */
1200 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1201 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1203 /* tpoint address may be 64-bit wide. */
1204 i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
1205 memcpy (buf + i, &tpoint, 8);
1207 append_insns (&buildaddr, i, buf);
1209 /* The collector function being in the shared library, may be
1210 >31-bits away off the jump pad. */
1212 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1213 memcpy (buf + i, &collector, 8);
1215 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1216 append_insns (&buildaddr, i, buf);
1218 /* Clear the spin-lock. */
1220 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1221 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1222 memcpy (buf + i, &lockaddr, 8);
1224 append_insns (&buildaddr, i, buf);
1226 /* Remove stack that had been used for the collect_t object. */
1228 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1229 append_insns (&buildaddr, i, buf);
1231 /* Restore register state. */
1233 buf[i++] = 0x48; /* add $0x8,%rsp */
1237 buf[i++] = 0x9d; /* popfq */
1238 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1239 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1240 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1241 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1242 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1243 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1244 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1245 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1246 buf[i++] = 0x58; /* pop %rax */
1247 buf[i++] = 0x5b; /* pop %rbx */
1248 buf[i++] = 0x59; /* pop %rcx */
1249 buf[i++] = 0x5a; /* pop %rdx */
1250 buf[i++] = 0x5e; /* pop %rsi */
1251 buf[i++] = 0x5f; /* pop %rdi */
1252 buf[i++] = 0x5d; /* pop %rbp */
1253 buf[i++] = 0x5c; /* pop %rsp */
1254 append_insns (&buildaddr, i, buf);
1256 /* Now, adjust the original instruction to execute in the jump
   pad.  */
1258 *adjusted_insn_addr = buildaddr;
1259 relocate_instruction (&buildaddr, tpaddr);
1260 *adjusted_insn_addr_end = buildaddr;
1262 /* Finally, write a jump back to the program. */
1263 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1264 memcpy (buf, jump_insn, sizeof (jump_insn));
1265 memcpy (buf + 1, &offset, 4);
1266 append_insns (&buildaddr, sizeof (jump_insn), buf);
1268 /* The jump pad is now built. Wire in a jump to our jump pad. This
1269 is always done last (by our caller actually), so that we can
1270 install fast tracepoints with threads running. This relies on
1271 the agent's atomic write support. */
1272 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1273 memcpy (buf, jump_insn, sizeof (jump_insn));
1274 memcpy (buf + 1, &offset, 4);
1275 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1276 *jjump_pad_insn_size = sizeof (jump_insn);
1278 /* Return the end address of our pad. */
1279 *jump_entry = buildaddr;
1284 #endif /* __x86_64__ */
1286 /* Build a jump pad that saves registers and calls a collection
1287 function. Writes a jump instruction to the jump pad to
1288 JJUMPAD_INSN. The caller is responsible to write it in at the
1289 tracepoint address. */
/* i386 variant of the jump-pad builder: same structure as the amd64
   one (save registers, spin-lock, call collector, restore, relocate,
   jump back).  NOTE(review): some lines are elided in this copy.  */
1292 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1293 CORE_ADDR collector,
1296 CORE_ADDR *jump_entry,
1297 unsigned char *jjump_pad_insn,
1298 ULONGEST *jjump_pad_insn_size,
1299 CORE_ADDR *adjusted_insn_addr,
1300 CORE_ADDR *adjusted_insn_addr_end)
1302 unsigned char buf[0x100];
1304 CORE_ADDR buildaddr = *jump_entry;
1306 /* Build the jump pad. */
1308 /* First, do tracepoint data collection. Save registers. */
1310 buf[i++] = 0x60; /* pushad */
1311 buf[i++] = 0x68; /* push tpaddr aka $pc */
1312 *((int *)(buf + i)) = (int) tpaddr;
1314 buf[i++] = 0x9c; /* pushf */
1315 buf[i++] = 0x1e; /* push %ds */
1316 buf[i++] = 0x06; /* push %es */
/* push %fs/%gs are two-byte opcodes (0x0f 0xa0 / 0x0f 0xa8); the
   second byte of each appears on an elided line.  */
1317 buf[i++] = 0x0f; /* push %fs */
1319 buf[i++] = 0x0f; /* push %gs */
1321 buf[i++] = 0x16; /* push %ss */
1322 buf[i++] = 0x0e; /* push %cs */
1323 append_insns (&buildaddr, i, buf);
1325 /* Stack space for the collecting_t object. */
1327 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1329 /* Build the object. */
1330 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1331 memcpy (buf + i, &tpoint, 4);
1333 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1335 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1336 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1337 append_insns (&buildaddr, i, buf);
1339 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1340 If we cared for it, this could be using xchg alternatively. */
1343 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1344 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
   %esp,<lockaddr> */
1346 memcpy (&buf[i], (void *) &lockaddr, 4);
1348 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1349 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1350 append_insns (&buildaddr, i, buf);
1353 /* Set up arguments to the gdb_collect call. */
1355 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1356 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1357 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1358 append_insns (&buildaddr, i, buf);
1361 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1362 append_insns (&buildaddr, i, buf);
1365 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1366 memcpy (&buf[i], (void *) &tpoint, 4);
1368 append_insns (&buildaddr, i, buf);
/* Direct near call with 32-bit relative displacement.  */
1370 buf[0] = 0xe8; /* call <reladdr> */
1371 offset = collector - (buildaddr + sizeof (jump_insn));
1372 memcpy (buf + 1, &offset, 4);
1373 append_insns (&buildaddr, 5, buf);
1374 /* Clean up after the call. */
1375 buf[0] = 0x83; /* add $0x8,%esp */
1378 append_insns (&buildaddr, 3, buf);
1381 /* Clear the spin-lock. This would need the LOCK prefix on older
   machines.  */
1384 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1385 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1386 memcpy (buf + i, &lockaddr, 4);
1388 append_insns (&buildaddr, i, buf);
1391 /* Remove stack that had been used for the collect_t object. */
1393 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1394 append_insns (&buildaddr, i, buf);
/* Restore registers in reverse order of the saves above.  */
1397 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1400 buf[i++] = 0x17; /* pop %ss */
1401 buf[i++] = 0x0f; /* pop %gs */
1403 buf[i++] = 0x0f; /* pop %fs */
1405 buf[i++] = 0x07; /* pop %es */
1406 buf[i++] = 0x1f; /* pop %ds */
1407 buf[i++] = 0x9d; /* popf */
1408 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1411 buf[i++] = 0x61; /* popad */
1412 append_insns (&buildaddr, i, buf);
1414 /* Now, adjust the original instruction to execute in the jump
   pad.  */
1416 *adjusted_insn_addr = buildaddr;
1417 relocate_instruction (&buildaddr, tpaddr);
1418 *adjusted_insn_addr_end = buildaddr;
1420 /* Write the jump back to the program. */
1421 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1422 memcpy (buf, jump_insn, sizeof (jump_insn));
1423 memcpy (buf + 1, &offset, 4);
1424 append_insns (&buildaddr, sizeof (jump_insn), buf);
1426 /* The jump pad is now built. Wire in a jump to our jump pad. This
1427 is always done last (by our caller actually), so that we can
1428 install fast tracepoints with threads running. This relies on
1429 the agent's atomic write support. */
1430 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1431 memcpy (buf, jump_insn, sizeof (jump_insn));
1432 memcpy (buf + 1, &offset, 4);
1433 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1434 *jjump_pad_insn_size = sizeof (jump_insn);
1436 /* Return the end address of our pad. */
1437 *jump_entry = buildaddr;
/* Dispatch to the amd64 or i386 jump-pad builder based on the
   inferior's register size.  */
1443 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1444 CORE_ADDR collector,
1447 CORE_ADDR *jump_entry,
1448 unsigned char *jjump_pad_insn,
1449 ULONGEST *jjump_pad_insn_size,
1450 CORE_ADDR *adjusted_insn_addr,
1451 CORE_ADDR *adjusted_insn_addr_end)
1454 if (register_size (0) == 8)
1455 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1456 collector, lockaddr,
1457 orig_size, jump_entry,
1459 jjump_pad_insn_size,
1461 adjusted_insn_addr_end);
1464 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1465 collector, lockaddr,
1466 orig_size, jump_entry,
1468 jjump_pad_insn_size,
1470 adjusted_insn_addr_end);
1473 /* This is initialized assuming an amd64 target.
1474 x86_arch_setup will correct it for i386 or amd64 targets. */
1476 struct linux_target_ops the_low_target =
/* NOTE(review): many initializer entries preceding these are elided
   in this copy; entry order must match struct linux_target_ops.  */
1492 x86_stopped_by_watchpoint,
1493 x86_stopped_data_address,
1494 /* collect_ptrace_register/supply_ptrace_register are not needed in the
1495 native i386 case (no registers smaller than an xfer unit), and are not
1496 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
1499 /* need to fix up i386 siginfo if host is amd64 */
1501 x86_linux_new_process,
1502 x86_linux_new_thread,
1503 x86_linux_prepare_to_resume,
1504 x86_linux_process_qsupported,
1505 x86_supports_tracepoints,
1506 x86_get_thread_area,
1507 x86_install_fast_tracepoint_jump_pad