1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
29 #include "elf/common.h"
31 #include "gdb_proc_service.h"
33 /* Defined in auto-generated file i386-linux.c. */
34 void init_registers_i386_linux (void);
35 /* Defined in auto-generated file amd64-linux.c. */
36 void init_registers_amd64_linux (void);
37 /* Defined in auto-generated file i386-avx-linux.c. */
38 void init_registers_i386_avx_linux (void);
39 /* Defined in auto-generated file amd64-avx-linux.c. */
40 void init_registers_amd64_avx_linux (void);
41 /* Defined in auto-generated file i386-mmx-linux.c. */
42 void init_registers_i386_mmx_linux (void);
/* Template for a 5-byte x86 relative jump (0xe9 = jmp rel32); the four
   zero bytes are the 32-bit displacement, patched in when the jump pad
   code below emits a concrete jump.  */
44 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
46 /* Backward compatibility for gdb without XML support. */
/* Minimal hand-written target descriptions returned when the connected
   GDB did not advertise XML support in qSupported; the leading '@'
   marks the string as a literal description rather than a file name.
   NOTE(review): the closing "</target>" of each string literal is not
   visible in this chunk.  */
48 static const char *xmltarget_i386_linux_no_xml = "@<target>\
49 <architecture>i386</architecture>\
50 <osabi>GNU/Linux</osabi>\
54 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
55 <architecture>i386:x86-64</architecture>\
56 <osabi>GNU/Linux</osabi>\
61 #include <sys/procfs.h>
62 #include <sys/ptrace.h>
/* Fallback numeric values for ptrace requests and arch_prctl codes that
   older kernel/libc headers may not define.  The values match the
   canonical definitions in the Linux kernel headers.  */
65 #ifndef PTRACE_GETREGSET
66 #define PTRACE_GETREGSET 0x4204
69 #ifndef PTRACE_SETREGSET
70 #define PTRACE_SETREGSET 0x4205
74 #ifndef PTRACE_GET_THREAD_AREA
75 #define PTRACE_GET_THREAD_AREA 25
78 /* This definition comes from prctl.h, but some kernels may not have it. */
79 #ifndef PTRACE_ARCH_PRCTL
80 #define PTRACE_ARCH_PRCTL 30
83 /* The following definitions come from prctl.h, but may be absent
84 for certain configurations. */
86 #define ARCH_SET_GS 0x1001
87 #define ARCH_SET_FS 0x1002
88 #define ARCH_GET_FS 0x1003
89 #define ARCH_GET_GS 0x1004
92 /* Per-process arch-specific data we want to keep. */
94 struct arch_process_info
/* Mirror of the process's x86 debug registers (DR0..DR7); the kernel
   copies are updated lazily from this state in
   x86_linux_prepare_to_resume.  */
96 struct i386_debug_reg_state debug_reg_state;
99 /* Per-thread arch-specific data we want to keep. */
103 /* Non-zero if our copy differs from what's recorded in the thread. */
104 int debug_registers_changed;
109 /* Mapping between the general-purpose registers in `struct user'
110 format and GDB's register array layout.
111 Note that the transfer layout uses 64-bit regs. */
/* Each entry is the byte offset of the corresponding GDB register
   inside the kernel's register dump; indexed by GDB register number.
   Non-const because other code may patch it (hence the commented-out
   const).  */
112 static /*const*/ int i386_regmap[] =
114 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
115 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
116 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
117 DS * 8, ES * 8, FS * 8, GS * 8
120 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
122 /* So code below doesn't have to care, i386 or amd64. */
123 #define ORIG_EAX ORIG_RAX
/* Offsets for the native amd64 layout; -1 marks registers (e.g. the
   floating-point/SSE set) that are not transferred via this map and are
   skipped by x86_fill_gregset/x86_store_gregset.  */
125 static const int x86_64_regmap[] =
127 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
128 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
129 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
130 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
131 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
132 DS * 8, ES * 8, FS * 8, GS * 8,
133 -1, -1, -1, -1, -1, -1, -1, -1,
134 -1, -1, -1, -1, -1, -1, -1, -1,
135 -1, -1, -1, -1, -1, -1, -1, -1,
136 -1, -1, -1, -1, -1, -1, -1, -1, -1,
140 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
142 #else /* ! __x86_64__ */
144 /* Mapping between the general-purpose registers in `struct user'
145 format and GDB's register array layout. */
/* 32-bit build: offsets are in 4-byte units within the i386 user area.  */
146 static /*const*/ int i386_regmap[] =
148 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
149 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
150 EIP * 4, EFL * 4, CS * 4, SS * 4,
151 DS * 4, ES * 4, FS * 4, GS * 4
154 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
158 /* Called by libthread_db. */
/* Fetch the thread-local storage base address for thread LWPID into
   *BASE.  On 64-bit inferiors this is read directly with
   PTRACE_ARCH_PRCTL (FS or GS base); on 32-bit inferiors IDX selects a
   GDT entry queried via PTRACE_GET_THREAD_AREA, and the base is word 1
   of the returned descriptor.  NOTE(review): error-return paths are not
   visible in this sampled chunk.  */
161 ps_get_thread_area (const struct ps_prochandle *ph,
162 lwpid_t lwpid, int idx, void **base)
165 int use_64bit = register_size (0) == 8;
172 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
176 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
187 unsigned int desc[4];
189 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
190 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
/* desc[1] is the segment base field of the GDT descriptor.  */
193 *(int *)base = desc[1];
198 /* Get the thread area address. This is used to recognize which
199 thread is which when tracing with the in-process agent library. We
200 don't read anything from the address, and treat it as opaque; it's
201 the address itself that we assume is unique per-thread. */
204 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
207 int use_64bit = register_size (0) == 8;
/* 64-bit: the FS base is the thread area address.  */
212 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
214 *addr = (CORE_ADDR) (uintptr_t) base;
/* 32-bit: derive the GDT index from the %gs selector (selector >> 3
   drops the RPL/TI bits) and query the descriptor.  */
223 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
224 struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
225 unsigned int desc[4];
227 const int reg_thread_area = 3; /* bits to scale down register value. */
230 collect_register_by_name (regcache, "gs", &gs);
232 idx = gs >> reg_thread_area;
234 if (ptrace (PTRACE_GET_THREAD_AREA,
235 lwpid_of (lwp), (void *) (long) idx, (unsigned long) &desc) < 0)
/* Registers past the general-purpose set cannot be stored via the
   USRREGS path.  */
246 i386_cannot_store_register (int regno)
248 return regno >= I386_NUM_REGS;
/* Likewise for fetching.  */
252 i386_cannot_fetch_register (int regno)
254 return regno >= I386_NUM_REGS;
/* Copy the general-purpose registers from REGCACHE into the kernel
   regset buffer BUF, using the regmap offsets; entries mapped to -1 are
   skipped.  The 32-bit path also transfers orig_eax.  */
258 x86_fill_gregset (struct regcache *regcache, void *buf)
263 if (register_size (0) == 8)
265 for (i = 0; i < X86_64_NUM_REGS; i++)
266 if (x86_64_regmap[i] != -1)
267 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
272 for (i = 0; i < I386_NUM_REGS; i++)
273 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
275 collect_register_by_name (regcache, "orig_eax",
276 ((char *) buf) + ORIG_EAX * 4);
/* Inverse of x86_fill_gregset: copy from the kernel buffer into the
   regcache.  */
280 x86_store_gregset (struct regcache *regcache, const void *buf)
285 if (register_size (0) == 8)
287 for (i = 0; i < X86_64_NUM_REGS; i++)
288 if (x86_64_regmap[i] != -1)
289 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
294 for (i = 0; i < I386_NUM_REGS; i++)
295 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
297 supply_register_by_name (regcache, "orig_eax",
298 ((char *) buf) + ORIG_EAX * 4);
/* FP regset: regcache -> fxsave (or legacy fsave) format.  */
302 x86_fill_fpregset (struct regcache *regcache, void *buf)
305 i387_cache_to_fxsave (regcache, buf);
307 i387_cache_to_fsave (regcache, buf);
/* FP regset: fxsave (or legacy fsave) format -> regcache.  */
312 x86_store_fpregset (struct regcache *regcache, const void *buf)
315 i387_fxsave_to_cache (regcache, buf);
317 i387_fsave_to_cache (regcache, buf);
/* FPX regset (fxsave format, 32-bit only).  */
324 x86_fill_fpxregset (struct regcache *regcache, void *buf)
326 i387_cache_to_fxsave (regcache, buf);
330 x86_store_fpxregset (struct regcache *regcache, const void *buf)
332 i387_fxsave_to_cache (regcache, buf);
/* XSAVE extended-state regset, used with PTRACE_GETREGSET/NT_X86_XSTATE.  */
338 x86_fill_xstateregset (struct regcache *regcache, void *buf)
340 i387_cache_to_xsave (regcache, buf);
344 x86_store_xstateregset (struct regcache *regcache, const void *buf)
346 i387_xsave_to_cache (regcache, buf);
349 /* ??? The non-biarch i386 case stores all the i387 regs twice.
350 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
351 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
352 doesn't work. IWBN to avoid the duplication in the case where it
353 does work. Maybe the arch_setup routine could check whether it works
354 and update target_regsets accordingly, maybe by moving target_regsets
355 to linux_target_ops and set the right one there, rather than having to
356 modify the target_regsets global. */
/* Table of register sets supported by the Linux x86 target, terminated
   by a sentinel entry.  The NT_X86_XSTATE entry's size starts at 0 and
   is set to the real XSAVE size at runtime by
   x86_linux_update_xmltarget once XCR0 is known.  */
358 struct regset_info target_regsets[] =
360 #ifdef HAVE_PTRACE_GETREGS
361 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
363 x86_fill_gregset, x86_store_gregset },
364 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
365 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
367 # ifdef HAVE_PTRACE_GETFPXREGS
368 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
370 x86_fill_fpxregset, x86_store_fpxregset },
373 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
375 x86_fill_fpregset, x86_store_fpregset },
376 #endif /* HAVE_PTRACE_GETREGS */
377 { 0, 0, 0, -1, -1, NULL, NULL }
/* Return the program counter from REGCACHE; reads "rip" for 64-bit
   inferiors and "eip" for 32-bit ones.  */
381 x86_get_pc (struct regcache *regcache)
383 int use_64bit = register_size (0) == 8;
388 collect_register_by_name (regcache, "rip", &pc);
389 return (CORE_ADDR) pc;
394 collect_register_by_name (regcache, "eip", &pc);
395 return (CORE_ADDR) pc;
/* Store PC into the inferior's program counter register, choosing the
   width to match the inferior.  */
400 x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
402 int use_64bit = register_size (0) == 8;
406 unsigned long newpc = pc;
407 supply_register_by_name (regcache, "rip", &newpc);
411 unsigned int newpc = pc;
412 supply_register_by_name (regcache, "eip", &newpc);
/* Software breakpoint: the single-byte int3 instruction.  */
416 static const unsigned char x86_breakpoint[] = { 0xCC };
417 #define x86_breakpoint_len 1
/* Return non-zero if an int3 is present at PC in the inferior.  */
420 x86_breakpoint_at (CORE_ADDR pc)
424 (*the_target->read_memory) (pc, &c, 1);
431 /* Support for debug registers. */
/* Read debug register REGNUM of the thread PTID via
   PTRACE_PEEKUSER on the u_debugreg slot; errors are fatal.  */
434 x86_linux_dr_get (ptid_t ptid, int regnum)
439 tid = ptid_get_lwp (ptid);
442 value = ptrace (PTRACE_PEEKUSER, tid,
443 offsetof (struct user, u_debugreg[regnum]), 0);
445 error ("Couldn't read debug register");
/* Write VALUE into debug register REGNUM of the thread PTID via
   PTRACE_POKEUSER; errors are fatal.  */
451 x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
455 tid = ptid_get_lwp (ptid);
458 ptrace (PTRACE_POKEUSER, tid,
459 offsetof (struct user, u_debugreg[regnum]), value);
461 error ("Couldn't write debug register");
464 /* Update the inferior's debug register REGNUM from STATE. */
467 i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
469 struct inferior_list_entry *lp;
471 /* Only need to update the threads of this process. */
472 int pid = pid_of (get_thread_lwp (current_inferior));
474 if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
475 fatal ("Invalid debug register %d", regnum);
477 addr = state->dr_mirror[regnum];
479 for (lp = all_lwps.head; lp; lp = lp->next)
481 struct lwp_info *lwp = (struct lwp_info *) lp;
483 /* The actual update is done later, we just mark that the register
485 if (pid_of (lwp) == pid)
/* The thread's copy is flushed lazily in x86_linux_prepare_to_resume.  */
486 lwp->arch_private->debug_registers_changed = 1;
490 /* Update the inferior's DR7 debug control register from STATE. */
493 i386_dr_low_set_control (const struct i386_debug_reg_state *state)
495 struct inferior_list_entry *lp;
496 /* Only need to update the threads of this process. */
497 int pid = pid_of (get_thread_lwp (current_inferior));
499 for (lp = all_lwps.head; lp; lp = lp->next)
501 struct lwp_info *lwp = (struct lwp_info *) lp;
503 /* The actual update is done later, we just mark that the register
505 if (pid_of (lwp) == pid)
506 lwp->arch_private->debug_registers_changed = 1;
510 /* Get the value of the DR6 debug status register from the inferior
511 and record it in STATE. */
514 i386_dr_low_get_status (struct i386_debug_reg_state *state)
516 struct lwp_info *lwp = get_thread_lwp (current_inferior);
517 ptid_t ptid = ptid_of (lwp);
519 state->dr_status_mirror = x86_linux_dr_get (ptid, DR_STATUS);
522 /* Watchpoint support. */
/* Insert a breakpoint or watchpoint of TYPE at ADDR covering LEN
   bytes.  Software breakpoints go through the generic breakpoint
   machinery; hardware watchpoints go through the shared i386 debug
   register code using this process's mirror state.  */
525 x86_insert_point (char type, CORE_ADDR addr, int len)
527 struct process_info *proc = current_process ();
531 return set_gdb_breakpoint_at (addr);
535 return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
/* Remove a breakpoint or watchpoint previously inserted by
   x86_insert_point.  */
544 x86_remove_point (char type, CORE_ADDR addr, int len)
546 struct process_info *proc = current_process ();
550 return delete_gdb_breakpoint_at (addr);
554 return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
/* Return non-zero if the current stop was caused by a hardware
   watchpoint.  */
563 x86_stopped_by_watchpoint (void)
565 struct process_info *proc = current_process ();
566 return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
/* Return the data address that triggered the watchpoint stop, or 0.  */
570 x86_stopped_data_address (void)
572 struct process_info *proc = current_process ();
574 if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
580 /* Called when a new process is created. */
/* Allocate and zero-initialize the per-process arch data, with clean
   debug-register mirrors.  */
582 static struct arch_process_info *
583 x86_linux_new_process (void)
585 struct arch_process_info *info = xcalloc (1, sizeof (*info));
587 i386_low_init_dregs (&info->debug_reg_state);
592 /* Called when a new thread is detected. */
/* New threads start with debug_registers_changed set so their debug
   registers get synced from the process mirror on first resume.  */
594 static struct arch_lwp_info *
595 x86_linux_new_thread (void)
597 struct arch_lwp_info *info = xcalloc (1, sizeof (*info));
599 info->debug_registers_changed = 1;
604 /* Called when resuming a thread.
605 If the debug regs have changed, update the thread's copies. */
608 x86_linux_prepare_to_resume (struct lwp_info *lwp)
610 ptid_t ptid = ptid_of (lwp);
612 if (lwp->arch_private->debug_registers_changed)
615 int pid = ptid_get_pid (ptid);
616 struct process_info *proc = find_process_pid (pid);
617 struct i386_debug_reg_state *state = &proc->private->arch_private->debug_reg_state;
/* Flush the mirrored address registers and the control register into
   the kernel for this thread.  */
619 for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
620 x86_linux_dr_set (ptid, i, state->dr_mirror[i]);
622 x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);
624 lwp->arch_private->debug_registers_changed = 0;
/* Clear DR6 so a stale watchpoint hit isn't reported again.  */
627 if (lwp->stopped_by_watchpoint)
628 x86_linux_dr_set (ptid, DR_STATUS, 0);
631 /* When GDBSERVER is built as a 64-bit application on linux, the
632 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
633 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
634 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
635 conversion in-place ourselves. */
637 /* These types below (compat_*) define a siginfo type that is layout
638 compatible with the siginfo type exported by the 32-bit userspace
/* 32-bit-sized equivalents of the kernel's types; pointers are carried
   as 32-bit unsigned integers.  */
643 typedef int compat_int_t;
644 typedef unsigned int compat_uptr_t;
646 typedef int compat_time_t;
647 typedef int compat_timer_t;
648 typedef int compat_clock_t;
650 struct compat_timeval
652 compat_time_t tv_sec;
656 typedef union compat_sigval
658 compat_int_t sival_int;
659 compat_uptr_t sival_ptr;
/* Layout-compatible mirror of the 32-bit userspace siginfo_t; the
   padding keeps the union the same total size as the kernel's.  */
662 typedef struct compat_siginfo
670 int _pad[((128 / sizeof (int)) - 3)];
679 /* POSIX.1b timers */
684 compat_sigval_t _sigval;
687 /* POSIX.1b signals */
692 compat_sigval_t _sigval;
701 compat_clock_t _utime;
702 compat_clock_t _stime;
705 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
/* Shorthand accessors into the _sifields union, mirroring the glibc
   si_* convenience macros.  */
720 #define cpt_si_pid _sifields._kill._pid
721 #define cpt_si_uid _sifields._kill._uid
722 #define cpt_si_timerid _sifields._timer._tid
723 #define cpt_si_overrun _sifields._timer._overrun
724 #define cpt_si_status _sifields._sigchld._status
725 #define cpt_si_utime _sifields._sigchld._utime
726 #define cpt_si_stime _sifields._sigchld._stime
727 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
728 #define cpt_si_addr _sifields._sigfault._addr
729 #define cpt_si_band _sifields._sigpoll._band
730 #define cpt_si_fd _sifields._sigpoll._fd
732 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
733 In their place is si_timer1,si_timer2. */
735 #define si_timerid si_timer1
738 #define si_overrun si_timer2
/* Convert a native (64-bit layout) siginfo FROM into the 32-bit compat
   layout TO.  Which union members are valid depends on si_code and
   si_signo, so the copy is dispatched accordingly; pointers are
   truncated through intptr_t.  */
742 compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
744 memset (to, 0, sizeof (*to));
746 to->si_signo = from->si_signo;
747 to->si_errno = from->si_errno;
748 to->si_code = from->si_code;
752 to->cpt_si_ptr = (intptr_t) from->si_ptr;
754 else if (to->si_code == SI_USER)
756 to->cpt_si_pid = from->si_pid;
757 to->cpt_si_uid = from->si_uid;
759 else if (to->si_code == SI_TIMER)
761 to->cpt_si_timerid = from->si_timerid;
762 to->cpt_si_overrun = from->si_overrun;
763 to->cpt_si_ptr = (intptr_t) from->si_ptr;
/* Signal-specific members for kernel-generated siginfo.  */
767 switch (to->si_signo)
770 to->cpt_si_pid = from->si_pid;
771 to->cpt_si_uid = from->si_uid;
772 to->cpt_si_status = from->si_status;
773 to->cpt_si_utime = from->si_utime;
774 to->cpt_si_stime = from->si_stime;
780 to->cpt_si_addr = (intptr_t) from->si_addr;
783 to->cpt_si_band = from->si_band;
784 to->cpt_si_fd = from->si_fd;
787 to->cpt_si_pid = from->si_pid;
788 to->cpt_si_uid = from->si_uid;
789 to->cpt_si_ptr = (intptr_t) from->si_ptr;
/* Inverse conversion: 32-bit compat layout FROM into the native
   siginfo TO, widening the 32-bit pointer fields.  */
796 siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
798 memset (to, 0, sizeof (*to));
800 to->si_signo = from->si_signo;
801 to->si_errno = from->si_errno;
802 to->si_code = from->si_code;
806 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
808 else if (to->si_code == SI_USER)
810 to->si_pid = from->cpt_si_pid;
811 to->si_uid = from->cpt_si_uid;
813 else if (to->si_code == SI_TIMER)
815 to->si_timerid = from->cpt_si_timerid;
816 to->si_overrun = from->cpt_si_overrun;
817 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
821 switch (to->si_signo)
824 to->si_pid = from->cpt_si_pid;
825 to->si_uid = from->cpt_si_uid;
826 to->si_status = from->cpt_si_status;
827 to->si_utime = from->cpt_si_utime;
828 to->si_stime = from->cpt_si_stime;
834 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
837 to->si_band = from->cpt_si_band;
838 to->si_fd = from->cpt_si_fd;
841 to->si_pid = from->cpt_si_pid;
842 to->si_uid = from->cpt_si_uid;
843 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
849 #endif /* __x86_64__ */
851 /* Convert a native/host siginfo object, into/from the siginfo in the
852 layout of the inferiors' architecture. Returns true if any
853 conversion was done; false otherwise. If DIRECTION is 1, then copy
854 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
858 x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
861 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
862 if (register_size (0) == 4)
/* Sanity check: the compat layout must match siginfo in size or the
   in-place conversion would corrupt memory.  */
864 if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
865 fatal ("unexpected difference in siginfo")
866;
868 compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
870 siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);
881 /* Update gdbserver_xmltarget. */
/* Probe the inferior's capabilities (FPX regs, XSAVE/AVX via
   PTRACE_GETREGSET) and select the matching register description and
   XCR0 mask.  Probing results are cached in function-static variables,
   so the kernel is only asked once.  Also patches target_regsets: the
   NT_X86_XSTATE entry's size is set from XCR0, and unusable regsets
   are disabled.  */
884 x86_linux_update_xmltarget (void)
887 struct regset_info *regset;
888 static unsigned long long xcr0;
889 static int have_ptrace_getregset = -1;
890 #if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
891 static int have_ptrace_getfpxregs = -1;
894 if (!current_inferior)
897 /* Before changing the register cache internal layout or the target
898 regsets, flush the contents of the current valid caches back to
900 regcache_invalidate ();
902 pid = pid_of (get_thread_lwp (current_inferior));
/* Default descriptions: plain i386 or amd64, refined below if AVX or
   MMX-only is detected.  */
904 if (num_xmm_registers == 8)
905 init_registers_i386_linux ();
907 init_registers_amd64_linux ();
910 # ifdef HAVE_PTRACE_GETFPXREGS
/* One-time probe: does this kernel support PTRACE_GETFPXREGS?  */
911 if (have_ptrace_getfpxregs == -1)
913 elf_fpxregset_t fpxregs;
915 if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
917 have_ptrace_getfpxregs = 0;
918 x86_xcr0 = I386_XSTATE_X87_MASK;
920 /* Disable PTRACE_GETFPXREGS. */
921 for (regset = target_regsets;
922 regset->fill_function != NULL; regset++)
923 if (regset->get_request == PTRACE_GETFPXREGS)
930 have_ptrace_getfpxregs = 1;
/* Without FPX regs there is no SSE state: fall back to the MMX-only
   description.  */
933 if (!have_ptrace_getfpxregs)
935 init_registers_i386_mmx_linux ();
939 init_registers_i386_linux ();
/* GDB without XML support: hand it the static fallback description.  */
947 if (num_xmm_registers == 8)
948 gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
950 gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
952 gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
955 x86_xcr0 = I386_XSTATE_SSE_MASK;
960 /* Check if XSAVE extended state is supported. */
961 if (have_ptrace_getregset == -1)
963 unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
966 iov.iov_base = xstateregs;
967 iov.iov_len = sizeof (xstateregs);
969 /* Check if PTRACE_GETREGSET works. */
970 if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
973 have_ptrace_getregset = 0;
977 have_ptrace_getregset = 1;
979 /* Get XCR0 from XSAVE extended state at byte 464. */
980 xcr0 = xstateregs[464 / sizeof (long long)];
982 /* Use PTRACE_GETREGSET if it is available. */
983 for (regset = target_regsets;
984 regset->fill_function != NULL; regset++)
985 if (regset->get_request == PTRACE_GETREGSET)
986 regset->size = I386_XSTATE_SIZE (xcr0);
987 else if (regset->type != GENERAL_REGS)
991 if (have_ptrace_getregset)
993 /* AVX is the highest feature we support. */
994 if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
999 /* I386 has 8 xmm regs. */
1000 if (num_xmm_registers == 8)
1001 init_registers_i386_avx_linux ();
1003 init_registers_amd64_avx_linux ();
1005 init_registers_i386_avx_linux ();
1011 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1012 PTRACE_GETREGSET. */
1015 x86_linux_process_qsupported (const char *query)
1017 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1018 with "i386" in qSupported query, it supports x86 XML target
/* Parse the comma-separated architecture list after "xmlRegisters=";
   strtok mutates the buffer, hence the xstrdup'd copy.
   NOTE(review): the free of COPY is not visible in this chunk.  */
1021 if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
1023 char *copy = xstrdup (query + 13);
1026 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1028 if (strcmp (p, "i386") == 0)
1038 x86_linux_update_xmltarget ();
1041 /* Initialize gdbserver for the architecture of the inferior. */
/* Decide 32- vs 64-bit by inspecting the inferior's executable (ELF
   class of /proc/<pid>/exe) and configure regmaps, register counts and
   the XML target description to match.  */
1044 x86_arch_setup (void)
1047 int pid = pid_of (get_thread_lwp (current_inferior));
1048 char *file = linux_child_pid_to_exec_file (pid);
1049 int use_64bit = elf_64_file_p (file);
1055 /* This can only happen if /proc/<pid>/exe is unreadable,
1056 but "that can't happen" if we've gotten this far.
1057 Fall through and assume this is a 32-bit program. */
1061 /* Amd64 doesn't have HAVE_LINUX_USRREGS. */
1062 the_low_target.num_regs = -1;
1063 the_low_target.regmap = NULL;
1064 the_low_target.cannot_fetch_register = NULL;
1065 the_low_target.cannot_store_register = NULL;
1067 /* Amd64 has 16 xmm regs. */
1068 num_xmm_registers = 16;
1070 x86_linux_update_xmltarget ();
1075 /* Ok we have a 32-bit inferior. */
1077 the_low_target.num_regs = I386_NUM_REGS;
1078 the_low_target.regmap = i386_regmap;
1079 the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
1080 the_low_target.cannot_store_register = i386_cannot_store_register;
1082 /* I386 has 8 xmm regs. */
1083 num_xmm_registers = 8;
1085 x86_linux_update_xmltarget ();
/* This target supports fast tracepoints (jump pads below).  */
1089 x86_supports_tracepoints (void)
/* Write LEN bytes from BUF into the inferior at *TO.
   NOTE(review): the advance of *TO by LEN is not visible in this
   sampled chunk but callers rely on it.  */
1095 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1097 write_inferior_memory (*to, buf, len);
/* Decode OP, a string of space-separated hex byte values (e.g.
   "48 89 e6"), into BUF; returns the number of bytes emitted.  */
1102 push_opcode (unsigned char *buf, char *op)
1104 unsigned char *buf_org = buf;
1109 unsigned long ul = strtoul (op, &endptr, 16);
1118 return buf - buf_org;
1123 /* Build a jump pad that saves registers and calls a collection
1124 function. Writes a jump instruction to the jump pad to
1125 JJUMPAD_INSN. The caller is responsible to write it in at the
1126 tracepoint address. */
/* amd64 variant.  The pad: saves all GPRs + flags + the tracepoint PC
   on the stack, builds a collecting_t object, takes a spin-lock with
   lock cmpxchg, calls COLLECTOR with the tracepoint and register-block
   addresses, releases the lock, restores registers, re-executes the
   relocated original instruction, and jumps back to TPADDR+ORIG_SIZE.  */
1129 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1130 CORE_ADDR collector,
1133 CORE_ADDR *jump_entry,
1134 unsigned char *jjump_pad_insn,
1135 ULONGEST *jjump_pad_insn_size,
1136 CORE_ADDR *adjusted_insn_addr,
1137 CORE_ADDR *adjusted_insn_addr_end)
1139 unsigned char buf[40];
1141 CORE_ADDR buildaddr = *jump_entry;
1143 /* Build the jump pad. */
1145 /* First, do tracepoint data collection. Save registers. */
1147 /* Need to ensure stack pointer saved first. */
1148 buf[i++] = 0x54; /* push %rsp */
1149 buf[i++] = 0x55; /* push %rbp */
1150 buf[i++] = 0x57; /* push %rdi */
1151 buf[i++] = 0x56; /* push %rsi */
1152 buf[i++] = 0x52; /* push %rdx */
1153 buf[i++] = 0x51; /* push %rcx */
1154 buf[i++] = 0x53; /* push %rbx */
1155 buf[i++] = 0x50; /* push %rax */
1156 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1157 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1158 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1159 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1160 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1161 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1162 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1163 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1164 buf[i++] = 0x9c; /* pushfq */
/* Push the tracepoint address so the collector sees it as the PC.  */
1165 buf[i++] = 0x48; /* movl <addr>,%rdi */
1167 *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
1168 i += sizeof (unsigned long);
1169 buf[i++] = 0x57; /* push %rdi */
1170 append_insns (&buildaddr, i, buf);
1172 /* Stack space for the collecting_t object. */
1174 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1175 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1176 memcpy (buf + i, &tpoint, 8);
1178 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
/* Record the thread's TCB pointer (from %fs:0) in the object.  */
1179 i += push_opcode (&buf[i],
1180 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1181 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1182 append_insns (&buildaddr, i, buf);
/* Spin until we atomically swing *LOCKADDR from 0 to our %rsp.  */
1186 i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
1187 memcpy (&buf[i], (void *) &lockaddr, 8);
1189 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1190 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1191 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1192 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1193 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1194 append_insns (&buildaddr, i, buf);
1196 /* Set up the gdb_collect call. */
1197 /* At this point, (stack pointer + 0x18) is the base of our saved
1201 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1202 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1204 /* tpoint address may be 64-bit wide. */
1205 i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
1206 memcpy (buf + i, &tpoint, 8);
1208 append_insns (&buildaddr, i, buf);
1210 /* The collector function being in the shared library, may be
1211 >31-bits away off the jump pad. */
1213 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1214 memcpy (buf + i, &collector, 8);
1216 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1217 append_insns (&buildaddr, i, buf);
1219 /* Clear the spin-lock. */
1221 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1222 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1223 memcpy (buf + i, &lockaddr, 8);
1225 append_insns (&buildaddr, i, buf);
1227 /* Remove stack that had been used for the collect_t object. */
1229 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1230 append_insns (&buildaddr, i, buf);
1232 /* Restore register state. */
/* Discard the saved PC, then pop in reverse order of the saves.  */
1234 buf[i++] = 0x48; /* add $0x8,%rsp */
1238 buf[i++] = 0x9d; /* popfq */
1239 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1240 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1241 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1242 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1243 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1244 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1245 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1246 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1247 buf[i++] = 0x58; /* pop %rax */
1248 buf[i++] = 0x5b; /* pop %rbx */
1249 buf[i++] = 0x59; /* pop %rcx */
1250 buf[i++] = 0x5a; /* pop %rdx */
1251 buf[i++] = 0x5e; /* pop %rsi */
1252 buf[i++] = 0x5f; /* pop %rdi */
1253 buf[i++] = 0x5d; /* pop %rbp */
1254 buf[i++] = 0x5c; /* pop %rsp */
1255 append_insns (&buildaddr, i, buf);
1257 /* Now, adjust the original instruction to execute in the jump
1259 *adjusted_insn_addr = buildaddr;
1260 relocate_instruction (&buildaddr, tpaddr);
1261 *adjusted_insn_addr_end = buildaddr;
1263 /* Finally, write a jump back to the program. */
1264 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1265 memcpy (buf, jump_insn, sizeof (jump_insn));
1266 memcpy (buf + 1, &offset, 4);
1267 append_insns (&buildaddr, sizeof (jump_insn), buf);
1269 /* The jump pad is now built. Wire in a jump to our jump pad. This
1270 is always done last (by our caller actually), so that we can
1271 install fast tracepoints with threads running. This relies on
1272 the agent's atomic write support. */
1273 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1274 memcpy (buf, jump_insn, sizeof (jump_insn));
1275 memcpy (buf + 1, &offset, 4);
1276 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1277 *jjump_pad_insn_size = sizeof (jump_insn);
1279 /* Return the end address of our pad. */
1280 *jump_entry = buildaddr;
1285 #endif /* __x86_64__ */
1287 /* Build a jump pad that saves registers and calls a collection
1288 function. Writes a jump instruction to the jump pad to
1289 JJUMPAD_INSN. The caller is responsible to write it in at the
1290 tracepoint address. */
/* i386 variant.  Same overall structure as the amd64 pad: save
   registers (pushad + segments + flags + PC), build the collecting_t
   object, spin-lock with lock cmpxchg, call COLLECTOR with a direct
   rel32 call, unlock, restore, re-execute the relocated instruction
   and jump back.  */
1293 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1294 CORE_ADDR collector,
1297 CORE_ADDR *jump_entry,
1298 unsigned char *jjump_pad_insn,
1299 ULONGEST *jjump_pad_insn_size,
1300 CORE_ADDR *adjusted_insn_addr,
1301 CORE_ADDR *adjusted_insn_addr_end)
1303 unsigned char buf[0x100];
1305 CORE_ADDR buildaddr = *jump_entry;
1307 /* Build the jump pad. */
1309 /* First, do tracepoint data collection. Save registers. */
1311 buf[i++] = 0x60; /* pushad */
1312 buf[i++] = 0x68; /* push tpaddr aka $pc */
1313 *((int *)(buf + i)) = (int) tpaddr;
1315 buf[i++] = 0x9c; /* pushf */
1316 buf[i++] = 0x1e; /* push %ds */
1317 buf[i++] = 0x06; /* push %es */
1318 buf[i++] = 0x0f; /* push %fs */
1320 buf[i++] = 0x0f; /* push %gs */
1322 buf[i++] = 0x16; /* push %ss */
1323 buf[i++] = 0x0e; /* push %cs */
1324 append_insns (&buildaddr, i, buf);
1326 /* Stack space for the collecting_t object. */
1328 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1330 /* Build the object. */
1331 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1332 memcpy (buf + i, &tpoint, 4);
1334 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
/* Record the thread's TCB pointer (from %gs:0) in the object.  */
1336 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1337 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1338 append_insns (&buildaddr, i, buf);
1340 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1341 If we cared for it, this could be using xchg alternatively. */
1344 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1345 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1347 memcpy (&buf[i], (void *) &lockaddr, 4);
1349 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1350 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1351 append_insns (&buildaddr, i, buf);
1354 /* Set up arguments to the gdb_collect call. */
1356 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1357 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1358 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1359 append_insns (&buildaddr, i, buf);
1362 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1363 append_insns (&buildaddr, i, buf);
1366 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1367 memcpy (&buf[i], (void *) &tpoint, 4);
1369 append_insns (&buildaddr, i, buf);
/* Direct rel32 call; the displacement is relative to the end of the
   5-byte call instruction.  */
1371 buf[0] = 0xe8; /* call <reladdr> */
1372 offset = collector - (buildaddr + sizeof (jump_insn));
1373 memcpy (buf + 1, &offset, 4);
1374 append_insns (&buildaddr, 5, buf);
1375 /* Clean up after the call. */
1376 buf[0] = 0x83; /* add $0x8,%esp */
1379 append_insns (&buildaddr, 3, buf);
1382 /* Clear the spin-lock. This would need the LOCK prefix on older
1385 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1386 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1387 memcpy (buf + i, &lockaddr, 4);
1389 append_insns (&buildaddr, i, buf);
1392 /* Remove stack that had been used for the collect_t object. */
1394 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1395 append_insns (&buildaddr, i, buf);
/* Restore registers, reverse order of the saves above.  */
1398 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1401 buf[i++] = 0x17; /* pop %ss */
1402 buf[i++] = 0x0f; /* pop %gs */
1404 buf[i++] = 0x0f; /* pop %fs */
1406 buf[i++] = 0x07; /* pop %es */
1407 buf[i++] = 0x1f; /* pop %ds */
1408 buf[i++] = 0x9d; /* popf */
1409 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1412 buf[i++] = 0x61; /* popad */
1413 append_insns (&buildaddr, i, buf);
1415 /* Now, adjust the original instruction to execute in the jump
1417 *adjusted_insn_addr = buildaddr;
1418 relocate_instruction (&buildaddr, tpaddr);
1419 *adjusted_insn_addr_end = buildaddr;
1421 /* Write the jump back to the program. */
1422 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1423 memcpy (buf, jump_insn, sizeof (jump_insn));
1424 memcpy (buf + 1, &offset, 4);
1425 append_insns (&buildaddr, sizeof (jump_insn), buf);
1427 /* The jump pad is now built. Wire in a jump to our jump pad. This
1428 is always done last (by our caller actually), so that we can
1429 install fast tracepoints with threads running. This relies on
1430 the agent's atomic write support. */
1431 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1432 memcpy (buf, jump_insn, sizeof (jump_insn));
1433 memcpy (buf + 1, &offset, 4);
1434 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1435 *jjump_pad_insn_size = sizeof (jump_insn);
1437 /* Return the end address of our pad. */
1438 *jump_entry = buildaddr;
/* Build a fast-tracepoint jump pad for the current inferior,
   dispatching to the amd64 or i386 builder according to the
   inferior's register size.  NOTE(review): this excerpt is missing
   lines — the return type, the LOCKADDR/ORIG_SIZE parameters (used
   below), braces and two argument lines of each call — confirm
   against the full file.  */
1444 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1445 					      CORE_ADDR collector,
1448 					      CORE_ADDR *jump_entry,
1449 					      unsigned char *jjump_pad_insn,
1450 					      ULONGEST *jjump_pad_insn_size,
1451 					      CORE_ADDR *adjusted_insn_addr,
1452 					      CORE_ADDR *adjusted_insn_addr_end)
/* An 8-byte register size means a 64-bit inferior.  */
1455   if (register_size (0) == 8)
1456     return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1457 						   collector, lockaddr,
1458 						   orig_size, jump_entry,
1460 						   jjump_pad_insn_size,
1462 						   adjusted_insn_addr_end);
1465   return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1466 						collector, lockaddr,
1467 						orig_size, jump_entry,
1469 						jjump_pad_insn_size,
1471 						adjusted_insn_addr_end);
/* Append LEN bytes starting at START to the code being built at
   current_insn_ptr, and advance current_insn_ptr past them.
   NOTE(review): the fprintf is debug output — in the full file it is
   presumably guarded by a debug flag (the `if` line is missing from
   this excerpt); verify.  */
1475 add_insns (unsigned char *start, int len)
1477   CORE_ADDR buildaddr = current_insn_ptr;
1480     fprintf (stderr, "Adding %d bytes of insn at %s\n",
1481 	     len, paddress (buildaddr));
1483   append_insns (&buildaddr, len, start);
/* Leave the pointer after the newly emitted bytes.  */
1484   current_insn_ptr = buildaddr;
1487 /* A function used to trick optimizers. */
1495 /* Our general strategy for emitting code is to avoid specifying raw
1496 bytes whenever possible, and instead copy a block of inline asm
1497 that is embedded in the function. This is a little messy, because
1498 we need to keep the compiler from discarding what looks like dead
1499 code, plus suppress various warnings. */
/* EMIT_ASM copies the machine-code bytes of an inline-asm block
   (bracketed by start_NAME/end_NAME labels) into the code buffer via
   add_insns; the always_true()/goto pair keeps the compiler from
   executing or discarding the asm.  EMIT_ASM32 is the variant that
   forces .code32 so a 64-bit gdbserver can emit code for a 32-bit
   inferior; on 32-bit builds it is simply EMIT_ASM.  NOTE(review):
   the closing lines of both #defines (the skipover label and closing
   brace) are missing from this excerpt.  */
1501 #define EMIT_ASM(NAME,INSNS)						\
1502   { extern unsigned char start_ ## NAME, end_ ## NAME;		\
1503     add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
1504     if (always_true ())						\
1505       goto skipover ## NAME;						\
1506     __asm__ ("start_" #NAME ":\n\t" INSNS "\n\tend_" #NAME ":\n\t"); \
1513 #define EMIT_ASM32(NAME,INSNS)						\
1514   { extern unsigned char start_ ## NAME, end_ ## NAME;		\
1515     add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
1516     if (always_true ())						\
1517       goto skipover ## NAME;						\
1518     __asm__ (".code32\n\tstart_" #NAME ":\n\t" INSNS "\n\tend_" #NAME ":\n" \
1525 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
/* Emit the frame setup for a compiled agent expression: save the
   incoming regs-base (%rdi) and value pointer (%rsi) in the new
   frame.  NOTE(review): the initial "push %rbp" line appears to be
   missing from this excerpt.  */
1532 amd64_emit_prologue (void)
1534   EMIT_ASM (amd64_prologue,
1536 	    "movq %rsp,%rbp\n\t"
1537 	    "sub $0x20,%rsp\n\t"
1538 	    "movq %rdi,-8(%rbp)\n\t"
1539 	    "movq %rsi,-16(%rbp)");
/* Emit the epilogue: store the result (%rax) through the value
   pointer saved by the prologue.  Tail of the asm (frame teardown,
   ret) is missing from this excerpt.  */
1544 amd64_emit_epilogue (void)
1546   EMIT_ASM (amd64_epilogue,
1547 	    "movq -16(%rbp),%rdi\n\t"
1548 	    "movq %rax,(%rdi)\n\t"
/* Stack discipline for these emitters: top-of-stack lives in %rax,
   deeper entries on the machine stack; binary ops consume (%rsp) and
   pop it with the lea.  */
1555 amd64_emit_add (void)
1557   EMIT_ASM (amd64_add,
1558 	    "add (%rsp),%rax\n\t"
1559 	    "lea 0x8(%rsp),%rsp");
/* Subtract: result replaces the stack slot; tail of asm missing.  */
1563 amd64_emit_sub (void)
1565   EMIT_ASM (amd64_sub,
1566 	    "sub %rax,(%rsp)\n\t"
/* NOTE(review): the bodies of mul/lsh/rsh_signed/rsh_unsigned are
   missing from this excerpt — only the signatures remain.  */
1571 amd64_emit_mul (void)
1577 amd64_emit_lsh (void)
1583 amd64_emit_rsh_signed (void)
1589 amd64_emit_rsh_unsigned (void)
/* Sign-extend the low ARG bits of %rax; asm bodies truncated.  */
1595 amd64_emit_ext (int arg)
1600       EMIT_ASM (amd64_ext_8,
1606       EMIT_ASM (amd64_ext_16,
1611       EMIT_ASM (amd64_ext_32,
/* Logical NOT of the top of stack; asm body truncated here.  */
1620 amd64_emit_log_not (void)
1622   EMIT_ASM (amd64_log_not,
1623 	    "test %rax,%rax\n\t"
/* Bitwise AND of the two top stack entries; pops one slot.  */
1629 amd64_emit_bit_and (void)
1631   EMIT_ASM (amd64_and,
1632 	    "and (%rsp),%rax\n\t"
1633 	    "lea 0x8(%rsp),%rsp");
/* Bitwise OR.  NOTE(review): the EMIT_ASM line itself is missing
   from this excerpt.  */
1637 amd64_emit_bit_or (void)
1640 	    "or (%rsp),%rax\n\t"
1641 	    "lea 0x8(%rsp),%rsp");
/* Bitwise XOR.  */
1645 amd64_emit_bit_xor (void)
1647   EMIT_ASM (amd64_xor,
1648 	    "xor (%rsp),%rax\n\t"
1649 	    "lea 0x8(%rsp),%rsp");
/* Bitwise NOT: xor with all-ones complements %rax in place.  */
1653 amd64_emit_bit_not (void)
1655   EMIT_ASM (amd64_bit_not,
1656 	    "xorq $0xffffffffffffffff,%rax");
/* Comparisons leave a 0/1 boolean in %rax and pop one stack slot.
   NOTE(review): the mov-of-0 and mov-of-1 lines inside each asm are
   missing from this excerpt.  */
1660 amd64_emit_equal (void)
1662   EMIT_ASM (amd64_equal,
1663 	    "cmp %rax,(%rsp)\n\t"
1664 	    "je .Lamd64_equal_true\n\t"
1666 	    "jmp .Lamd64_equal_end\n\t"
1667 	    ".Lamd64_equal_true:\n\t"
1669 	    ".Lamd64_equal_end:\n\t"
1670 	    "lea 0x8(%rsp),%rsp");
/* Signed less-than of the next-to-top against the top.  */
1674 amd64_emit_less_signed (void)
1676   EMIT_ASM (amd64_less_signed,
1677 	    "cmp %rax,(%rsp)\n\t"
1678 	    "jl .Lamd64_less_signed_true\n\t"
1680 	    "jmp .Lamd64_less_signed_end\n\t"
1681 	    ".Lamd64_less_signed_true:\n\t"
1683 	    ".Lamd64_less_signed_end:\n\t"
1684 	    "lea 0x8(%rsp),%rsp");
/* Unsigned less-than (jb instead of jl).  */
1688 amd64_emit_less_unsigned (void)
1690   EMIT_ASM (amd64_less_unsigned,
1691 	    "cmp %rax,(%rsp)\n\t"
1692 	    "jb .Lamd64_less_unsigned_true\n\t"
1694 	    "jmp .Lamd64_less_unsigned_end\n\t"
1695 	    ".Lamd64_less_unsigned_true:\n\t"
1697 	    ".Lamd64_less_unsigned_end:\n\t"
1698 	    "lea 0x8(%rsp),%rsp");
/* Dereference the address in %rax, loading SIZE bytes back into
   %rax.  NOTE(review): the switch lines and the 1/2-byte load bodies
   are missing from this excerpt.  */
1702 amd64_emit_ref (int size)
1707       EMIT_ASM (amd64_ref1,
1711       EMIT_ASM (amd64_ref2,
1715       EMIT_ASM (amd64_ref4,
1716 		"movl (%rax),%eax");
1719       EMIT_ASM (amd64_ref8,
1720 		"movq (%rax),%rax");
/* Emit a conditional branch taken when the popped top-of-stack is
   nonzero.  The jne is spelled as raw bytes so its 32-bit relocation
   offset sits at a known position for amd64_write_goto_address;
   OFFSET_P/SIZE_P report that position (assignments missing here).  */
1726 amd64_emit_if_goto (int *offset_p, int *size_p)
1728   EMIT_ASM (amd64_if_goto,
1732 	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
/* Unconditional jump, same raw-byte trick (jmp rel32).  */
1740 amd64_emit_goto (int *offset_p, int *size_p)
1742   EMIT_ASM (amd64_goto,
1743 	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
/* Patch a previously emitted goto: write the PC-relative displacement
   TO - (FROM + SIZE) into the 4 bytes at FROM in the inferior.  */
1751 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1753   int diff = (to - (from + size));
1754   unsigned char buf[sizeof (int)];
1762   memcpy (buf, &diff, sizeof (int));
1763   write_inferior_memory (from, buf, sizeof (int));
/* Push the 64-bit constant NUM: emit "mov $NUM,%rax" (REX.W + b8 with
   an imm64).  NOTE(review): the lines pushing the old top and
   advancing i past the immediate are missing from this excerpt.  */
1767 amd64_emit_const (int64_t num)
1769   unsigned char buf[16];
1771   CORE_ADDR buildaddr = current_insn_ptr;
1774   buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
1775   *((int64_t *) (&buf[i])) = num;
1777   append_insns (&buildaddr, i, buf);
1778   current_insn_ptr = buildaddr;
/* Emit a call to FN.  Uses the short "call rel32" form when FN is
   within a signed 32-bit displacement of the emitted call site;
   otherwise materializes FN in %r10 (call-clobbered, so no save
   needed) and emits an indirect "callq *%r10".  */
1782 amd64_emit_call (CORE_ADDR fn)
1784   unsigned char buf[16];
1786   CORE_ADDR buildaddr;
1789   /* The destination function being in the shared library, may be
1790      >31-bits away off the compiled code pad.  */
1792   buildaddr = current_insn_ptr;
/* rel32 is measured from the end of the 5-byte call instruction.  */
1794   offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1798   if (offset64 > INT_MAX || offset64 < INT_MIN)
1800       /* Offset is too large for a call.  Use callq, but that requires
1801 	 a register, so avoid it if possible.  Use r10, since it is
1802 	 call-clobbered, we don't have to push/pop it.  */
1803       buf[i++] = 0x48; /* mov $fn,%r10 */
1805       memcpy (buf + i, &fn, 8);
1807       buf[i++] = 0xff; /* callq *%r10 */
/* Near-call branch of the if (opcode byte emitted on a missing line).  */
1812       int offset32 = offset64; /* we know we can't overflow here.  */
1813       memcpy (buf + i, &offset32, 4);
1817   append_insns (&buildaddr, i, buf);
1818   current_insn_ptr = buildaddr;
/* Push the value of raw register REG: load the register number into
   %esi (second argument) and call the in-process agent's raw-reg
   fetch function; raw_regs is assumed still live in %rdi.  */
1822 amd64_emit_reg (int reg)
1824   unsigned char buf[16];
1826   CORE_ADDR buildaddr;
1828   /* Assume raw_regs is still in %rdi.  */
1829   buildaddr = current_insn_ptr;
1831   buf[i++] = 0xbe; /* mov $<n>,%esi */
1832   *((int *) (&buf[i])) = reg;
1834   append_insns (&buildaddr, i, buf);
1835   current_insn_ptr = buildaddr;
1836   amd64_emit_call (get_raw_reg_func_addr ());
/* Pop the top of stack into %rax (asm body missing here).  */
1840 amd64_emit_pop (void)
1842   EMIT_ASM (amd64_pop,
/* Flush %rax (cached top of stack) onto the machine stack.  */
1847 amd64_emit_stack_flush (void)
1849   EMIT_ASM (amd64_stack_flush,
/* Zero-extend the low ARG bits of %rax; 8-bit body truncated.  */
1854 amd64_emit_zero_ext (int arg)
1859       EMIT_ASM (amd64_zero_ext_8,
1863       EMIT_ASM (amd64_zero_ext_16,
1864 		"and $0xffff,%rax");
/* 32-bit case masks via a register since and-imm32 sign-extends.  */
1867       EMIT_ASM (amd64_zero_ext_32,
1868 		"mov $0xffffffff,%rcx\n\t"
/* Exchange %rax with the next stack entry (asm body missing).  */
1877 amd64_emit_swap (void)
1879   EMIT_ASM (amd64_swap,
/* Emit "lea N*8(%rsp),%rsp" to drop N stack entries.  The encoding
   uses an 8-bit displacement, hence the <=16 limitation noted below.
   NOTE(review): the modrm/displacement bytes are on lines missing
   from this excerpt.  */
1886 amd64_emit_stack_adjust (int n)
1888   unsigned char buf[16];
1890   CORE_ADDR buildaddr = current_insn_ptr;
1893   buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1897   /* This only handles adjustments up to 16, but we don't expect any more.  */
1899   append_insns (&buildaddr, i, buf);
1900   current_insn_ptr = buildaddr;
1903 /* FN's prototype is `LONGEST(*fn)(int)'.  */
/* Call FN with the literal ARG1 in %edi; FN's result is left in
   %rax, i.e. becomes the new top of stack.  */
1906 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1908   unsigned char buf[16];
1910   CORE_ADDR buildaddr;
1912   buildaddr = current_insn_ptr;
1914   buf[i++] = 0xbf; /* movl $<n>,%edi */
1915   *((int *) (&buf[i])) = arg1;
1917   append_insns (&buildaddr, i, buf);
1918   current_insn_ptr = buildaddr;
1919   amd64_emit_call (fn);
1922 /* FN's prototype is `void(*fn)(int,int64_t)'.  */
/* Call FN with ARG1 in %edi and the current top of stack as the
   second argument; the top is saved around the call because %rax is
   call-clobbered.  */
1925 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
1927   unsigned char buf[16];
1929   CORE_ADDR buildaddr;
1931   buildaddr = current_insn_ptr;
1933   buf[i++] = 0xbf; /* movl $<n>,%edi */
1934   *((int *) (&buf[i])) = arg1;
1936   append_insns (&buildaddr, i, buf);
1937   current_insn_ptr = buildaddr;
1938   EMIT_ASM (amd64_void_call_2_a,
1939 	    /* Save away a copy of the stack top.  */
1941 	    /* Also pass top as the second argument.  */
1943   amd64_emit_call (fn);
1944   EMIT_ASM (amd64_void_call_2_b,
1945 	    /* Restore the stack top, %rax may have been trashed.  */
/* Dispatch table of amd64 bytecode emitters, handed out by
   x86_emit_ops for 64-bit inferiors.  NOTE(review): several member
   lines (mul, lsh, ext, log_not, the bitwise ops, equal, ref, goto
   emitters, pop, swap, const, call, reg) are missing from this
   excerpt — field order must be checked against struct emit_ops.  */
1949 struct emit_ops amd64_emit_ops =
1951     amd64_emit_prologue,
1952     amd64_emit_epilogue,
1957     amd64_emit_rsh_signed,
1958     amd64_emit_rsh_unsigned,
1966     amd64_emit_less_signed,
1967     amd64_emit_less_unsigned,
1971     amd64_write_goto_address,
1976     amd64_emit_stack_flush,
1977     amd64_emit_zero_ext,
1979     amd64_emit_stack_adjust,
1980     amd64_emit_int_call_1,
1981     amd64_emit_void_call_2
1984 #endif /* __x86_64__ */
/* i386 emitters: 64-bit stack values are kept as an %eax (low) /
   %ebx (high) pair — see the add's adc and the epilogue storing both
   halves.  Prologue asm body is truncated in this excerpt.  */
1987 i386_emit_prologue (void)
1989   EMIT_ASM32 (i386_prologue,
1992   /* At this point, the raw regs base address is at 8(%ebp), and the
1993      value pointer is at 12(%ebp).  */
/* Epilogue: store the 64-bit result through the value pointer at
   12(%ebp); frame teardown lines are missing here.  */
1997 i386_emit_epilogue (void)
1999   EMIT_ASM32 (i386_epilogue,
2000 	      "mov 12(%ebp),%ecx\n\t"
2001 	      "mov %eax,(%ecx)\n\t"
2002 	      "mov %ebx,0x4(%ecx)\n\t"
/* 64-bit add: add low words, then add-with-carry the high words.  */
2009 i386_emit_add (void)
2011   EMIT_ASM32 (i386_add,
2012 	      "add (%esp),%eax\n\t"
2013 	      "adc 0x4(%esp),%ebx\n\t"
2014 	      "lea 0x8(%esp),%esp");
/* 64-bit subtract via sub/sbb; tail of asm missing.  */
2018 i386_emit_sub (void)
2020   EMIT_ASM32 (i386_sub,
2021 	      "subl %eax,(%esp)\n\t"
2022 	      "sbbl %ebx,4(%esp)\n\t"
/* NOTE(review): bodies of mul/lsh/rsh_signed/rsh_unsigned are
   missing from this excerpt — only the signatures remain.  */
2028 i386_emit_mul (void)
2034 i386_emit_lsh (void)
2040 i386_emit_rsh_signed (void)
2046 i386_emit_rsh_unsigned (void)
/* Sign-extend the low ARG bits of the %eax/%ebx pair; each case
   copies %eax into %ebx so the high half can be derived (the
   shift/sar lines are missing from this excerpt).  */
2052 i386_emit_ext (int arg)
2057       EMIT_ASM32 (i386_ext_8,
2060 		  "movl %eax,%ebx\n\t"
2064       EMIT_ASM32 (i386_ext_16,
2066 		  "movl %eax,%ebx\n\t"
2070       EMIT_ASM32 (i386_ext_32,
2071 		  "movl %eax,%ebx\n\t"
/* Logical NOT of the 64-bit top of stack; asm truncated here.  */
2080 i386_emit_log_not (void)
2082   EMIT_ASM32 (i386_log_not,
2084 	      "test %eax,%eax\n\t"
/* 64-bit bitwise AND of the two top entries; pops one slot.  */
2091 i386_emit_bit_and (void)
2093   EMIT_ASM32 (i386_and,
2094 	      "and (%esp),%eax\n\t"
2095 	      "and 0x4(%esp),%ebx\n\t"
2096 	      "lea 0x8(%esp),%esp");
/* 64-bit bitwise OR.  */
2100 i386_emit_bit_or (void)
2102   EMIT_ASM32 (i386_or,
2103 	      "or (%esp),%eax\n\t"
2104 	      "or 0x4(%esp),%ebx\n\t"
2105 	      "lea 0x8(%esp),%esp");
/* 64-bit bitwise XOR.  */
2109 i386_emit_bit_xor (void)
2111   EMIT_ASM32 (i386_xor,
2112 	      "xor (%esp),%eax\n\t"
2113 	      "xor 0x4(%esp),%ebx\n\t"
2114 	      "lea 0x8(%esp),%esp");
/* 64-bit bitwise NOT: complement both halves in place.  */
2118 i386_emit_bit_not (void)
2120   EMIT_ASM32 (i386_bit_not,
2121 	      "xor $0xffffffff,%eax\n\t"
2122 	      "xor $0xffffffff,%ebx\n\t");
/* 64-bit comparisons: compare high words first, then low words, and
   leave a boolean on top (the mov-0/mov-1 lines are missing from
   this excerpt).  */
2126 i386_emit_equal (void)
2128   EMIT_ASM32 (i386_equal,
2129 	      "cmpl %ebx,4(%esp)\n\t"
2130 	      "jne .Li386_equal_false\n\t"
2131 	      "cmpl %eax,(%esp)\n\t"
2132 	      "je .Li386_equal_true\n\t"
2133 	      ".Li386_equal_false:\n\t"
2135 	      "jmp .Li386_equal_end\n\t"
2136 	      ".Li386_equal_true:\n\t"
2138 	      ".Li386_equal_end:\n\t"
2140 	      "lea 0x8(%esp),%esp");
/* Signed 64-bit less-than.  NOTE(review): the second "jl", on the
   LOW words, looks wrong — the low half of a multi-word signed
   compare should use an unsigned jb; confirm against upstream GDB,
   which later fixed exactly this.  */
2144 i386_emit_less_signed (void)
2146   EMIT_ASM32 (i386_less_signed,
2147 	      "cmpl %ebx,4(%esp)\n\t"
2148 	      "jl .Li386_less_signed_true\n\t"
2149 	      "jne .Li386_less_signed_false\n\t"
2150 	      "cmpl %eax,(%esp)\n\t"
2151 	      "jl .Li386_less_signed_true\n\t"
2152 	      ".Li386_less_signed_false:\n\t"
2154 	      "jmp .Li386_less_signed_end\n\t"
2155 	      ".Li386_less_signed_true:\n\t"
2157 	      ".Li386_less_signed_end:\n\t"
2159 	      "lea 0x8(%esp),%esp");
/* Unsigned 64-bit less-than: jb on both halves.  */
2163 i386_emit_less_unsigned (void)
2165   EMIT_ASM32 (i386_less_unsigned,
2166 	      "cmpl %ebx,4(%esp)\n\t"
2167 	      "jb .Li386_less_unsigned_true\n\t"
2168 	      "jne .Li386_less_unsigned_false\n\t"
2169 	      "cmpl %eax,(%esp)\n\t"
2170 	      "jb .Li386_less_unsigned_true\n\t"
2171 	      ".Li386_less_unsigned_false:\n\t"
2173 	      "jmp .Li386_less_unsigned_end\n\t"
2174 	      ".Li386_less_unsigned_true:\n\t"
2176 	      ".Li386_less_unsigned_end:\n\t"
2178 	      "lea 0x8(%esp),%esp");
/* Dereference the address in %eax, loading SIZE bytes into the
   %eax/%ebx pair (8-byte case loads both halves).  Switch lines and
   the 1/2-byte bodies are missing from this excerpt.  */
2182 i386_emit_ref (int size)
2187       EMIT_ASM32 (i386_ref1,
2191       EMIT_ASM32 (i386_ref2,
2195       EMIT_ASM32 (i386_ref4,
2196 		  "movl (%eax),%eax");
2199       EMIT_ASM32 (i386_ref8,
2200 		  "movl 4(%eax),%ebx\n\t"
2201 		  "movl (%eax),%eax");
/* Conditional branch on the popped 64-bit top of stack; the jne is
   raw bytes so i386_write_goto_address knows where the rel32 lives.  */
2207 i386_emit_if_goto (int *offset_p, int *size_p)
2209   EMIT_ASM32 (i386_if_goto,
2215 	      /* Don't trust the assembler to choose the right jump */
2216 	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2219   *offset_p = 11; /* be sure that this matches the sequence above */
/* Unconditional jump, raw-encoded jmp rel32.  */
2225 i386_emit_goto (int *offset_p, int *size_p)
2227   EMIT_ASM32 (i386_goto,
2228 	      /* Don't trust the assembler to choose the right jump */
2229 	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
/* Patch a previously emitted goto: write the PC-relative displacement
   TO - (FROM + SIZE) into the 4 bytes at FROM in the inferior.  */
2237 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2239   int diff = (to - (from + size));
2240   unsigned char buf[sizeof (int)];
2242   /* We're only doing 4-byte sizes at the moment.  */
2249   memcpy (buf, &diff, sizeof (int));
2250   write_inferior_memory (from, buf, sizeof (int));
/* Push the 64-bit constant NUM as an %eax/%ebx pair: mov imm32 into
   %eax, and either mov the high word into %ebx or xor it to zero.
   NOTE(review): the push-old-top and i-advance lines are missing
   from this excerpt.  */
2254 i386_emit_const (int64_t num)
2256   unsigned char buf[16];
2258   CORE_ADDR buildaddr = current_insn_ptr;
2261   buf[i++] = 0xb8; /* mov $<n>,%eax */
2262   *((int *) (&buf[i])) = (num & 0xffffffff);
2264   hi = ((num >> 32) & 0xffffffff);
2267       buf[i++] = 0xbb; /* mov $<n>,%ebx */
2268       *((int *) (&buf[i])) = hi;
2273       buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2275   append_insns (&buildaddr, i, buf);
2276   current_insn_ptr = buildaddr;
/* Emit a 5-byte "call rel32" to FN; on i386 every destination is
   reachable with a 32-bit displacement, so no far-call fallback is
   needed (contrast amd64_emit_call).  */
2280 i386_emit_call (CORE_ADDR fn)
2282   unsigned char buf[16];
2284   CORE_ADDR buildaddr;
2286   buildaddr = current_insn_ptr;
2288   buf[i++] = 0xe8; /* call <reladdr> */
/* rel32 measured from the end of the 5-byte instruction.  */
2289   offset = ((int) fn) - (buildaddr + 5);
2290   memcpy (buf + 1, &offset, 4);
2291   append_insns (&buildaddr, 5, buf);
2292   current_insn_ptr = buildaddr;
/* Push the value of raw register REG: stash the register number,
   arrange the raw-regs base (from 8(%ebp)) and REG as stack
   arguments, and call the agent's raw-reg fetch function.  */
2296 i386_emit_reg (int reg)
2298   unsigned char buf[16];
2300   CORE_ADDR buildaddr;
2302   EMIT_ASM32 (i386_reg_a,
2304   buildaddr = current_insn_ptr;
2306   buf[i++] = 0xb8; /* mov $<n>,%eax */
2307   *((int *) (&buf[i])) = reg;
2309   append_insns (&buildaddr, i, buf);
2310   current_insn_ptr = buildaddr;
2311   EMIT_ASM32 (i386_reg_b,
2312 	      "mov %eax,4(%esp)\n\t"
2313 	      "mov 8(%ebp),%eax\n\t"
2315   i386_emit_call (get_raw_reg_func_addr ());
/* Clean the two argument slots off the stack.  */
2316   EMIT_ASM32 (i386_reg_c,
2318 	      "lea 0x8(%esp),%esp");
/* Pop the top of stack into the %eax/%ebx pair (asm body missing
   from this excerpt).  */
2322 i386_emit_pop (void)
2324   EMIT_ASM32 (i386_pop,
/* Flush the cached top of stack (%eax/%ebx) onto the machine stack.  */
2330 i386_emit_stack_flush (void)
2332   EMIT_ASM32 (i386_stack_flush,
/* Zero-extend the low ARG bits of the %eax/%ebx pair; each case also
   clears %ebx (those lines are missing from this excerpt).  */
2338 i386_emit_zero_ext (int arg)
2343       EMIT_ASM32 (i386_zero_ext_8,
2344 		  "and $0xff,%eax\n\t"
2348       EMIT_ASM32 (i386_zero_ext_16,
2349 		  "and $0xffff,%eax\n\t"
2353       EMIT_ASM32 (i386_zero_ext_32,
/* Exchange the top two 64-bit stack entries (asm body missing).  */
2362 i386_emit_swap (void)
2364   EMIT_ASM32 (i386_swap,
/* Emit "lea N*8(%esp),%esp" to drop N entries; displacement bytes
   are on lines missing from this excerpt.  */
2374 i386_emit_stack_adjust (int n)
2376   unsigned char buf[16];
2378   CORE_ADDR buildaddr = current_insn_ptr;
2381   buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2385   append_insns (&buildaddr, i, buf);
2386   current_insn_ptr = buildaddr;
2389 /* FN's prototype is `LONGEST(*fn)(int)'.  */
/* Call FN with the literal ARG1 as its single stack argument; FN's
   64-bit result comes back in %eax/%edx per the i386 ABI (the
   copy-to-%ebx line is among those missing from this excerpt).  */
2392 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2394   unsigned char buf[16];
2396   CORE_ADDR buildaddr;
2398   EMIT_ASM32 (i386_int_call_1_a,
2399 	      /* Reserve a bit of stack space.  */
2401   /* Put the one argument on the stack.  */
2402   buildaddr = current_insn_ptr;
2404   buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
2407   *((int *) (&buf[i])) = arg1;
2409   append_insns (&buildaddr, i, buf);
2410   current_insn_ptr = buildaddr;
2411   i386_emit_call (fn);
/* Drop the reserved argument space.  */
2412   EMIT_ASM32 (i386_int_call_1_c,
2414 	      "lea 0x8(%esp),%esp");
2417 /* FN's prototype is `void(*fn)(int,int64_t)'.  */
/* Call FN with ARG1 as the first argument and the current top of
   stack (the %eax/%ebx pair) as the 64-bit second argument; %eax is
   saved around the call since it is call-clobbered.  */
2420 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2422   unsigned char buf[16];
2424   CORE_ADDR buildaddr;
2426   EMIT_ASM32 (i386_void_call_2_a,
2427 	      /* Preserve %eax only; we don't have to worry about %ebx.  */
2429 	      /* Reserve a bit of stack space for arguments.  */
2430 	      "sub $0x10,%esp\n\t"
2431 	      /* Copy "top" to the second argument position.  (Note that
2432 		 we can't assume function won't scribble on its
2433 		 arguments, so don't try to restore from this.)  */
2434 	      "mov %eax,4(%esp)\n\t"
2435 	      "mov %ebx,8(%esp)");
2436   /* Put the first argument on the stack.  */
2437   buildaddr = current_insn_ptr;
2439   buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
2442   *((int *) (&buf[i])) = arg1;
2444   append_insns (&buildaddr, i, buf);
2445   current_insn_ptr = buildaddr;
2446   i386_emit_call (fn);
2447   EMIT_ASM32 (i386_void_call_2_b,
2448 	      "lea 0x10(%esp),%esp\n\t"
2449 	      /* Restore original stack top.  */
/* Dispatch table of i386 bytecode emitters, handed out by
   x86_emit_ops for 32-bit inferiors.  NOTE(review): many member
   lines are missing from this excerpt — field order must be checked
   against struct emit_ops and the amd64 table.  */
2453 struct emit_ops i386_emit_ops =
2461     i386_emit_rsh_signed,
2462     i386_emit_rsh_unsigned,
2470     i386_emit_less_signed,
2471     i386_emit_less_unsigned,
2475     i386_write_goto_address,
2480     i386_emit_stack_flush,
2483     i386_emit_stack_adjust,
2484     i386_emit_int_call_1,
2485     i386_emit_void_call_2
/* Select the emit_ops table matching the inferior's word size:
   amd64 for 8-byte registers, i386 otherwise.  NOTE(review): the
   function name line and the `if (use_64bit)` line are missing from
   this excerpt.  */
2489 static struct emit_ops *
2493   int use_64bit = register_size (0) == 8;
2496     return &amd64_emit_ops;
2499   return &i386_emit_ops;
2502 /* This is initialized assuming an amd64 target.
2503 x86_arch_setup will correct it for i386 or amd64 targets. */
2505 struct linux_target_ops the_low_target =
2521 x86_stopped_by_watchpoint,
2522 x86_stopped_data_address,
2523 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2524 native i386 case (no registers smaller than an xfer unit), and are not
2525 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2528 /* need to fix up i386 siginfo if host is amd64 */
2530 x86_linux_new_process,
2531 x86_linux_new_thread,
2532 x86_linux_prepare_to_resume,
2533 x86_linux_process_qsupported,
2534 x86_supports_tracepoints,
2535 x86_get_thread_area,
2536 x86_install_fast_tracepoint_jump_pad,