1 /* Target-dependent code for AMD64.
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
6 Contributed by Jiri Smid, SuSE Labs.
8 This file is part of GDB.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "opcode/i386.h"
26 #include "arch-utils.h"
28 #include "dummy-frame.h"
30 #include "frame-base.h"
31 #include "frame-unwind.h"
40 #include "gdb_assert.h"
42 #include "amd64-tdep.h"
43 #include "i387-tdep.h"
45 /* Note that the AMD64 architecture was previously known as x86-64.
46 The latter is (forever) engraved into the canonical system name as
47 returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSDs have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */
52 /* Register information. */
54 static const char *amd64_register_names[] =
56 "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",
58 /* %r8 is indeed register number 8. */
59 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
60 "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",
62 /* %st0 is register number 24. */
63 "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
64 "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",
66 /* %xmm0 is register number 40. */
67 "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
72 /* Total number of registers. */
73 #define AMD64_NUM_REGS ARRAY_SIZE (amd64_register_names)
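/* For orientation (illustrative note, not from the original source):
   with the layout above, %rax ... %rsp are registers 0-7, %r8-%r15 are
   8-15, %rip is 16, %eflags 17, the segment registers 18-23, %st0-%st7
   24-31, the x87 control/status registers 32-39, %xmm0-%xmm15 40-55
   and %mxcsr 56, giving AMD64_NUM_REGS == 57.  */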
75 /* Return the name of register REGNUM. */
78 amd64_register_name (struct gdbarch *gdbarch, int regnum)
80 if (regnum >= 0 && regnum < AMD64_NUM_REGS)
81 return amd64_register_names[regnum];
/* Return the GDB type object for the "standard" data type of data in
   register REGNUM.  */
90 amd64_register_type (struct gdbarch *gdbarch, int regnum)
92 if (regnum >= AMD64_RAX_REGNUM && regnum <= AMD64_RDI_REGNUM)
93 return builtin_type (gdbarch)->builtin_int64;
94 if (regnum == AMD64_RBP_REGNUM || regnum == AMD64_RSP_REGNUM)
95 return builtin_type (gdbarch)->builtin_data_ptr;
96 if (regnum >= AMD64_R8_REGNUM && regnum <= AMD64_R15_REGNUM)
97 return builtin_type (gdbarch)->builtin_int64;
98 if (regnum == AMD64_RIP_REGNUM)
99 return builtin_type (gdbarch)->builtin_func_ptr;
100 if (regnum == AMD64_EFLAGS_REGNUM)
101 return i386_eflags_type (gdbarch);
102 if (regnum >= AMD64_CS_REGNUM && regnum <= AMD64_GS_REGNUM)
103 return builtin_type (gdbarch)->builtin_int32;
104 if (regnum >= AMD64_ST0_REGNUM && regnum <= AMD64_ST0_REGNUM + 7)
105 return i387_ext_type (gdbarch);
106 if (regnum >= AMD64_FCTRL_REGNUM && regnum <= AMD64_FCTRL_REGNUM + 7)
107 return builtin_type (gdbarch)->builtin_int32;
108 if (regnum >= AMD64_XMM0_REGNUM && regnum <= AMD64_XMM0_REGNUM + 15)
109 return i386_sse_type (gdbarch);
110 if (regnum == AMD64_MXCSR_REGNUM)
111 return i386_mxcsr_type (gdbarch);
113 internal_error (__FILE__, __LINE__, _("invalid regnum"));
/* DWARF Register Number Mapping as defined in the System V psABI.  */
119 static int amd64_dwarf_regmap[] =
121 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
122 AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
123 AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
124 AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
126 /* Frame Pointer Register RBP. */
129 /* Stack Pointer Register RSP. */
132 /* Extended Integer Registers 8 - 15. */
133 8, 9, 10, 11, 12, 13, 14, 15,
135 /* Return Address RA. Mapped to RIP. */
138 /* SSE Registers 0 - 7. */
139 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
140 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
141 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
142 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
144 /* Extended SSE Registers 8 - 15. */
145 AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
146 AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
147 AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
148 AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
150 /* Floating Point Registers 0-7. */
151 AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
152 AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
153 AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
154 AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
156 /* Control and Status Flags Register. */
159 /* Selector Registers. */
169 /* Segment Base Address Registers. */
175 /* Special Selector Registers. */
179 /* Floating Point Control Registers. */
185 static const int amd64_dwarf_regmap_len =
186 (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
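/* Illustrative examples (not from the original source) of the mapping
   above: DWARF register 7 is %rsp and DWARF register 16, the return
   address column, is mapped to %rip, so

     amd64_dwarf_reg_to_regnum (gdbarch, 7)  == AMD64_RSP_REGNUM
     amd64_dwarf_reg_to_regnum (gdbarch, 16) == AMD64_RIP_REGNUM

   while DWARF registers 17-32 select %xmm0-%xmm15.  */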
188 /* Convert DWARF register number REG to the appropriate register
189 number used by GDB. */
192 amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
196 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
197 regnum = amd64_dwarf_regmap[reg];
200 warning (_("Unmapped DWARF Register #%d encountered."), reg);
205 /* Map architectural register numbers to gdb register numbers. */
207 static const int amd64_arch_regmap[16] =
209 AMD64_RAX_REGNUM, /* %rax */
210 AMD64_RCX_REGNUM, /* %rcx */
211 AMD64_RDX_REGNUM, /* %rdx */
212 AMD64_RBX_REGNUM, /* %rbx */
213 AMD64_RSP_REGNUM, /* %rsp */
214 AMD64_RBP_REGNUM, /* %rbp */
215 AMD64_RSI_REGNUM, /* %rsi */
216 AMD64_RDI_REGNUM, /* %rdi */
217 AMD64_R8_REGNUM, /* %r8 */
218 AMD64_R9_REGNUM, /* %r9 */
219 AMD64_R10_REGNUM, /* %r10 */
220 AMD64_R11_REGNUM, /* %r11 */
221 AMD64_R12_REGNUM, /* %r12 */
222 AMD64_R13_REGNUM, /* %r13 */
223 AMD64_R14_REGNUM, /* %r14 */
224 AMD64_R15_REGNUM /* %r15 */
227 static const int amd64_arch_regmap_len =
228 (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
230 /* Convert architectural register number REG to the appropriate register
231 number used by GDB. */
234 amd64_arch_reg_to_regnum (int reg)
236 gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
238 return amd64_arch_regmap[reg];
243 /* Register classes as defined in the psABI. */
257 /* Return the union class of CLASS1 and CLASS2. See the psABI for
260 static enum amd64_reg_class
261 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
263 /* Rule (a): If both classes are equal, this is the resulting class. */
264 if (class1 == class2)
267 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
268 is the other class. */
269 if (class1 == AMD64_NO_CLASS)
271 if (class2 == AMD64_NO_CLASS)
274 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
275 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
278 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
279 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
280 return AMD64_INTEGER;
  /* Rule (e): If one of the classes is X87, X87UP or COMPLEX_X87,
     MEMORY is used as the class.  */
284 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
285 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
286 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
289 /* Rule (f): Otherwise class SSE is used. */
293 static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);
295 /* Return non-zero if TYPE is a non-POD structure or union type. */
298 amd64_non_pod_p (struct type *type)
300 /* ??? A class with a base class certainly isn't POD, but does this
301 catch all non-POD structure types? */
302 if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
308 /* Classify TYPE according to the rules for aggregate (structures and
309 arrays) and union types, and store the result in CLASS. */
312 amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
314 int len = TYPE_LENGTH (type);
316 /* 1. If the size of an object is larger than two eightbytes, or in
317 C++, is a non-POD structure or union type, or contains
318 unaligned fields, it has class memory. */
319 if (len > 16 || amd64_non_pod_p (type))
321 class[0] = class[1] = AMD64_MEMORY;
325 /* 2. Both eightbytes get initialized to class NO_CLASS. */
326 class[0] = class[1] = AMD64_NO_CLASS;
328 /* 3. Each field of an object is classified recursively so that
329 always two fields are considered. The resulting class is
     calculated according to the classes of the fields in the
     eightbyte.  */
333 if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
335 struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
337 /* All fields in an array have the same type. */
338 amd64_classify (subtype, class);
339 if (len > 8 && class[1] == AMD64_NO_CLASS)
346 /* Structure or union. */
347 gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
348 || TYPE_CODE (type) == TYPE_CODE_UNION);
350 for (i = 0; i < TYPE_NFIELDS (type); i++)
352 struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
353 int pos = TYPE_FIELD_BITPOS (type, i) / 64;
354 enum amd64_reg_class subclass[2];
356 /* Ignore static fields. */
357 if (field_is_static (&TYPE_FIELD (type, i)))
360 gdb_assert (pos == 0 || pos == 1);
362 amd64_classify (subtype, subclass);
363 class[pos] = amd64_merge_classes (class[pos], subclass[0]);
365 class[1] = amd64_merge_classes (class[1], subclass[1]);
369 /* 4. Then a post merger cleanup is done: */
371 /* Rule (a): If one of the classes is MEMORY, the whole argument is
373 if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
374 class[0] = class[1] = AMD64_MEMORY;
  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
378 if (class[0] == AMD64_SSEUP)
379 class[0] = AMD64_SSE;
380 if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
381 class[1] = AMD64_SSE;
384 /* Classify TYPE, and store the result in CLASS. */
387 amd64_classify (struct type *type, enum amd64_reg_class class[2])
389 enum type_code code = TYPE_CODE (type);
390 int len = TYPE_LENGTH (type);
392 class[0] = class[1] = AMD64_NO_CLASS;
394 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
395 long, long long, and pointers are in the INTEGER class. Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
398 if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
399 || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
400 || code == TYPE_CODE_CHAR
401 || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
402 && (len == 1 || len == 2 || len == 4 || len == 8))
403 class[0] = AMD64_INTEGER;
  /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
     are in class SSE.  */
407 else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
408 && (len == 4 || len == 8))
410 class[0] = AMD64_SSE;
  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant half belongs to class SSE, the
     most significant half to class SSEUP.  */
415 else if (code == TYPE_CODE_DECFLOAT && len == 16)
416 /* FIXME: __float128, __m128. */
417 class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;
  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
422 else if (code == TYPE_CODE_FLT && len == 16)
423 /* Class X87 and X87UP. */
424 class[0] = AMD64_X87, class[1] = AMD64_X87UP;
427 else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
428 || code == TYPE_CODE_UNION)
429 amd64_classify_aggregate (type, class);
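/* Worked example (illustrative, not from the original source): for a
   hypothetical argument type

     struct s { long l; double d; };

   amd64_classify_aggregate sees a 16-byte object, classifies the first
   eightbyte (the long) as AMD64_INTEGER and the second (the double) as
   AMD64_SSE, so class[0] == AMD64_INTEGER and class[1] == AMD64_SSE.
   Anything larger than 16 bytes, or a non-POD C++ type, ends up as
   AMD64_MEMORY.  */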
432 static enum return_value_convention
433 amd64_return_value (struct gdbarch *gdbarch, struct type *func_type,
434 struct type *type, struct regcache *regcache,
435 gdb_byte *readbuf, const gdb_byte *writebuf)
437 enum amd64_reg_class class[2];
438 int len = TYPE_LENGTH (type);
439 static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
440 static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
445 gdb_assert (!(readbuf && writebuf));
447 /* 1. Classify the return type with the classification algorithm. */
448 amd64_classify (type, class);
450 /* 2. If the type has class MEMORY, then the caller provides space
451 for the return value and passes the address of this storage in
452 %rdi as if it were the first argument to the function. In effect,
453 this address becomes a hidden first argument.
455 On return %rax will contain the address that has been passed in
456 by the caller in %rdi. */
457 if (class[0] == AMD64_MEMORY)
      /* As indicated by the comment above, the ABI guarantees that we
         can always find the return value just after the function has
         returned.  */
467 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
468 read_memory (addr, readbuf, TYPE_LENGTH (type));
471 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
474 gdb_assert (class[1] != AMD64_MEMORY);
475 gdb_assert (len <= 16);
477 for (i = 0; len > 0; i++, len -= 8)
485 /* 3. If the class is INTEGER, the next available register
486 of the sequence %rax, %rdx is used. */
487 regnum = integer_regnum[integer_reg++];
491 /* 4. If the class is SSE, the next available SSE register
492 of the sequence %xmm0, %xmm1 is used. */
493 regnum = sse_regnum[sse_reg++];
497 /* 5. If the class is SSEUP, the eightbyte is passed in the
498 upper half of the last used SSE register. */
499 gdb_assert (sse_reg > 0);
500 regnum = sse_regnum[sse_reg - 1];
505 /* 6. If the class is X87, the value is returned on the X87
506 stack in %st0 as 80-bit x87 number. */
507 regnum = AMD64_ST0_REGNUM;
509 i387_return_value (gdbarch, regcache);
513 /* 7. If the class is X87UP, the value is returned together
514 with the previous X87 value in %st0. */
515 gdb_assert (i > 0 && class[0] == AMD64_X87);
516 regnum = AMD64_ST0_REGNUM;
525 gdb_assert (!"Unexpected register class.");
528 gdb_assert (regnum != -1);
531 regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
534 regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
538 return RETURN_VALUE_REGISTER_CONVENTION;
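/* Illustrative example (not from the original source): for the
   hypothetical `struct s { long l; double d; }' above, the loop places
   the INTEGER eightbyte in %rax and the SSE eightbyte in %xmm0, while a
   16-byte struct of two longs comes back in %rax:%rdx.  A plain
   `long double' instead takes the X87 path and is read from %st0.  */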
543 amd64_push_arguments (struct regcache *regcache, int nargs,
544 struct value **args, CORE_ADDR sp, int struct_return)
546 static int integer_regnum[] =
548 AMD64_RDI_REGNUM, /* %rdi */
549 AMD64_RSI_REGNUM, /* %rsi */
550 AMD64_RDX_REGNUM, /* %rdx */
551 AMD64_RCX_REGNUM, /* %rcx */
555 static int sse_regnum[] =
557 /* %xmm0 ... %xmm7 */
558 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
559 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
560 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
561 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
563 struct value **stack_args = alloca (nargs * sizeof (struct value *));
564 int num_stack_args = 0;
565 int num_elements = 0;
571 /* Reserve a register for the "hidden" argument. */
575 for (i = 0; i < nargs; i++)
577 struct type *type = value_type (args[i]);
578 int len = TYPE_LENGTH (type);
579 enum amd64_reg_class class[2];
580 int needed_integer_regs = 0;
581 int needed_sse_regs = 0;
584 /* Classify argument. */
585 amd64_classify (type, class);
      /* Calculate the number of integer and SSE registers needed for
         this argument.  */
589 for (j = 0; j < 2; j++)
591 if (class[j] == AMD64_INTEGER)
592 needed_integer_regs++;
593 else if (class[j] == AMD64_SSE)
597 /* Check whether enough registers are available, and if the
598 argument should be passed in registers at all. */
599 if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
600 || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
601 || (needed_integer_regs == 0 && needed_sse_regs == 0))
603 /* The argument will be passed on the stack. */
604 num_elements += ((len + 7) / 8);
605 stack_args[num_stack_args++] = args[i];
609 /* The argument will be passed in registers. */
610 const gdb_byte *valbuf = value_contents (args[i]);
613 gdb_assert (len <= 16);
615 for (j = 0; len > 0; j++, len -= 8)
623 regnum = integer_regnum[integer_reg++];
627 regnum = sse_regnum[sse_reg++];
631 gdb_assert (sse_reg > 0);
632 regnum = sse_regnum[sse_reg - 1];
637 gdb_assert (!"Unexpected register class.");
640 gdb_assert (regnum != -1);
641 memset (buf, 0, sizeof buf);
642 memcpy (buf, valbuf + j * 8, min (len, 8));
643 regcache_raw_write_part (regcache, regnum, offset, 8, buf);
648 /* Allocate space for the arguments on the stack. */
649 sp -= num_elements * 8;
651 /* The psABI says that "The end of the input argument area shall be
652 aligned on a 16 byte boundary." */
655 /* Write out the arguments to the stack. */
656 for (i = 0; i < num_stack_args; i++)
658 struct type *type = value_type (stack_args[i]);
659 const gdb_byte *valbuf = value_contents (stack_args[i]);
660 int len = TYPE_LENGTH (type);
662 write_memory (sp + element * 8, valbuf, len);
663 element += ((len + 7) / 8);
  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used."  */
670 regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
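/* Illustrative example (not from the original source): for a
   hypothetical call f (1L, 2.5, s), where s is the 16-byte
   `struct s { long l; double d; }' used above, the classification
   yields: 1L -> %rdi, 2.5 -> %xmm0, s.l -> %rsi, s.d -> %xmm1, and
   %al is set to 2, the number of SSE registers used, for the benefit
   of varargs callees.  */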
675 amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
676 struct regcache *regcache, CORE_ADDR bp_addr,
677 int nargs, struct value **args, CORE_ADDR sp,
678 int struct_return, CORE_ADDR struct_addr)
680 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
683 /* Pass arguments. */
684 sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);
  /* Pass "hidden" argument.  */
689 store_unsigned_integer (buf, 8, byte_order, struct_addr);
690 regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
693 /* Store return address. */
695 store_unsigned_integer (buf, 8, byte_order, bp_addr);
696 write_memory (sp, buf, 8);
698 /* Finally, update the stack pointer... */
699 store_unsigned_integer (buf, 8, byte_order, sp);
700 regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
702 /* ...and fake a frame pointer. */
703 regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
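/* Sketch of the resulting dummy-call frame (illustrative, not from the
   original source): on return from amd64_push_dummy_call the new %rsp
   points at the return address BP_ADDR, any struct-return buffer
   address has been placed in %rdi as the hidden first argument, the
   remaining arguments sit in registers or just above the return
   address, and %rbp has been given the same value as %rsp as a fake
   frame pointer.  */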
708 /* Displaced instruction handling. */
710 /* A partially decoded instruction.
711 This contains enough details for displaced stepping purposes. */
715 /* The number of opcode bytes. */
717 /* The offset of the rex prefix or -1 if not present. */
719 /* The offset to the first opcode byte. */
721 /* The offset to the modrm byte or -1 if not present. */
724 /* The raw instruction. */
728 struct displaced_step_closure
730 /* For rip-relative insns, saved copy of the reg we use instead of %rip. */
735 /* Details of the instruction. */
736 struct amd64_insn insn_details;
738 /* Amount of space allocated to insn_buf. */
741 /* The possibly modified insn.
742 This is a variable-length field. */
743 gdb_byte insn_buf[1];
746 /* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
747 ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
748 at which point delete these in favor of libopcodes' versions). */
750 static const unsigned char onebyte_has_modrm[256] = {
751 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
752 /* ------------------------------- */
753 /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
754 /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
755 /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
756 /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
757 /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
758 /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
759 /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
760 /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
761 /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
762 /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
763 /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
764 /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
765 /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
766 /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
767 /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
768 /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
769 /* ------------------------------- */
770 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
773 static const unsigned char twobyte_has_modrm[256] = {
774 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
775 /* ------------------------------- */
776 /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
777 /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
778 /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
779 /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
780 /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
781 /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
782 /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
783 /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
784 /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
785 /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
786 /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
787 /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
788 /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
789 /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
790 /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
791 /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */
792 /* ------------------------------- */
793 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
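/* Example lookups (illustrative, not from the original source):
   onebyte_has_modrm[0x89] is 1 because "mov %reg, r/m64" carries a
   ModRM byte, onebyte_has_modrm[0xe8] is 0 because "call rel32" does
   not, and twobyte_has_modrm[0x05] is 0 because the two-byte opcode
   0x0f 0x05 (syscall) has no ModRM byte -- which is what lets
   amd64_syscall_p below recognize it from the opcode bytes alone.  */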
796 static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
799 rex_prefix_p (gdb_byte pfx)
801 return REX_PREFIX_P (pfx);
804 /* Skip the legacy instruction prefixes in INSN.
805 We assume INSN is properly sentineled so we don't have to worry
806 about falling off the end of the buffer. */
809 amd64_skip_prefixes (gdb_byte *insn)
815 case DATA_PREFIX_OPCODE:
816 case ADDR_PREFIX_OPCODE:
817 case CS_PREFIX_OPCODE:
818 case DS_PREFIX_OPCODE:
819 case ES_PREFIX_OPCODE:
820 case FS_PREFIX_OPCODE:
821 case GS_PREFIX_OPCODE:
822 case SS_PREFIX_OPCODE:
823 case LOCK_PREFIX_OPCODE:
824 case REPE_PREFIX_OPCODE:
825 case REPNE_PREFIX_OPCODE:
/* fprintf-function for amd64_insn_length.
   This function is a nop; we don't want to print anything, we just
   want to compute the length of the insn.  */
841 static int ATTR_FORMAT (printf, 2, 3)
842 amd64_insn_length_fprintf (void *stream, const char *format, ...)
847 /* Initialize a struct disassemble_info for amd64_insn_length. */
850 amd64_insn_length_init_dis (struct gdbarch *gdbarch,
851 struct disassemble_info *di,
852 const gdb_byte *insn, int max_len,
855 init_disassemble_info (di, NULL, amd64_insn_length_fprintf);
857 /* init_disassemble_info installs buffer_read_memory, etc.
858 so we don't need to do that here.
859 The cast is necessary until disassemble_info is const-ified. */
860 di->buffer = (gdb_byte *) insn;
861 di->buffer_length = max_len;
862 di->buffer_vma = addr;
864 di->arch = gdbarch_bfd_arch_info (gdbarch)->arch;
865 di->mach = gdbarch_bfd_arch_info (gdbarch)->mach;
866 di->endian = gdbarch_byte_order (gdbarch);
867 di->endian_code = gdbarch_byte_order_for_code (gdbarch);
869 disassemble_init_for_target (di);
872 /* Return the length in bytes of INSN.
873 MAX_LEN is the size of the buffer containing INSN.
874 libopcodes currently doesn't export a utility to compute the
875 instruction length, so use the disassembler until then. */
878 amd64_insn_length (struct gdbarch *gdbarch,
879 const gdb_byte *insn, int max_len, CORE_ADDR addr)
881 struct disassemble_info di;
883 amd64_insn_length_init_dis (gdbarch, &di, insn, max_len, addr);
885 return gdbarch_print_insn (gdbarch, addr, &di);
/* Return an integer register (other than RSP) that is unused as an
   input operand of INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */
896 amd64_get_unused_input_int_reg (const struct amd64_insn *details)
898 /* 1 bit for each reg */
899 int used_regs_mask = 0;
901 /* There can be at most 3 int regs used as inputs in an insn, and we have
902 7 to choose from (RAX ... RDI, sans RSP).
903 This allows us to take a conservative approach and keep things simple.
     E.g., by avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */
908 used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarly avoid RDX, an implicit operand of divides.  */
910 used_regs_mask |= 1 << EDX_REG_NUM;
912 used_regs_mask |= 1 << ESP_REG_NUM;
914 /* If the opcode is one byte long and there's no ModRM byte,
915 assume the opcode specifies a register. */
916 if (details->opcode_len == 1 && details->modrm_offset == -1)
917 used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
919 /* Mark used regs in the modrm/sib bytes. */
920 if (details->modrm_offset != -1)
922 int modrm = details->raw_insn[details->modrm_offset];
923 int mod = MODRM_MOD_FIELD (modrm);
924 int reg = MODRM_REG_FIELD (modrm);
925 int rm = MODRM_RM_FIELD (modrm);
926 int have_sib = mod != 3 && rm == 4;
928 /* Assume the reg field of the modrm byte specifies a register. */
929 used_regs_mask |= 1 << reg;
933 int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
934 int index = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
935 used_regs_mask |= 1 << base;
936 used_regs_mask |= 1 << index;
940 used_regs_mask |= 1 << rm;
944 gdb_assert (used_regs_mask < 256);
945 gdb_assert (used_regs_mask != 255);
947 /* Finally, find a free reg. */
951 for (i = 0; i < 8; ++i)
953 if (! (used_regs_mask & (1 << i)))
957 /* We shouldn't get here. */
958 internal_error (__FILE__, __LINE__, _("unable to find free reg"));
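/* Worked example (illustrative, not from the original source): for the
   insn "add %rsi, (%rdi)" (0x48 0x01 0x37) the mask contains RAX, RDX
   and RSP (always), plus RSI from the reg field and RDI from the r/m
   field of the ModRM byte 0x37, so the first free register found is
   %rcx (architectural number 1).  */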
962 /* Extract the details of INSN that we need. */
965 amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
967 gdb_byte *start = insn;
970 details->raw_insn = insn;
972 details->opcode_len = -1;
973 details->rex_offset = -1;
974 details->opcode_offset = -1;
975 details->modrm_offset = -1;
977 /* Skip legacy instruction prefixes. */
978 insn = amd64_skip_prefixes (insn);
980 /* Skip REX instruction prefix. */
981 if (rex_prefix_p (*insn))
983 details->rex_offset = insn - start;
987 details->opcode_offset = insn - start;
989 if (*insn == TWO_BYTE_OPCODE_ESCAPE)
991 /* Two or three-byte opcode. */
993 need_modrm = twobyte_has_modrm[*insn];
995 /* Check for three-byte opcode. */
1005 details->opcode_len = 3;
1008 details->opcode_len = 2;
1014 /* One-byte opcode. */
1015 need_modrm = onebyte_has_modrm[*insn];
1016 details->opcode_len = 1;
1022 details->modrm_offset = insn - start;
1026 /* Update %rip-relative addressing in INSN.
1028 %rip-relative addressing only uses a 32-bit displacement.
1029 32 bits is not enough to be guaranteed to cover the distance between where
1030 the real instruction is and where its copy is.
1031 Convert the insn to use base+disp addressing.
1032 We set base = pc + insn_length so we can leave disp unchanged. */
1035 fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
1036 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1038 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1039 const struct amd64_insn *insn_details = &dsc->insn_details;
1040 int modrm_offset = insn_details->modrm_offset;
1041 gdb_byte *insn = insn_details->raw_insn + modrm_offset;
1045 int arch_tmp_regno, tmp_regno;
1046 ULONGEST orig_value;
1048 /* %rip+disp32 addressing mode, displacement follows ModRM byte. */
1051 /* Compute the rip-relative address. */
1052 disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
1053 insn_length = amd64_insn_length (gdbarch, dsc->insn_buf, dsc->max_len, from);
1054 rip_base = from + insn_length;
1056 /* We need a register to hold the address.
1057 Pick one not used in the insn.
1058 NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7. */
1059 arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
1060 tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);
  /* REX.B should be unset as we were using rip-relative addressing,
     but ensure it's unset anyway; tmp_regno is not r8-r15.  */
1064 if (insn_details->rex_offset != -1)
1065 dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;
1067 regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
1068 dsc->tmp_regno = tmp_regno;
1069 dsc->tmp_save = orig_value;
1072 /* Convert the ModRM field to be base+disp. */
1073 dsc->insn_buf[modrm_offset] &= ~0xc7;
1074 dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;
1076 regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);
1078 if (debug_displaced)
1079 fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
1080 "displaced: using temp reg %d, old value %s, new value %s\n",
1081 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
1082 paddress (gdbarch, rip_base));
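/* Worked example (illustrative, not from the original source): the
   7-byte insn 0x48 0x8b 0x05 0xd4 0x01 0x00 0x00, i.e.
   "mov 0x1d4(%rip), %rax", leaves %rcx unused, so %rcx is picked as
   the temporary.  The ModRM byte 0x05 (mod=00, r/m=101) is rewritten
   to 0x81 (mod=10, r/m=001), giving "mov 0x1d4(%rcx), %rax", and %rcx
   is loaded with FROM + 7 so the effective address is unchanged.  */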
1086 fixup_displaced_copy (struct gdbarch *gdbarch,
1087 struct displaced_step_closure *dsc,
1088 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1090 const struct amd64_insn *details = &dsc->insn_details;
1092 if (details->modrm_offset != -1)
1094 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1096 if ((modrm & 0xc7) == 0x05)
1098 /* The insn uses rip-relative addressing.
1100 fixup_riprel (gdbarch, dsc, from, to, regs);
1105 struct displaced_step_closure *
1106 amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
1107 CORE_ADDR from, CORE_ADDR to,
1108 struct regcache *regs)
1110 int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
1113 int fixup_sentinel_space = len;
1114 struct displaced_step_closure *dsc =
1115 xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
1116 gdb_byte *buf = &dsc->insn_buf[0];
1117 struct amd64_insn *details = &dsc->insn_details;
1120 dsc->max_len = len + fixup_sentinel_space;
1122 read_memory (from, buf, len);
1124 /* Set up the sentinel space so we don't have to worry about running
1125 off the end of the buffer. An excessive number of leading prefixes
1126 could otherwise cause this. */
1127 memset (buf + len, 0, fixup_sentinel_space);
1129 amd64_get_insn_details (buf, details);
1131 /* GDB may get control back after the insn after the syscall.
1132 Presumably this is a kernel bug.
1133 If this is a syscall, make sure there's a nop afterwards. */
1137 if (amd64_syscall_p (details, &syscall_length))
1138 buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
1141 /* Modify the insn to cope with the address where it will be executed from.
1142 In particular, handle any rip-relative addressing. */
1143 fixup_displaced_copy (gdbarch, dsc, from, to, regs);
1145 write_memory (to, buf, len);
1147 if (debug_displaced)
1149 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
1150 paddress (gdbarch, from), paddress (gdbarch, to));
1151 displaced_step_dump_bytes (gdb_stdlog, buf, len);
1158 amd64_absolute_jmp_p (const struct amd64_insn *details)
1160 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1162 if (insn[0] == 0xff)
1164 /* jump near, absolute indirect (/4) */
1165 if ((insn[1] & 0x38) == 0x20)
1168 /* jump far, absolute indirect (/5) */
1169 if ((insn[1] & 0x38) == 0x28)
1177 amd64_absolute_call_p (const struct amd64_insn *details)
1179 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1181 if (insn[0] == 0xff)
1183 /* Call near, absolute indirect (/2) */
1184 if ((insn[1] & 0x38) == 0x10)
1187 /* Call far, absolute indirect (/3) */
1188 if ((insn[1] & 0x38) == 0x18)
1196 amd64_ret_p (const struct amd64_insn *details)
1198 /* NOTE: gcc can emit "repz ; ret". */
1199 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1203 case 0xc2: /* ret near, pop N bytes */
1204 case 0xc3: /* ret near */
1205 case 0xca: /* ret far, pop N bytes */
1206 case 0xcb: /* ret far */
1207 case 0xcf: /* iret */
1216 amd64_call_p (const struct amd64_insn *details)
1218 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1220 if (amd64_absolute_call_p (details))
1223 /* call near, relative */
1224 if (insn[0] == 0xe8)
1230 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
1231 length in bytes. Otherwise, return zero. */
1234 amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1236 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1238 if (insn[0] == 0x0f && insn[1] == 0x05)
1247 /* Fix up the state of registers and memory after having single-stepped
1248 a displaced instruction. */
1251 amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1252 struct displaced_step_closure *dsc,
1253 CORE_ADDR from, CORE_ADDR to,
1254 struct regcache *regs)
1256 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1257 /* The offset we applied to the instruction's address. */
1258 ULONGEST insn_offset = to - from;
1259 gdb_byte *insn = dsc->insn_buf;
1260 const struct amd64_insn *insn_details = &dsc->insn_details;
1262 if (debug_displaced)
1263 fprintf_unfiltered (gdb_stdlog,
1264 "displaced: fixup (%s, %s), "
1265 "insn = 0x%02x 0x%02x ...\n",
1266 paddress (gdbarch, from), paddress (gdbarch, to),
1269 /* If we used a tmp reg, restore it. */
1273 if (debug_displaced)
1274 fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
1275 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
1276 regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1279 /* The list of issues to contend with here is taken from
1280 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1281 Yay for Free Software! */
  /* Relocate the %rip back to the program's instruction stream,
     if necessary.  */
1286 /* Except in the case of absolute or indirect jump or call
1287 instructions, or a return instruction, the new rip is relative to
1288 the displaced instruction; make it relative to the original insn.
1289 Well, signal handler returns don't need relocation either, but we use the
1290 value of %rip to recognize those; see below. */
1291 if (! amd64_absolute_jmp_p (insn_details)
1292 && ! amd64_absolute_call_p (insn_details)
1293 && ! amd64_ret_p (insn_details))
1298 regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1300 /* A signal trampoline system call changes the %rip, resuming
1301 execution of the main program after the signal handler has
1302 returned. That makes them like 'return' instructions; we
1303 shouldn't relocate %rip.
1305 But most system calls don't, and we do need to relocate %rip.
1307 Our heuristic for distinguishing these cases: if stepping
1308 over the system call instruction left control directly after
         the instruction, then we relocate --- control almost certainly
         doesn't belong in the displaced copy.  Otherwise, we assume
         the instruction has put control where it belongs, and leave
         it unrelocated.  Goodness help us if there are PC-relative
         system calls.  */
1314 if (amd64_syscall_p (insn_details, &insn_len)
1315 && orig_rip != to + insn_len
1316 /* GDB can get control back after the insn after the syscall.
1317 Presumably this is a kernel bug.
            Fixup ensures it's a nop; we add one to the length for it.  */
1319 && orig_rip != to + insn_len + 1)
1321 if (debug_displaced)
1322 fprintf_unfiltered (gdb_stdlog,
1323 "displaced: syscall changed %%rip; "
1324 "not relocating\n");
1328 ULONGEST rip = orig_rip - insn_offset;
          /* If we just stepped over a breakpoint insn, we don't back up
             the pc on purpose; this is to match behaviour without
             stepping.  */
1334 regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1336 if (debug_displaced)
1337 fprintf_unfiltered (gdb_stdlog,
1339 "relocated %%rip from %s to %s\n",
1340 paddress (gdbarch, orig_rip),
1341 paddress (gdbarch, rip));
  /* If the instruction was PUSHFL, then the TF bit will be set in the
     pushed value, and should be cleared.  We'll leave this for later,
     since GDB already messes up the TF flag when stepping over a
     pushfl instruction.  */
1350 /* If the instruction was a call, the return address now atop the
1351 stack is the address following the copied instruction. We need
1352 to make it the address following the original instruction. */
1353 if (amd64_call_p (insn_details))
1357 const ULONGEST retaddr_len = 8;
1359 regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
1360 retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
      /* The return address is a full 64-bit address, so don't truncate
         it to 32 bits when relocating it.  */
      retaddr -= insn_offset;
1362 write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
1364 if (debug_displaced)
1365 fprintf_unfiltered (gdb_stdlog,
1366 "displaced: relocated return addr at %s "
1368 paddress (gdbarch, rsp),
1369 paddress (gdbarch, retaddr));
1373 /* The maximum number of saved registers. This should include %rip. */
1374 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1376 struct amd64_frame_cache
1380 CORE_ADDR sp_offset;
1383 /* Saved registers. */
1384 CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
1388 /* Do we have a frame? */
1392 /* Initialize a frame cache. */
1395 amd64_init_frame_cache (struct amd64_frame_cache *cache)
1401 cache->sp_offset = -8;
1404 /* Saved registers. We initialize these to -1 since zero is a valid
1405 offset (that's where %rbp is supposed to be stored). */
1406 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1407 cache->saved_regs[i] = -1;
1408 cache->saved_sp = 0;
1409 cache->saved_sp_reg = -1;
1411 /* Frameless until proven otherwise. */
1412 cache->frameless_p = 1;
1415 /* Allocate and initialize a frame cache. */
1417 static struct amd64_frame_cache *
1418 amd64_alloc_frame_cache (void)
1420 struct amd64_frame_cache *cache;
1422 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1423 amd64_init_frame_cache (cache);
/* GCC 4.4 and later can put code in the prologue to realign the
   stack pointer.  Check whether PC points to such code, and update
   CACHE accordingly.  Return the first instruction after the code
   sequence or CURRENT_PC, whichever is smaller.  If we don't
   recognize the code, return PC.  */
1434 amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
1435 struct amd64_frame_cache *cache)
  /* There are 2 code sequences to re-align the stack before the frame
     gets set up:

        1. Use a caller-saved register:

                leaq  8(%rsp), %reg
                andq  $-XXX, %rsp
                pushq -8(%reg)

        2. Use a callee-saved register:

                pushq %reg
                leaq  16(%rsp), %reg
                andq  $-XXX, %rsp
                pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

        0x48 0x83 0xe4 0xf0                     andq $-16, %rsp
        0x48 0x81 0xe4 0x00 0xff 0xff 0xff      andq $-256, %rsp
   */
1461 int offset, offset_and;
1463 if (target_read_memory (pc, buf, sizeof buf))
  /* Check the caller-saved register variant.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
1468 if ((buf[0] & 0xfb) == 0x48
1473 /* MOD must be binary 10 and R/M must be binary 100. */
1474 if ((buf[2] & 0xc7) != 0x44)
1477 /* REG has register number. */
1478 reg = (buf[2] >> 3) & 7;
1480 /* Check the REX.R bit. */
  /* Check the callee-saved register variant.  The first instruction
     has to be "pushq %reg".  */
1491 if ((buf[0] & 0xf8) == 0x50)
1493 else if ((buf[0] & 0xf6) == 0x40
1494 && (buf[1] & 0xf8) == 0x50)
1496 /* Check the REX.B bit. */
1497 if ((buf[0] & 1) != 0)
1506 reg += buf[offset] & 0x7;
1510 /* The next instruction has to be "leaq 16(%rsp), %reg". */
1511 if ((buf[offset] & 0xfb) != 0x48
1512 || buf[offset + 1] != 0x8d
1513 || buf[offset + 3] != 0x24
1514 || buf[offset + 4] != 0x10)
1517 /* MOD must be binary 10 and R/M must be binary 100. */
1518 if ((buf[offset + 2] & 0xc7) != 0x44)
1521 /* REG has register number. */
1522 r = (buf[offset + 2] >> 3) & 7;
1524 /* Check the REX.R bit. */
1525 if (buf[offset] == 0x4c)
1528 /* Registers in pushq and leaq have to be the same. */
  /* The register can't be %rsp or %rbp.  */
1536 if (reg == 4 || reg == 5)
1539 /* The next instruction has to be "andq $-XXX, %rsp". */
1540 if (buf[offset] != 0x48
1541 || buf[offset + 2] != 0xe4
1542 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
1545 offset_and = offset;
1546 offset += buf[offset + 1] == 0x81 ? 7 : 4;
1548 /* The next instruction has to be "pushq -8(%reg)". */
1550 if (buf[offset] == 0xff)
1552 else if ((buf[offset] & 0xf6) == 0x40
1553 && buf[offset + 1] == 0xff)
1555 /* Check the REX.B bit. */
1556 if ((buf[offset] & 0x1) != 0)
  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be
     binary 01.  */
1565 if (buf[offset + 1] != 0xf8
1566 || (buf[offset] & 0xf8) != 0x70)
1569 /* R/M has register. */
1570 r += buf[offset] & 7;
1572 /* Registers in leaq and pushq have to be the same. */
1576 if (current_pc > pc + offset_and)
1577 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
1579 return min (pc + offset + 2, current_pc);
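/* Worked example (illustrative, not from the original source): GCC may
   emit the caller-saved variant as

        0x4c 0x8d 0x54 0x24 0x08        leaq  8(%rsp), %r10
        0x48 0x83 0xe4 0xf0             andq  $-16, %rsp
        0x41 0xff 0x72 0xf8             pushq -8(%r10)

   in which case, once CURRENT_PC is past the andq, cache->saved_sp_reg
   is set to the GDB register number for %r10.  */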
1582 /* Do a limited analysis of the prologue at PC and update CACHE
1583 accordingly. Bail out early if CURRENT_PC is reached. Return the
1584 address where the analysis stopped.
   We will handle only functions beginning with:

      pushq %rbp        0x55
      movq %rsp, %rbp   0x48 0x89 0xe5
1591 Any function that doesn't start with this sequence will be assumed
1592 to have no prologue and thus no valid frame pointer in %rbp. */
1595 amd64_analyze_prologue (struct gdbarch *gdbarch,
1596 CORE_ADDR pc, CORE_ADDR current_pc,
1597 struct amd64_frame_cache *cache)
1599 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1600 static gdb_byte proto[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */
1604 if (current_pc <= pc)
1607 pc = amd64_analyze_stack_align (pc, current_pc, cache);
1609 op = read_memory_unsigned_integer (pc, 1, byte_order);
1611 if (op == 0x55) /* pushq %rbp */
1613 /* Take into account that we've executed the `pushq %rbp' that
1614 starts this instruction sequence. */
1615 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
1616 cache->sp_offset += 8;
1618 /* If that's all, return now. */
1619 if (current_pc <= pc + 1)
1622 /* Check for `movq %rsp, %rbp'. */
1623 read_memory (pc + 1, buf, 3);
1624 if (memcmp (buf, proto, 3) != 0)
1627 /* OK, we actually have a frame. */
1628 cache->frameless_p = 0;
1635 /* Return PC of first real instruction. */
1638 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
1640 struct amd64_frame_cache cache;
1643 amd64_init_frame_cache (&cache);
1644 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
1646 if (cache.frameless_p)
1653 /* Normal frames. */
1655 static struct amd64_frame_cache *
1656 amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
1658 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1659 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1660 struct amd64_frame_cache *cache;
1667 cache = amd64_alloc_frame_cache ();
1668 *this_cache = cache;
1670 cache->pc = get_frame_func (this_frame);
1672 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
1675 if (cache->saved_sp_reg != -1)
1677 /* Stack pointer has been saved. */
1678 get_frame_register (this_frame, cache->saved_sp_reg, buf);
      cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1682 if (cache->frameless_p)
      /* We didn't find a valid frame.  If we're at the start of a
         function, or somewhere halfway through its prologue, the
         function's frame probably hasn't been fully set up yet.  Try to
         reconstruct the base address for the stack frame by looking
         at the stack pointer.  For truly "frameless" functions this
         might work too.  */
1691 if (cache->saved_sp_reg != -1)
1693 /* We're halfway aligning the stack. */
1694 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
1695 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
1697 /* This will be added back below. */
1698 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
1702 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1703 cache->base = extract_unsigned_integer (buf, 8, byte_order)
1709 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
1710 cache->base = extract_unsigned_integer (buf, 8, byte_order);
1713 /* Now that we have the base address for the stack frame we can
1714 calculate the value of %rsp in the calling frame. */
1715 cache->saved_sp = cache->base + 16;
1717 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
1718 frame we find it at the same offset from the reconstructed base
1719 address. If we're halfway aligning the stack, %rip is handled
1720 differently (see above). */
1721 if (!cache->frameless_p || cache->saved_sp_reg == -1)
1722 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
1724 /* Adjust all the saved registers such that they contain addresses
1725 instead of offsets. */
1726 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1727 if (cache->saved_regs[i] != -1)
1728 cache->saved_regs[i] += cache->base;
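/* Illustrative layout (not from the original source): for a function
   that ran "pushq %rbp; movq %rsp, %rbp", cache->base is the value of
   %rbp, the saved %rbp lives at cache->base, the return address at
   cache->base + 8, and the caller's %rsp is cache->base + 16, which is
   also what amd64_frame_this_id uses as the frame's stack address.  */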
1734 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1735 struct frame_id *this_id)
1737 struct amd64_frame_cache *cache =
1738 amd64_frame_cache (this_frame, this_cache);
1740 /* This marks the outermost frame. */
1741 if (cache->base == 0)
1744 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
1747 static struct value *
1748 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1751 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1752 struct amd64_frame_cache *cache =
1753 amd64_frame_cache (this_frame, this_cache);
1755 gdb_assert (regnum >= 0);
1757 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
1758 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
1760 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
1761 return frame_unwind_got_memory (this_frame, regnum,
1762 cache->saved_regs[regnum]);
1764 return frame_unwind_got_register (this_frame, regnum, regnum);
1767 static const struct frame_unwind amd64_frame_unwind =
1770 amd64_frame_this_id,
1771 amd64_frame_prev_register,
1773 default_frame_sniffer
1777 /* Signal trampolines. */
1779 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
1780 64-bit variants. This would require using identical frame caches
1781 on both platforms. */
1783 static struct amd64_frame_cache *
1784 amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
1786 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1787 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1788 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1789 struct amd64_frame_cache *cache;
1797 cache = amd64_alloc_frame_cache ();
1799 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1800 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
1802 addr = tdep->sigcontext_addr (this_frame);
1803 gdb_assert (tdep->sc_reg_offset);
1804 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
1805 for (i = 0; i < tdep->sc_num_regs; i++)
1806 if (tdep->sc_reg_offset[i] != -1)
1807 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
1809 *this_cache = cache;
1814 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
1815 void **this_cache, struct frame_id *this_id)
1817 struct amd64_frame_cache *cache =
1818 amd64_sigtramp_frame_cache (this_frame, this_cache);
1820 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
1823 static struct value *
1824 amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
1825 void **this_cache, int regnum)
1827 /* Make sure we've initialized the cache. */
1828 amd64_sigtramp_frame_cache (this_frame, this_cache);
1830 return amd64_frame_prev_register (this_frame, this_cache, regnum);
1834 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
1835 struct frame_info *this_frame,
1838 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
  /* We shouldn't even bother if we don't have a sigcontext_addr
     handler.  */
1842 if (tdep->sigcontext_addr == NULL)
1845 if (tdep->sigtramp_p != NULL)
1847 if (tdep->sigtramp_p (this_frame))
1851 if (tdep->sigtramp_start != 0)
1853 CORE_ADDR pc = get_frame_pc (this_frame);
1855 gdb_assert (tdep->sigtramp_end != 0);
1856 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
1863 static const struct frame_unwind amd64_sigtramp_frame_unwind =
1866 amd64_sigtramp_frame_this_id,
1867 amd64_sigtramp_frame_prev_register,
1869 amd64_sigtramp_frame_sniffer
1874 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
1876 struct amd64_frame_cache *cache =
1877 amd64_frame_cache (this_frame, this_cache);
1882 static const struct frame_base amd64_frame_base =
1884 &amd64_frame_unwind,
1885 amd64_frame_base_address,
1886 amd64_frame_base_address,
1887 amd64_frame_base_address
1890 static struct frame_id
1891 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1895 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
1897 return frame_id_build (fp + 16, get_frame_pc (this_frame));
1900 /* 16 byte align the SP per frame requirements. */
1903 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1905 return sp & -(CORE_ADDR)16;
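/* For example (illustrative, not from the original source), an
   unaligned inferior-call SP of 0x7fffffffe468 is rounded down to
   0x7fffffffe460 here before amd64_push_dummy_call builds the frame.  */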
1909 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
1910 in the floating-point register set REGSET to register cache
1911 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
1914 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
1915 int regnum, const void *fpregs, size_t len)
1917 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
1919 gdb_assert (len == tdep->sizeof_fpregset);
1920 amd64_supply_fxsave (regcache, regnum, fpregs);
1923 /* Collect register REGNUM from the register cache REGCACHE and store
1924 it in the buffer specified by FPREGS and LEN as described by the
1925 floating-point register set REGSET. If REGNUM is -1, do this for
1926 all registers in REGSET. */
1929 amd64_collect_fpregset (const struct regset *regset,
1930 const struct regcache *regcache,
1931 int regnum, void *fpregs, size_t len)
1933 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
1935 gdb_assert (len == tdep->sizeof_fpregset);
1936 amd64_collect_fxsave (regcache, regnum, fpregs);
1939 /* Return the appropriate register set for the core section identified
1940 by SECT_NAME and SECT_SIZE. */
1942 static const struct regset *
1943 amd64_regset_from_core_section (struct gdbarch *gdbarch,
1944 const char *sect_name, size_t sect_size)
1946 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1948 if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
1950 if (tdep->fpregset == NULL)
1951 tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
1952 amd64_collect_fpregset);
1954 return tdep->fpregset;
1957 return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
1961 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
1962 %rdi. We expect its value to be a pointer to the jmp_buf structure
1963 from which we extract the address that we will land at. This
   address is copied into PC.  This routine returns non-zero on
   success.  */
1968 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1972 struct gdbarch *gdbarch = get_frame_arch (frame);
1973 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
1974 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
1976 /* If JB_PC_OFFSET is -1, we have no way to find out where the
1977 longjmp will land. */
1978 if (jb_pc_offset == -1)
1981 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
  jb_addr = extract_typed_address
    (buf, builtin_type (gdbarch)->builtin_data_ptr);
1984 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
1987 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
1993 amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
1995 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1997 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
1998 floating-point registers. */
1999 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
2001 /* AMD64 has an FPU and 16 SSE registers. */
2002 tdep->st0_regnum = AMD64_ST0_REGNUM;
2003 tdep->num_xmm_regs = 16;
2005 /* This is what all the fuss is about. */
2006 set_gdbarch_long_bit (gdbarch, 64);
2007 set_gdbarch_long_long_bit (gdbarch, 64);
2008 set_gdbarch_ptr_bit (gdbarch, 64);
2010 /* In contrast to the i386, on AMD64 a `long double' actually takes
2011 up 128 bits, even though it's still based on the i387 extended
2012 floating-point format which has only 80 significant bits. */
2013 set_gdbarch_long_double_bit (gdbarch, 128);
2015 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
2016 set_gdbarch_register_name (gdbarch, amd64_register_name);
2017 set_gdbarch_register_type (gdbarch, amd64_register_type);
2019 /* Register numbers of various important registers. */
2020 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
2021 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
2022 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
2023 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
2025 /* The "default" register numbering scheme for AMD64 is referred to
2026 as the "DWARF Register Number Mapping" in the System V psABI.
2027 The preferred debugging format for all known AMD64 targets is
2028 actually DWARF2, and GCC doesn't seem to support DWARF (that is
2029 DWARF-1), but we provide the same mapping just in case. This
2030 mapping is also used for stabs, which GCC does support. */
2031 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
2032 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
2034 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
2035 be in use on any of the supported AMD64 targets. */
2037 /* Call dummy code. */
2038 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
2039 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
2040 set_gdbarch_frame_red_zone_size (gdbarch, 128);
2042 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
2043 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
2044 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
2046 set_gdbarch_return_value (gdbarch, amd64_return_value);
2048 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
2050 /* Avoid wiring in the MMX registers for now. */
2051 set_gdbarch_num_pseudo_regs (gdbarch, 0);
2052 tdep->mm0_regnum = -1;
2054 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
2056 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
2057 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
2058 frame_base_set_default (gdbarch, &amd64_frame_base);
2060 /* If we have a register mapping, enable the generic core file support. */
2061 if (tdep->gregset_reg_offset)
2062 set_gdbarch_regset_from_core_section (gdbarch,
2063 amd64_regset_from_core_section);
2065 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
2069 /* The 64-bit FXSAVE format differs from the 32-bit format in the
2070 sense that the instruction pointer and data pointer are simply
2071 64-bit offsets into the code segment and the data segment instead
   of a selector offset pair.  The functions below store the upper 32
   bits of these pointers (instead of just the 16 bits of the segment
   register).  */
2076 /* Fill register REGNUM in REGCACHE with the appropriate
2077 floating-point or SSE register value from *FXSAVE. If REGNUM is
2078 -1, do this for all registers. This function masks off any of the
2079 reserved bits in *FXSAVE. */
2082 amd64_supply_fxsave (struct regcache *regcache, int regnum,
2085 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2086 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2088 i387_supply_fxsave (regcache, regnum, fxsave);
2090 if (fxsave && gdbarch_ptr_bit (gdbarch) == 64)
2092 const gdb_byte *regs = fxsave;
2094 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2095 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2096 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2097 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
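/* Illustrative note (not from the original source): in the 64-bit
   FXSAVE image bytes 8-15 hold the full 64-bit FPU instruction pointer
   and bytes 16-23 the full operand pointer, so REGS + 12 and REGS + 20
   above are the upper 32 bits of those pointers, which on AMD64 take
   the place of the FISEG and FOSEG selectors.  */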
2101 /* Fill register REGNUM (if it is a floating-point or SSE register) in
2102 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
   all registers.  This function doesn't touch any of the reserved
   bits in *FXSAVE.  */
2107 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
2110 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2111 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2112 gdb_byte *regs = fxsave;
2114 i387_collect_fxsave (regcache, regnum, fxsave);
2116 if (gdbarch_ptr_bit (gdbarch) == 64)
2118 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2119 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2120 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2121 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);