/* Target-dependent code for AMD64.

   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.

   Contributed by Jiri Smid, SuSE Labs.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "opcode/i386.h"
#include "arch-utils.h"
#include "dummy-frame.h"
#include "frame-base.h"
#include "frame-unwind.h"
#include "gdb_assert.h"

#include "amd64-tdep.h"
#include "i387-tdep.h"
/* Note that the AMD64 architecture was previously known as x86-64.
   The latter is (forever) engraved into the canonical system name as
   returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSDs have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */
/* Register information.  */

static const char *amd64_register_names[] =
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",

/* Total number of registers.  */
#define AMD64_NUM_REGS   ARRAY_SIZE (amd64_register_names)
/* Return the name of register REGNUM.  */

amd64_register_name (struct gdbarch *gdbarch, int regnum)
  if (regnum >= 0 && regnum < AMD64_NUM_REGS)
    return amd64_register_names[regnum];
/* Return the GDB type object for the "standard" data type of data in
   register REGNUM.  */
amd64_register_type (struct gdbarch *gdbarch, int regnum)
  if (regnum >= AMD64_RAX_REGNUM && regnum <= AMD64_RDI_REGNUM)
    return builtin_type (gdbarch)->builtin_int64;
  if (regnum == AMD64_RBP_REGNUM || regnum == AMD64_RSP_REGNUM)
    return builtin_type (gdbarch)->builtin_data_ptr;
  if (regnum >= AMD64_R8_REGNUM && regnum <= AMD64_R15_REGNUM)
    return builtin_type (gdbarch)->builtin_int64;
  if (regnum == AMD64_RIP_REGNUM)
    return builtin_type (gdbarch)->builtin_func_ptr;
  if (regnum == AMD64_EFLAGS_REGNUM)
    return i386_eflags_type (gdbarch);
  if (regnum >= AMD64_CS_REGNUM && regnum <= AMD64_GS_REGNUM)
    return builtin_type (gdbarch)->builtin_int32;
  if (regnum >= AMD64_ST0_REGNUM && regnum <= AMD64_ST0_REGNUM + 7)
    return i387_ext_type (gdbarch);
  if (regnum >= AMD64_FCTRL_REGNUM && regnum <= AMD64_FCTRL_REGNUM + 7)
    return builtin_type (gdbarch)->builtin_int32;
  if (regnum >= AMD64_XMM0_REGNUM && regnum <= AMD64_XMM0_REGNUM + 15)
    return i386_sse_type (gdbarch);
  if (regnum == AMD64_MXCSR_REGNUM)
    return i386_mxcsr_type (gdbarch);

  internal_error (__FILE__, __LINE__, _("invalid regnum"));
/* DWARF Register Number Mapping as defined in the System V psABI,

static int amd64_dwarf_regmap[] =
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */

  /* Stack Pointer Register RSP.  */

  /* Extended Integer Registers 8 - 15.  */
  8, 9, 10, 11, 12, 13, 14, 15,

  /* Return Address RA.  Mapped to RIP.  */

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,

  /* Control and Status Flags Register.  */

  /* Selector Registers.  */

  /* Segment Base Address Registers.  */

  /* Special Selector Registers.  */

  /* Floating Point Control Registers.  */

static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
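/* Worked example of why this map is needed (following the table above):
   DWARF numbers the integer registers %rax, %rdx, %rcx, %rbx, %rsi,
   %rdi, %rbp, %rsp as 0..7, while GDB's own numbering (see
   amd64_register_names) runs %rax, %rbx, %rcx, %rdx, ...  So DWARF
   register 1 is %rdx (AMD64_RDX_REGNUM, i.e. 3, for GDB) and DWARF
   register 3 is %rbx (AMD64_RBX_REGNUM, i.e. 1); the two orders agree
   again from %rsi onwards.  */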
/* Convert DWARF register number REG to the appropriate register
   number used by GDB.  */

amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    regnum = amd64_dwarf_regmap[reg];

    warning (_("Unmapped DWARF Register #%d encountered."), reg);

/* Map architectural register numbers to gdb register numbers.  */

static const int amd64_arch_regmap[16] =
  AMD64_RAX_REGNUM,   /* %rax */
  AMD64_RCX_REGNUM,   /* %rcx */
  AMD64_RDX_REGNUM,   /* %rdx */
  AMD64_RBX_REGNUM,   /* %rbx */
  AMD64_RSP_REGNUM,   /* %rsp */
  AMD64_RBP_REGNUM,   /* %rbp */
  AMD64_RSI_REGNUM,   /* %rsi */
  AMD64_RDI_REGNUM,   /* %rdi */
  AMD64_R8_REGNUM,    /* %r8 */
  AMD64_R9_REGNUM,    /* %r9 */
  AMD64_R10_REGNUM,   /* %r10 */
  AMD64_R11_REGNUM,   /* %r11 */
  AMD64_R12_REGNUM,   /* %r12 */
  AMD64_R13_REGNUM,   /* %r13 */
  AMD64_R14_REGNUM,   /* %r14 */
  AMD64_R15_REGNUM    /* %r15 */

static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));

/* Convert architectural register number REG to the appropriate register
   number used by GDB.  */

amd64_arch_reg_to_regnum (int reg)
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);

  return amd64_arch_regmap[reg];
/* Register classes as defined in the psABI.  */

/* Return the union class of CLASS1 and CLASS2.  See the psABI for

static enum amd64_reg_class
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
  /* Rule (a): If both classes are equal, this is the resulting class.  */
  if (class1 == class2)

  /* Rule (b): If one of the classes is NO_CLASS, the resulting class
     is the other class.  */
  if (class1 == AMD64_NO_CLASS)
  if (class2 == AMD64_NO_CLASS)

  /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)

  /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    return AMD64_INTEGER;

  /* Rule (e): If one of the classes is X87, X87UP or COMPLEX_X87,
     MEMORY is used as the class.  */
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)

  /* Rule (f): Otherwise class SSE is used.  */
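/* A small worked example of the merge rules above: merging INTEGER
   with SSE yields INTEGER (rule d), merging NO_CLASS with SSE yields
   SSE (rule b), and merging anything with MEMORY yields MEMORY
   (rule c).  */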
static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);

/* Return non-zero if TYPE is a non-POD structure or union type.  */

amd64_non_pod_p (struct type *type)
  /* ??? A class with a base class certainly isn't POD, but does this
     catch all non-POD structure types?  */
  if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  */

amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
  int len = TYPE_LENGTH (type);

  /* 1. If the size of an object is larger than two eightbytes, or in
        C++, is a non-POD structure or union type, or contains
        unaligned fields, it has class memory.  */
  if (len > 16 || amd64_non_pod_p (type))
      class[0] = class[1] = AMD64_MEMORY;

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  class[0] = class[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
        always two fields are considered.  The resulting class is
        calculated according to the classes of the fields in the

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, class);
      if (len > 8 && class[1] == AMD64_NO_CLASS)

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
                  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
          struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
          int pos = TYPE_FIELD_BITPOS (type, i) / 64;
          enum amd64_reg_class subclass[2];

          /* Ignore static fields.  */
          if (field_is_static (&TYPE_FIELD (type, i)))

          gdb_assert (pos == 0 || pos == 1);

          amd64_classify (subtype, subclass);
          class[pos] = amd64_merge_classes (class[pos], subclass[0]);
            class[1] = amd64_merge_classes (class[1], subclass[1]);

  /* 4. Then a post merger cleanup is done:  */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
    class[0] = class[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
  if (class[0] == AMD64_SSEUP)
    class[0] = AMD64_SSE;
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
    class[1] = AMD64_SSE;
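/* Worked example of the classification above: per the psABI rules, a
   struct { long l; double d; } occupies two eightbytes; the first is
   classified INTEGER (from the long) and the second SSE (from the
   double), so it is passed in one integer register and one SSE
   register rather than in memory.  */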
/* Classify TYPE, and store the result in CLASS.  */

amd64_classify (struct type *type, enum amd64_reg_class class[2])
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);

  class[0] = class[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
      && (len == 1 || len == 2 || len == 4 || len == 8))
    class[0] = AMD64_INTEGER;

  /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
           && (len == 4 || len == 8))
    class[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    class[0] = AMD64_X87, class[1] = AMD64_X87UP;

  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
           || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, class);
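/* For example, with the rules above a plain `long' (8 bytes) ends up
   as { INTEGER, NO_CLASS }, a `double' as { SSE, NO_CLASS }, and an
   i387 `long double' (16 bytes) as { X87, X87UP }.  */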
static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct type *func_type,
                    struct type *type, struct regcache *regcache,
                    gdb_byte *readbuf, const gdb_byte *writebuf)
  enum amd64_reg_class class[2];
  int len = TYPE_LENGTH (type);
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };

  gdb_assert (!(readbuf && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, class);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (class[0] == AMD64_MEMORY)
      /* As indicated by the comment above, the ABI guarantees that we
         can always find the return value just after the function has

          regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
          read_memory (addr, readbuf, TYPE_LENGTH (type));

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;

  gdb_assert (class[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  for (i = 0; len > 0; i++, len -= 8)

          /* 3. If the class is INTEGER, the next available register
             of the sequence %rax, %rdx is used.  */
          regnum = integer_regnum[integer_reg++];

          /* 4. If the class is SSE, the next available SSE register
             of the sequence %xmm0, %xmm1 is used.  */
          regnum = sse_regnum[sse_reg++];

          /* 5. If the class is SSEUP, the eightbyte is passed in the
             upper half of the last used SSE register.  */
          gdb_assert (sse_reg > 0);
          regnum = sse_regnum[sse_reg - 1];

          /* 6. If the class is X87, the value is returned on the X87
             stack in %st0 as 80-bit x87 number.  */
          regnum = AMD64_ST0_REGNUM;
            i387_return_value (gdbarch, regcache);

          /* 7. If the class is X87UP, the value is returned together
             with the previous X87 value in %st0.  */
          gdb_assert (i > 0 && class[0] == AMD64_X87);
          regnum = AMD64_ST0_REGNUM;

          gdb_assert (!"Unexpected register class.");

      gdb_assert (regnum != -1);

        regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
        regcache_raw_write_part (regcache, regnum, offset, min (len, 8),

  return RETURN_VALUE_REGISTER_CONVENTION;
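/* Illustration of the register convention above: a function returning
   struct { long l; double d; } has class { INTEGER, SSE }, so GDB
   reads the first eightbyte from %rax and the second from %xmm0; a
   32-byte struct has class MEMORY and is fetched through the address
   left in %rax instead.  */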
amd64_push_arguments (struct regcache *regcache, int nargs,
                      struct value **args, CORE_ADDR sp, int struct_return)
  static int integer_regnum[] =
    AMD64_RDI_REGNUM,           /* %rdi */
    AMD64_RSI_REGNUM,           /* %rsi */
    AMD64_RDX_REGNUM,           /* %rdx */
    AMD64_RCX_REGNUM,           /* %rcx */

  static int sse_regnum[] =
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  struct value **stack_args = alloca (nargs * sizeof (struct value *));
  int num_stack_args = 0;
  int num_elements = 0;

  /* Reserve a register for the "hidden" argument.  */

  for (i = 0; i < nargs; i++)
      struct type *type = value_type (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class class[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;

      /* Classify argument.  */
      amd64_classify (type, class);

      /* Calculate the number of integer and SSE registers needed for
      for (j = 0; j < 2; j++)
          if (class[j] == AMD64_INTEGER)
            needed_integer_regs++;
          else if (class[j] == AMD64_SSE)

      /* Check whether enough registers are available, and if the
         argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
          || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
          || (needed_integer_regs == 0 && needed_sse_regs == 0))
          /* The argument will be passed on the stack.  */
          num_elements += ((len + 7) / 8);
          stack_args[num_stack_args++] = args[i];

          /* The argument will be passed in registers.  */
          const gdb_byte *valbuf = value_contents (args[i]);

          gdb_assert (len <= 16);

          for (j = 0; len > 0; j++, len -= 8)

                regnum = integer_regnum[integer_reg++];

                regnum = sse_regnum[sse_reg++];

                gdb_assert (sse_reg > 0);
                regnum = sse_regnum[sse_reg - 1];

                gdb_assert (!"Unexpected register class.");

              gdb_assert (regnum != -1);
              memset (buf, 0, sizeof buf);
              memcpy (buf, valbuf + j * 8, min (len, 8));
              regcache_raw_write_part (regcache, regnum, offset, 8, buf);

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
      struct type *type = value_type (stack_args[i]);
      const gdb_byte *valbuf = value_contents (stack_args[i]);
      int len = TYPE_LENGTH (type);

      write_memory (sp + element * 8, valbuf, len);
      element += ((len + 7) / 8);

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used."  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
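/* Example of the %al rule just quoted: for a prototype-less call such
   as printf ("%g", 1.0), the double travels in %xmm0, so sse_reg is 1
   and %al is set to 1; a call that passes no SSE arguments gets
   %al = 0.  */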
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                       struct regcache *regcache, CORE_ADDR bp_addr,
                       int nargs, struct value **args, CORE_ADDR sp,
                       int struct_return, CORE_ADDR struct_addr)

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);

  /* Pass "hidden" argument.  */
      store_unsigned_integer (buf, 8, struct_addr);
      regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);

  /* Store return address.  */
  store_unsigned_integer (buf, 8, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, sp);
  regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  */
  regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
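/* After this routine the inferior looks as if a call instruction had
   just been executed: the fake return address BP_ADDR sits at the top
   of the stack, and both %rsp and %rbp point at it (the %rbp value is
   only a placeholder until the called function's own prologue runs).  */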
/* Displaced instruction handling.  */

/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.  */

  /* The number of opcode bytes.  */

  /* The offset of the rex prefix or -1 if not present.  */

  /* The offset to the first opcode byte.  */

  /* The offset to the modrm byte or -1 if not present.  */

  /* The raw instruction.  */

struct displaced_step_closure
  /* For rip-relative insns, saved copy of the reg we use instead of %rip.  */

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* Amount of space allocated to insn_buf.  */

  /* The possibly modified insn.
     This is a variable-length field.  */
  gdb_byte insn_buf[1];
/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

static const unsigned char onebyte_has_modrm[256] = {
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
  /*       -------------------------------        */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*       -------------------------------        */
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */

static const unsigned char twobyte_has_modrm[256] = {
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
  /*       -------------------------------        */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*       -------------------------------        */
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
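/* The tables are indexed by opcode byte.  For instance, opcode 0x89
   (mov r/m64, r64) is followed by a ModRM byte, so onebyte_has_modrm[0x89]
   is 1, while 0x90 (nop) has no ModRM byte and its entry is 0.  The
   two-byte table is indexed by the byte following the 0x0f escape.  */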
static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);

rex_prefix_p (gdb_byte pfx)
  return REX_PREFIX_P (pfx);

/* Skip the legacy instruction prefixes in INSN.
   We assume INSN is properly sentineled so we don't have to worry
   about falling off the end of the buffer.  */

amd64_skip_prefixes (gdb_byte *insn)
    case DATA_PREFIX_OPCODE:
    case ADDR_PREFIX_OPCODE:
    case CS_PREFIX_OPCODE:
    case DS_PREFIX_OPCODE:
    case ES_PREFIX_OPCODE:
    case FS_PREFIX_OPCODE:
    case GS_PREFIX_OPCODE:
    case SS_PREFIX_OPCODE:
    case LOCK_PREFIX_OPCODE:
    case REPE_PREFIX_OPCODE:
    case REPNE_PREFIX_OPCODE:

/* fprintf-function for amd64_insn_length.
   This function is a nop, we don't want to print anything, we just want to
   compute the length of the insn.  */
static int ATTR_FORMAT (printf, 2, 3)
amd64_insn_length_fprintf (void *stream, const char *format, ...)

/* Initialize a struct disassemble_info for amd64_insn_length.  */

amd64_insn_length_init_dis (struct gdbarch *gdbarch,
                            struct disassemble_info *di,
                            const gdb_byte *insn, int max_len,
  init_disassemble_info (di, NULL, amd64_insn_length_fprintf);

  /* init_disassemble_info installs buffer_read_memory, etc.
     so we don't need to do that here.
     The cast is necessary until disassemble_info is const-ified.  */
  di->buffer = (gdb_byte *) insn;
  di->buffer_length = max_len;
  di->buffer_vma = addr;

  di->arch = gdbarch_bfd_arch_info (gdbarch)->arch;
  di->mach = gdbarch_bfd_arch_info (gdbarch)->mach;
  di->endian = gdbarch_byte_order (gdbarch);
  di->endian_code = gdbarch_byte_order_for_code (gdbarch);

  disassemble_init_for_target (di);

/* Return the length in bytes of INSN.
   MAX_LEN is the size of the buffer containing INSN.
   libopcodes currently doesn't export a utility to compute the
   instruction length, so use the disassembler until then.  */

amd64_insn_length (struct gdbarch *gdbarch,
                   const gdb_byte *insn, int max_len, CORE_ADDR addr)
  struct disassemble_info di;

  amd64_insn_length_init_dis (gdbarch, &di, insn, max_len, addr);

  return gdbarch_print_insn (gdbarch, addr, &di);
/* Return an integer register (other than RSP) that is unused as an input
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,

amd64_get_unused_input_int_reg (const struct amd64_insn *details)
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarly avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

          int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
          int index = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
          used_regs_mask |= 1 << base;
          used_regs_mask |= 1 << index;

          used_regs_mask |= 1 << rm;

  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */

    for (i = 0; i < 8; ++i)
        if (! (used_regs_mask & (1 << i)))

  /* We shouldn't get here.  */
  internal_error (__FILE__, __LINE__, _("unable to find free reg"));
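/* Hypothetical example of the search above: for "add %rsi,(%rdi)"
   (01 37), the ModRM byte marks %rsi (reg field) and %rdi (rm field)
   as inputs; with %rax, %rdx and %rsp pre-marked, the lowest free
   architectural register is %rcx (number 1), which would be returned.  */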
/* Extract the details of INSN that we need.  */

amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
  gdb_byte *start = insn;

  details->raw_insn = insn;

  details->opcode_len = -1;
  details->rex_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX instruction prefix.  */
  if (rex_prefix_p (*insn))
      details->rex_offset = insn - start;

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
      /* Two or three-byte opcode.  */
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.  */
          details->opcode_len = 3;
          details->opcode_len = 2;

      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;

      details->modrm_offset = insn - start;
/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.  */

fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
              CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */

  /* Compute the rip-relative address.  */
  disp = extract_signed_integer (insn, sizeof (int32_t));
  insn_length = amd64_insn_length (gdbarch, dsc->insn_buf, dsc->max_len, from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* REX.B should be unset as we were using rip-relative addressing,
     but ensure it's unset anyway, tmp_regno is not r8-r15.  */
  if (insn_details->rex_offset != -1)
    dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;

  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;

  /* Convert the ModRM field to be base+disp.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
                        "displaced: using temp reg %d, old value %s, new value %s\n",
                        dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
                        paddress (gdbarch, rip_base));
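/* Worked example of the ModRM rewrite above (assuming %rcx is the free
   temporary): "mov 0x1234(%rip),%rax" is encoded 8b 05 34 12 00 00,
   with ModRM 0x05 (mod=00, rm=101, i.e. rip+disp32).  Clearing the
   mod/rm bits and or-ing in 0x80 + 1 gives ModRM 0x81, i.e.
   "mov 0x1234(%rcx),%rax", while %rcx temporarily holds FROM plus the
   instruction length.  */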
fixup_displaced_copy (struct gdbarch *gdbarch,
                      struct displaced_step_closure *dsc,
                      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
  const struct amd64_insn *details = &dsc->insn_details;

  if (details->modrm_offset != -1)
      gdb_byte modrm = details->raw_insn[details->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
          /* The insn uses rip-relative addressing.
          fixup_riprel (gdbarch, dsc, from, to, regs);
struct displaced_step_closure *
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
                                CORE_ADDR from, CORE_ADDR to,
                                struct regcache *regs)
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  struct displaced_step_closure *dsc =
    xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  dsc->max_len = len + fixup_sentinel_space;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc, from, to, regs);

  write_memory (to, buf, len);

  if (debug_displaced)
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
                          paddress (gdbarch, from), paddress (gdbarch, to));
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
amd64_absolute_jmp_p (const struct amd64_insn *details)
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
      /* jump near, absolute indirect (/4) */
      if ((insn[1] & 0x38) == 0x20)

      /* jump far, absolute indirect (/5) */
      if ((insn[1] & 0x38) == 0x28)

amd64_absolute_call_p (const struct amd64_insn *details)
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
      /* Call near, absolute indirect (/2) */
      if ((insn[1] & 0x38) == 0x10)

      /* Call far, absolute indirect (/3) */
      if ((insn[1] & 0x38) == 0x18)

amd64_ret_p (const struct amd64_insn *details)
  /* NOTE: gcc can emit "repz ; ret".  */
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

    case 0xc2: /* ret near, pop N bytes */
    case 0xc3: /* ret near */
    case 0xca: /* ret far, pop N bytes */
    case 0xcb: /* ret far */
    case 0xcf: /* iret */

amd64_call_p (const struct amd64_insn *details)
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (amd64_absolute_call_p (details))

  /* call near, relative */
  if (insn[0] == 0xe8)

/* Return non-zero if INSN is a system call, and set *LENGTHP to its
   length in bytes.  Otherwise, return zero.  */

amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0x0f && insn[1] == 0x05)
/* Fix up the state of registers and memory after having single-stepped
   a displaced instruction.  */

amd64_displaced_step_fixup (struct gdbarch *gdbarch,
                            struct displaced_step_closure *dsc,
                            CORE_ADDR from, CORE_ADDR to,
                            struct regcache *regs)
  /* The offset we applied to the instruction's address.  */
  ULONGEST insn_offset = to - from;
  gdb_byte *insn = dsc->insn_buf;
  const struct amd64_insn *insn_details = &dsc->insn_details;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
                        "displaced: fixup (%s, %s), "
                        "insn = 0x%02x 0x%02x ...\n",
                        paddress (gdbarch, from), paddress (gdbarch, to),

  /* If we used a tmp reg, restore it.  */

      if (debug_displaced)
        fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
                            dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
      regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);

  /* The list of issues to contend with here is taken from
     resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
     Yay for Free Software!  */

  /* Relocate the %rip back to the program's instruction stream,

  /* Except in the case of absolute or indirect jump or call
     instructions, or a return instruction, the new rip is relative to
     the displaced instruction; make it relative to the original insn.
     Well, signal handler returns don't need relocation either, but we use the
     value of %rip to recognize those; see below.  */
  if (! amd64_absolute_jmp_p (insn_details)
      && ! amd64_absolute_call_p (insn_details)
      && ! amd64_ret_p (insn_details))

      regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);

      /* A signal trampoline system call changes the %rip, resuming
         execution of the main program after the signal handler has
         returned.  That makes them like 'return' instructions; we
         shouldn't relocate %rip.

         But most system calls don't, and we do need to relocate %rip.

         Our heuristic for distinguishing these cases: if stepping
         over the system call instruction left control directly after
         the instruction, then we relocate --- control almost certainly
         doesn't belong in the displaced copy.  Otherwise, we assume
         the instruction has put control where it belongs, and leave
         it unrelocated.  Goodness help us if there are PC-relative
      if (amd64_syscall_p (insn_details, &insn_len)
          && orig_rip != to + insn_len
          /* GDB can get control back after the insn after the syscall.
             Presumably this is a kernel bug.
             Fixup ensures it's a nop, we add one to the length for it.  */
          && orig_rip != to + insn_len + 1)
          if (debug_displaced)
            fprintf_unfiltered (gdb_stdlog,
                                "displaced: syscall changed %%rip; "
                                "not relocating\n");

          ULONGEST rip = orig_rip - insn_offset;

          /* If we just stepped over a breakpoint insn, we don't backup
             the pc on purpose; this is to match behaviour without

          regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);

          if (debug_displaced)
            fprintf_unfiltered (gdb_stdlog,
                                "relocated %%rip from %s to %s\n",
                                paddress (gdbarch, orig_rip),
                                paddress (gdbarch, rip));

  /* If the instruction was PUSHFL, then the TF bit will be set in the
     pushed value, and should be cleared.  We'll leave this for later,
     since GDB already messes up the TF flag when stepping over a

  /* If the instruction was a call, the return address now atop the
     stack is the address following the copied instruction.  We need
     to make it the address following the original instruction.  */
  if (amd64_call_p (insn_details))
      const ULONGEST retaddr_len = 8;

      regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
      retaddr = read_memory_unsigned_integer (rsp, retaddr_len);
      retaddr = (retaddr - insn_offset) & 0xffffffffUL;
      write_memory_unsigned_integer (rsp, retaddr_len, retaddr);

      if (debug_displaced)
        fprintf_unfiltered (gdb_stdlog,
                            "displaced: relocated return addr at %s "
                            paddress (gdbarch, rsp),
                            paddress (gdbarch, retaddr));
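/* Example of the return-address fixup above: a 5-byte "call rel32"
   copied from FROM to the scratch area at TO pushes TO + 5 as its
   return address; subtracting insn_offset (TO - FROM) rewrites that
   slot to FROM + 5, the address following the original call (note
   that the code above also masks the result to 32 bits).  */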
/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS	AMD64_NUM_GREGS

struct amd64_frame_cache
  CORE_ADDR sp_offset;

  /* Saved registers.  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];

  /* Do we have a frame?  */

/* Initialize a frame cache.  */

amd64_init_frame_cache (struct amd64_frame_cache *cache)
  cache->sp_offset = -8;

  /* Saved registers.  We initialize these to -1 since zero is a valid
     offset (that's where %rbp is supposed to be stored).  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    cache->saved_regs[i] = -1;
  cache->saved_sp = 0;
  cache->saved_sp_reg = -1;

  /* Frameless until proven otherwise.  */
  cache->frameless_p = 1;

/* Allocate and initialize a frame cache.  */

static struct amd64_frame_cache *
amd64_alloc_frame_cache (void)
  struct amd64_frame_cache *cache;

  cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
  amd64_init_frame_cache (cache);
/* GCC 4.4 and later can put code in the prologue to realign the
   stack pointer.  Check whether PC points to such code, and update
   CACHE accordingly.  Return the first instruction after the code
   sequence or CURRENT_PC, whichever is smaller.  If we don't
   recognize the code, return PC.  */

amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
                           struct amd64_frame_cache *cache)
  /* There are 2 code sequences to re-align stack before the frame

        1. Use a caller-saved saved register:

        2. Use a callee-saved saved register:

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

        0x48 0x83 0xe4 0xf0                     andq $-16, %rsp
        0x48 0x81 0xe4 0x00 0xff 0xff 0xff      andq $-256, %rsp

  int offset, offset_and;

  if (target_read_memory (pc, buf, sizeof buf))

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
  if ((buf[0] & 0xfb) == 0x48

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[2] & 0xc7) != 0x44)

      /* REG has register number.  */
      reg = (buf[2] >> 3) & 7;

      /* Check the REX.R bit.  */

  /* Check callee-saved saved register.  The first instruction
     has to be "pushq %reg".  */

  if ((buf[0] & 0xf8) == 0x50)
  else if ((buf[0] & 0xf6) == 0x40
           && (buf[1] & 0xf8) == 0x50)
      /* Check the REX.B bit.  */
      if ((buf[0] & 1) != 0)

  reg += buf[offset] & 0x7;

  /* The next instruction has to be "leaq 16(%rsp), %reg".  */
  if ((buf[offset] & 0xfb) != 0x48
      || buf[offset + 1] != 0x8d
      || buf[offset + 3] != 0x24
      || buf[offset + 4] != 0x10)

  /* MOD must be binary 10 and R/M must be binary 100.  */
  if ((buf[offset + 2] & 0xc7) != 0x44)

  /* REG has register number.  */
  r = (buf[offset + 2] >> 3) & 7;

  /* Check the REX.R bit.  */
  if (buf[offset] == 0x4c)

  /* Registers in pushq and leaq have to be the same.  */

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)

  /* The next instruction has to be "andq $-XXX, %rsp".  */
  if (buf[offset] != 0x48
      || buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))

  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* The next instruction has to be "pushq -8(%reg)".  */

  if (buf[offset] == 0xff)
  else if ((buf[offset] & 0xf6) == 0x40
           && buf[offset + 1] == 0xff)
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */

  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return min (pc + offset + 2, current_pc);
/* Do a limited analysis of the prologue at PC and update CACHE
   accordingly.  Bail out early if CURRENT_PC is reached.  Return the
   address where the analysis stopped.

   We will handle only functions beginning with:

      movq %rsp, %rbp        0x48 0x89 0xe5

   Any function that doesn't start with this sequence will be assumed
   to have no prologue and thus no valid frame pointer in %rbp.  */

amd64_analyze_prologue (CORE_ADDR pc, CORE_ADDR current_pc,
                        struct amd64_frame_cache *cache)
  static gdb_byte proto[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */

  if (current_pc <= pc)

  pc = amd64_analyze_stack_align (pc, current_pc, cache);

  op = read_memory_unsigned_integer (pc, 1);

  if (op == 0x55)		/* pushq %rbp */
      /* Take into account that we've executed the `pushq %rbp' that
         starts this instruction sequence.  */
      cache->saved_regs[AMD64_RBP_REGNUM] = 0;
      cache->sp_offset += 8;

      /* If that's all, return now.  */
      if (current_pc <= pc + 1)

      /* Check for `movq %rsp, %rbp'.  */
      read_memory (pc + 1, buf, 3);
      if (memcmp (buf, proto, 3) != 0)

      /* OK, we actually have a frame.  */
      cache->frameless_p = 0;
/* Return PC of first real instruction.  */

amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
  struct amd64_frame_cache cache;

  amd64_init_frame_cache (&cache);
  pc = amd64_analyze_prologue (start_pc, 0xffffffffffffffffLL, &cache);
  if (cache.frameless_p)
/* Normal frames.  */

static struct amd64_frame_cache *
amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
  struct amd64_frame_cache *cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  cache->pc = get_frame_func (this_frame);
  amd64_analyze_prologue (cache->pc, get_frame_pc (this_frame), cache);

  if (cache->saved_sp_reg != -1)
      /* Stack pointer has been saved.  */
      get_frame_register (this_frame, cache->saved_sp_reg, buf);
      cache->saved_sp = extract_unsigned_integer (buf, 8);

  if (cache->frameless_p)
      /* We didn't find a valid frame.  If we're at the start of a
         function, or somewhere halfway through its prologue, the
         function's frame probably hasn't been fully set up yet.  Try to
         reconstruct the base address for the stack frame by looking
         at the stack pointer.  For truly "frameless" functions this

      if (cache->saved_sp_reg != -1)
          /* We're halfway aligning the stack.  */
          cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
          cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;

          /* This will be added back below.  */
          cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;

          get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
          cache->base = extract_unsigned_integer (buf, 8) + cache->sp_offset;

      get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8);

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  If we're halfway aligning the stack, %rip is handled
     differently (see above).  */
  if (!cache->frameless_p || cache->saved_sp_reg == -1)
    cache->saved_regs[AMD64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;
amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
                     struct frame_id *this_id)
  struct amd64_frame_cache *cache =
    amd64_frame_cache (this_frame, this_cache);

  /* This marks the outermost frame.  */
  if (cache->base == 0)

  (*this_id) = frame_id_build (cache->base + 16, cache->pc);

static struct value *
amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct amd64_frame_cache *cache =
    amd64_frame_cache (this_frame, this_cache);

  gdb_assert (regnum >= 0);

  if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
    return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);

  if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
    return frame_unwind_got_memory (this_frame, regnum,
                                    cache->saved_regs[regnum]);

  return frame_unwind_got_register (this_frame, regnum, regnum);

static const struct frame_unwind amd64_frame_unwind =
  amd64_frame_this_id,
  amd64_frame_prev_register,
  default_frame_sniffer
/* Signal trampolines.  */

/* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
   64-bit variants.  This would require using identical frame caches
   on both platforms.  */

static struct amd64_frame_cache *
amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
  struct amd64_frame_cache *cache;
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));

  cache = amd64_alloc_frame_cache ();

  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
  cache->base = extract_unsigned_integer (buf, 8) - 8;

  addr = tdep->sigcontext_addr (this_frame);
  gdb_assert (tdep->sc_reg_offset);
  gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
  for (i = 0; i < tdep->sc_num_regs; i++)
    if (tdep->sc_reg_offset[i] != -1)
      cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];

  *this_cache = cache;

amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
                              void **this_cache, struct frame_id *this_id)
  struct amd64_frame_cache *cache =
    amd64_sigtramp_frame_cache (this_frame, this_cache);

  (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));

static struct value *
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
                                    void **this_cache, int regnum)
  /* Make sure we've initialized the cache.  */
  amd64_sigtramp_frame_cache (this_frame, this_cache);

  return amd64_frame_prev_register (this_frame, this_cache, regnum);

amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
                              struct frame_info *this_frame,
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));

  /* We shouldn't even bother if we don't have a sigcontext_addr
  if (tdep->sigcontext_addr == NULL)

  if (tdep->sigtramp_p != NULL)
      if (tdep->sigtramp_p (this_frame))

  if (tdep->sigtramp_start != 0)
      CORE_ADDR pc = get_frame_pc (this_frame);

      gdb_assert (tdep->sigtramp_end != 0);
      if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)

static const struct frame_unwind amd64_sigtramp_frame_unwind =
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register,
  amd64_sigtramp_frame_sniffer
amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
  struct amd64_frame_cache *cache =
    amd64_frame_cache (this_frame, this_cache);

static const struct frame_base amd64_frame_base =
  &amd64_frame_unwind,
  amd64_frame_base_address,
  amd64_frame_base_address,
  amd64_frame_base_address

static struct frame_id
amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
  fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);

  return frame_id_build (fp + 16, get_frame_pc (this_frame));
/* 16 byte align the SP per frame requirements.  */

amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
  return sp & -(CORE_ADDR) 16;
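/* For example, an incoming SP of 0x7fffffffe468 is rounded down to
   0x7fffffffe460: the mask clears the low four bits, which keeps the
   argument area 16-byte aligned as the psABI requires.  */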
/* Supply register REGNUM from the buffer specified by FPREGS and LEN
   in the floating-point register set REGSET to register cache
   REGCACHE.  If REGNUM is -1, do this for all registers in REGSET.  */

amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
                       int regnum, const void *fpregs, size_t len)
  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);

  gdb_assert (len == tdep->sizeof_fpregset);
  amd64_supply_fxsave (regcache, regnum, fpregs);

/* Collect register REGNUM from the register cache REGCACHE and store
   it in the buffer specified by FPREGS and LEN as described by the
   floating-point register set REGSET.  If REGNUM is -1, do this for
   all registers in REGSET.  */

amd64_collect_fpregset (const struct regset *regset,
                        const struct regcache *regcache,
                        int regnum, void *fpregs, size_t len)
  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);

  gdb_assert (len == tdep->sizeof_fpregset);
  amd64_collect_fxsave (regcache, regnum, fpregs);

/* Return the appropriate register set for the core section identified
   by SECT_NAME and SECT_SIZE.  */

static const struct regset *
amd64_regset_from_core_section (struct gdbarch *gdbarch,
                                const char *sect_name, size_t sect_size)
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
      if (tdep->fpregset == NULL)
        tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
                                       amd64_collect_fpregset);

      return tdep->fpregset;

  return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
/* Figure out where the longjmp will land.  Slurp the jmp_buf out of
   %rdi.  We expect its value to be a pointer to the jmp_buf structure
   from which we extract the address that we will land at.  This
   address is copied into PC.  This routine returns non-zero on

amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
  struct gdbarch *gdbarch = get_frame_arch (frame);
  int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
  int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);

  /* If JB_PC_OFFSET is -1, we have no way to find out where the
     longjmp will land.  */
  if (jb_pc_offset == -1)

  get_frame_register (frame, AMD64_RDI_REGNUM, buf);
  jb_addr = extract_typed_address
    (buf, builtin_type (gdbarch)->builtin_data_ptr);
  if (target_read_memory (jb_addr + jb_pc_offset, buf, len))

  *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
  set_gdbarch_register_name (gdbarch, amd64_register_name);
  set_gdbarch_register_type (gdbarch, amd64_register_type);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 128);

  set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  /* Avoid wiring in the MMX registers for now.  */
  set_gdbarch_num_pseudo_regs (gdbarch, 0);
  tdep->mm0_regnum = -1;

  set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);

  frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
  frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  /* If we have a register mapping, enable the generic core file support.  */
  if (tdep->gregset_reg_offset)
    set_gdbarch_regset_from_core_section (gdbarch,
                                          amd64_regset_from_core_section);

  set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
/* The 64-bit FXSAVE format differs from the 32-bit format in the
   sense that the instruction pointer and data pointer are simply
   64-bit offsets into the code segment and the data segment instead
   of a selector offset pair.  The functions below store the upper 32
   bits of these pointers (instead of just the 16-bits of the segment

/* Fill register REGNUM in REGCACHE with the appropriate
   floating-point or SSE register value from *FXSAVE.  If REGNUM is
   -1, do this for all registers.  This function masks off any of the
   reserved bits in *FXSAVE.  */

amd64_supply_fxsave (struct regcache *regcache, int regnum,
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  i387_supply_fxsave (regcache, regnum, fxsave);

  if (fxsave && gdbarch_ptr_bit (gdbarch) == 64)
      const gdb_byte *regs = fxsave;

      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
        regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
        regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);

/* Fill register REGNUM (if it is a floating-point or SSE register) in
   *FXSAVE with the value from REGCACHE.  If REGNUM is -1, do this for
   all registers.  This function doesn't touch any of the reserved

amd64_collect_fxsave (const struct regcache *regcache, int regnum,
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  gdb_byte *regs = fxsave;

  i387_collect_fxsave (regcache, regnum, fxsave);

  if (gdbarch_ptr_bit (gdbarch) == 64)
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
        regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
        regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);