1 /* Target-dependent code for AMD64.
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
6 Contributed by Jiri Smid, SuSE Labs.
8 This file is part of GDB.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "opcode/i386.h"
26 #include "arch-utils.h"
28 #include "dummy-frame.h"
30 #include "frame-base.h"
31 #include "frame-unwind.h"
40 #include "gdb_assert.h"
42 #include "amd64-tdep.h"
43 #include "i387-tdep.h"
45 #include "features/i386/amd64.c"
47 /* Note that the AMD64 architecture was previously known as x86-64.
48 The latter is (forever) engraved into the canonical system name as
49 returned by config.guess, and used as the name for the AMD64 port
50 of GNU/Linux. The BSD's have renamed their ports to amd64; they
51 don't like to shout. For GDB we prefer the amd64_-prefix over the
52 x86_64_-prefix since it's so much easier to type. */
54 /* Register information. */
/* Names of the AMD64 raw registers, indexed by GDB register number.  */

static const char *amd64_register_names[] = 
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};

/* Total number of registers.  */
#define AMD64_NUM_REGS	ARRAY_SIZE (amd64_register_names)
77 /* The registers used to pass integer arguments during a function call. */
78 static int amd64_dummy_call_integer_regs[] =
80 AMD64_RDI_REGNUM, /* %rdi */
81 AMD64_RSI_REGNUM, /* %rsi */
82 AMD64_RDX_REGNUM, /* %rdx */
83 AMD64_RCX_REGNUM, /* %rcx */
88 /* DWARF Register Number Mapping as defined in the System V psABI,
91 static int amd64_dwarf_regmap[] =
93 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
94 AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
95 AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
96 AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
98 /* Frame Pointer Register RBP. */
101 /* Stack Pointer Register RSP. */
104 /* Extended Integer Registers 8 - 15. */
105 8, 9, 10, 11, 12, 13, 14, 15,
107 /* Return Address RA. Mapped to RIP. */
110 /* SSE Registers 0 - 7. */
111 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
112 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
113 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
114 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
116 /* Extended SSE Registers 8 - 15. */
117 AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
118 AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
119 AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
120 AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
122 /* Floating Point Registers 0-7. */
123 AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
124 AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
125 AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
126 AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
128 /* Control and Status Flags Register. */
131 /* Selector Registers. */
141 /* Segment Base Address Registers. */
147 /* Special Selector Registers. */
151 /* Floating Point Control Registers. */
157 static const int amd64_dwarf_regmap_len =
158 (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
160 /* Convert DWARF register number REG to the appropriate register
161 number used by GDB. */
164 amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
168 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
169 regnum = amd64_dwarf_regmap[reg];
172 warning (_("Unmapped DWARF Register #%d encountered."), reg);
177 /* Map architectural register numbers to gdb register numbers. */
179 static const int amd64_arch_regmap[16] =
181 AMD64_RAX_REGNUM, /* %rax */
182 AMD64_RCX_REGNUM, /* %rcx */
183 AMD64_RDX_REGNUM, /* %rdx */
184 AMD64_RBX_REGNUM, /* %rbx */
185 AMD64_RSP_REGNUM, /* %rsp */
186 AMD64_RBP_REGNUM, /* %rbp */
187 AMD64_RSI_REGNUM, /* %rsi */
188 AMD64_RDI_REGNUM, /* %rdi */
189 AMD64_R8_REGNUM, /* %r8 */
190 AMD64_R9_REGNUM, /* %r9 */
191 AMD64_R10_REGNUM, /* %r10 */
192 AMD64_R11_REGNUM, /* %r11 */
193 AMD64_R12_REGNUM, /* %r12 */
194 AMD64_R13_REGNUM, /* %r13 */
195 AMD64_R14_REGNUM, /* %r14 */
196 AMD64_R15_REGNUM /* %r15 */
199 static const int amd64_arch_regmap_len =
200 (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
202 /* Convert architectural register number REG to the appropriate register
203 number used by GDB. */
206 amd64_arch_reg_to_regnum (int reg)
208 gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
210 return amd64_arch_regmap[reg];
/* Register names for byte pseudo-registers.  */

static const char *amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l"
};
/* Register names for word pseudo-registers.  Note that the name for
   the 16-bit %sp alias is intentionally empty in the original table.  */

static const char *amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "", 
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};
/* Register names for dword pseudo-registers.  */

static const char *amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d"
};
237 /* Return the name of register REGNUM. */
240 amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
242 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
243 if (i386_byte_regnum_p (gdbarch, regnum))
244 return amd64_byte_names[regnum - tdep->al_regnum];
245 else if (i386_word_regnum_p (gdbarch, regnum))
246 return amd64_word_names[regnum - tdep->ax_regnum];
247 else if (i386_dword_regnum_p (gdbarch, regnum))
248 return amd64_dword_names[regnum - tdep->eax_regnum];
250 return i386_pseudo_register_name (gdbarch, regnum);
254 amd64_pseudo_register_read (struct gdbarch *gdbarch,
255 struct regcache *regcache,
256 int regnum, gdb_byte *buf)
258 gdb_byte raw_buf[MAX_REGISTER_SIZE];
259 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
261 if (i386_byte_regnum_p (gdbarch, regnum))
263 int gpnum = regnum - tdep->al_regnum;
265 /* Extract (always little endian). */
266 regcache_raw_read (regcache, gpnum, raw_buf);
267 memcpy (buf, raw_buf, 1);
269 else if (i386_dword_regnum_p (gdbarch, regnum))
271 int gpnum = regnum - tdep->eax_regnum;
272 /* Extract (always little endian). */
273 regcache_raw_read (regcache, gpnum, raw_buf);
274 memcpy (buf, raw_buf, 4);
277 i386_pseudo_register_read (gdbarch, regcache, regnum, buf);
281 amd64_pseudo_register_write (struct gdbarch *gdbarch,
282 struct regcache *regcache,
283 int regnum, const gdb_byte *buf)
285 gdb_byte raw_buf[MAX_REGISTER_SIZE];
286 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
288 if (i386_byte_regnum_p (gdbarch, regnum))
290 int gpnum = regnum - tdep->al_regnum;
293 regcache_raw_read (regcache, gpnum, raw_buf);
294 /* ... Modify ... (always little endian). */
295 memcpy (raw_buf, buf, 1);
297 regcache_raw_write (regcache, gpnum, raw_buf);
299 else if (i386_dword_regnum_p (gdbarch, regnum))
301 int gpnum = regnum - tdep->eax_regnum;
304 regcache_raw_read (regcache, gpnum, raw_buf);
305 /* ... Modify ... (always little endian). */
306 memcpy (raw_buf, buf, 4);
308 regcache_raw_write (regcache, gpnum, raw_buf);
311 i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
316 /* Return the union class of CLASS1 and CLASS2. See the psABI for
319 static enum amd64_reg_class
320 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
322 /* Rule (a): If both classes are equal, this is the resulting class. */
323 if (class1 == class2)
326 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
327 is the other class. */
328 if (class1 == AMD64_NO_CLASS)
330 if (class2 == AMD64_NO_CLASS)
333 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
334 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
337 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
338 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
339 return AMD64_INTEGER;
341 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
342 MEMORY is used as class. */
343 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
344 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
345 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
348 /* Rule (f): Otherwise class SSE is used. */
352 /* Return non-zero if TYPE is a non-POD structure or union type. */
355 amd64_non_pod_p (struct type *type)
357 /* ??? A class with a base class certainly isn't POD, but does this
358 catch all non-POD structure types? */
359 if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
365 /* Classify TYPE according to the rules for aggregate (structures and
366 arrays) and union types, and store the result in CLASS. */
369 amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
371 int len = TYPE_LENGTH (type);
373 /* 1. If the size of an object is larger than two eightbytes, or in
374 C++, is a non-POD structure or union type, or contains
375 unaligned fields, it has class memory. */
376 if (len > 16 || amd64_non_pod_p (type))
378 class[0] = class[1] = AMD64_MEMORY;
382 /* 2. Both eightbytes get initialized to class NO_CLASS. */
383 class[0] = class[1] = AMD64_NO_CLASS;
385 /* 3. Each field of an object is classified recursively so that
386 always two fields are considered. The resulting class is
387 calculated according to the classes of the fields in the
390 if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
392 struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
394 /* All fields in an array have the same type. */
395 amd64_classify (subtype, class);
396 if (len > 8 && class[1] == AMD64_NO_CLASS)
403 /* Structure or union. */
404 gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
405 || TYPE_CODE (type) == TYPE_CODE_UNION);
407 for (i = 0; i < TYPE_NFIELDS (type); i++)
409 struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
410 int pos = TYPE_FIELD_BITPOS (type, i) / 64;
411 enum amd64_reg_class subclass[2];
412 int bitsize = TYPE_FIELD_BITSIZE (type, i);
416 bitsize = TYPE_LENGTH (subtype) * 8;
417 endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;
419 /* Ignore static fields. */
420 if (field_is_static (&TYPE_FIELD (type, i)))
423 gdb_assert (pos == 0 || pos == 1);
425 amd64_classify (subtype, subclass);
426 class[pos] = amd64_merge_classes (class[pos], subclass[0]);
427 if (bitsize <= 64 && pos == 0 && endpos == 1)
428 /* This is a bit of an odd case: We have a field that would
429 normally fit in one of the two eightbytes, except that
430 it is placed in a way that this field straddles them.
431 This has been seen with a structure containing an array.
433 The ABI is a bit unclear in this case, but we assume that
434 this field's class (stored in subclass[0]) must also be merged
435 into class[1]. In other words, our field has a piece stored
436 in the second eight-byte, and thus its class applies to
437 the second eight-byte as well.
439 In the case where the field length exceeds 8 bytes,
440 it should not be necessary to merge the field class
441 into class[1]. As LEN > 8, subclass[1] is necessarily
442 different from AMD64_NO_CLASS. If subclass[1] is equal
443 to subclass[0], then the normal class[1]/subclass[1]
444 merging will take care of everything. For subclass[1]
445 to be different from subclass[0], I can only see the case
446 where we have a SSE/SSEUP or X87/X87UP pair, which both
447 use up all 16 bytes of the aggregate, and are already
448 handled just fine (because each portion sits on its own
450 class[1] = amd64_merge_classes (class[1], subclass[0]);
452 class[1] = amd64_merge_classes (class[1], subclass[1]);
456 /* 4. Then a post merger cleanup is done: */
458 /* Rule (a): If one of the classes is MEMORY, the whole argument is
460 if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
461 class[0] = class[1] = AMD64_MEMORY;
463 /* Rule (b): If SSEUP is not preceeded by SSE, it is converted to
465 if (class[0] == AMD64_SSEUP)
466 class[0] = AMD64_SSE;
467 if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
468 class[1] = AMD64_SSE;
471 /* Classify TYPE, and store the result in CLASS. */
474 amd64_classify (struct type *type, enum amd64_reg_class class[2])
476 enum type_code code = TYPE_CODE (type);
477 int len = TYPE_LENGTH (type);
479 class[0] = class[1] = AMD64_NO_CLASS;
481 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
482 long, long long, and pointers are in the INTEGER class. Similarly,
483 range types, used by languages such as Ada, are also in the INTEGER
485 if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
486 || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
487 || code == TYPE_CODE_CHAR
488 || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
489 && (len == 1 || len == 2 || len == 4 || len == 8))
490 class[0] = AMD64_INTEGER;
492 /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
494 else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
495 && (len == 4 || len == 8))
497 class[0] = AMD64_SSE;
499 /* Arguments of types __float128, _Decimal128 and __m128 are split into
500 two halves. The least significant ones belong to class SSE, the most
501 significant one to class SSEUP. */
502 else if (code == TYPE_CODE_DECFLOAT && len == 16)
503 /* FIXME: __float128, __m128. */
504 class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;
506 /* The 64-bit mantissa of arguments of type long double belongs to
507 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
509 else if (code == TYPE_CODE_FLT && len == 16)
510 /* Class X87 and X87UP. */
511 class[0] = AMD64_X87, class[1] = AMD64_X87UP;
514 else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
515 || code == TYPE_CODE_UNION)
516 amd64_classify_aggregate (type, class);
519 static enum return_value_convention
520 amd64_return_value (struct gdbarch *gdbarch, struct type *func_type,
521 struct type *type, struct regcache *regcache,
522 gdb_byte *readbuf, const gdb_byte *writebuf)
524 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
525 enum amd64_reg_class class[2];
526 int len = TYPE_LENGTH (type);
527 static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
528 static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
533 gdb_assert (!(readbuf && writebuf));
534 gdb_assert (tdep->classify);
536 /* 1. Classify the return type with the classification algorithm. */
537 tdep->classify (type, class);
539 /* 2. If the type has class MEMORY, then the caller provides space
540 for the return value and passes the address of this storage in
541 %rdi as if it were the first argument to the function. In effect,
542 this address becomes a hidden first argument.
544 On return %rax will contain the address that has been passed in
545 by the caller in %rdi. */
546 if (class[0] == AMD64_MEMORY)
548 /* As indicated by the comment above, the ABI guarantees that we
549 can always find the return value just after the function has
556 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
557 read_memory (addr, readbuf, TYPE_LENGTH (type));
560 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
563 gdb_assert (class[1] != AMD64_MEMORY);
564 gdb_assert (len <= 16);
566 for (i = 0; len > 0; i++, len -= 8)
574 /* 3. If the class is INTEGER, the next available register
575 of the sequence %rax, %rdx is used. */
576 regnum = integer_regnum[integer_reg++];
580 /* 4. If the class is SSE, the next available SSE register
581 of the sequence %xmm0, %xmm1 is used. */
582 regnum = sse_regnum[sse_reg++];
586 /* 5. If the class is SSEUP, the eightbyte is passed in the
587 upper half of the last used SSE register. */
588 gdb_assert (sse_reg > 0);
589 regnum = sse_regnum[sse_reg - 1];
594 /* 6. If the class is X87, the value is returned on the X87
595 stack in %st0 as 80-bit x87 number. */
596 regnum = AMD64_ST0_REGNUM;
598 i387_return_value (gdbarch, regcache);
602 /* 7. If the class is X87UP, the value is returned together
603 with the previous X87 value in %st0. */
604 gdb_assert (i > 0 && class[0] == AMD64_X87);
605 regnum = AMD64_ST0_REGNUM;
614 gdb_assert (!"Unexpected register class.");
617 gdb_assert (regnum != -1);
620 regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
623 regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
627 return RETURN_VALUE_REGISTER_CONVENTION;
632 amd64_push_arguments (struct regcache *regcache, int nargs,
633 struct value **args, CORE_ADDR sp, int struct_return)
635 struct gdbarch *gdbarch = get_regcache_arch (regcache);
636 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
637 int *integer_regs = tdep->call_dummy_integer_regs;
638 int num_integer_regs = tdep->call_dummy_num_integer_regs;
640 static int sse_regnum[] =
642 /* %xmm0 ... %xmm7 */
643 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
644 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
645 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
646 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
648 struct value **stack_args = alloca (nargs * sizeof (struct value *));
649 /* An array that mirrors the stack_args array. For all arguments
650 that are passed by MEMORY, if that argument's address also needs
651 to be stored in a register, the ARG_ADDR_REGNO array will contain
652 that register number (or a negative value otherwise). */
653 int *arg_addr_regno = alloca (nargs * sizeof (int));
654 int num_stack_args = 0;
655 int num_elements = 0;
661 gdb_assert (tdep->classify);
663 /* Reserve a register for the "hidden" argument. */
667 for (i = 0; i < nargs; i++)
669 struct type *type = value_type (args[i]);
670 int len = TYPE_LENGTH (type);
671 enum amd64_reg_class class[2];
672 int needed_integer_regs = 0;
673 int needed_sse_regs = 0;
676 /* Classify argument. */
677 tdep->classify (type, class);
679 /* Calculate the number of integer and SSE registers needed for
681 for (j = 0; j < 2; j++)
683 if (class[j] == AMD64_INTEGER)
684 needed_integer_regs++;
685 else if (class[j] == AMD64_SSE)
689 /* Check whether enough registers are available, and if the
690 argument should be passed in registers at all. */
691 if (integer_reg + needed_integer_regs > num_integer_regs
692 || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
693 || (needed_integer_regs == 0 && needed_sse_regs == 0))
695 /* The argument will be passed on the stack. */
696 num_elements += ((len + 7) / 8);
697 stack_args[num_stack_args] = args[i];
698 /* If this is an AMD64_MEMORY argument whose address must also
699 be passed in one of the integer registers, reserve that
700 register and associate this value to that register so that
701 we can store the argument address as soon as we know it. */
702 if (class[0] == AMD64_MEMORY
703 && tdep->memory_args_by_pointer
704 && integer_reg < tdep->call_dummy_num_integer_regs)
705 arg_addr_regno[num_stack_args] =
706 tdep->call_dummy_integer_regs[integer_reg++];
708 arg_addr_regno[num_stack_args] = -1;
713 /* The argument will be passed in registers. */
714 const gdb_byte *valbuf = value_contents (args[i]);
717 gdb_assert (len <= 16);
719 for (j = 0; len > 0; j++, len -= 8)
727 regnum = integer_regs[integer_reg++];
731 regnum = sse_regnum[sse_reg++];
735 gdb_assert (sse_reg > 0);
736 regnum = sse_regnum[sse_reg - 1];
741 gdb_assert (!"Unexpected register class.");
744 gdb_assert (regnum != -1);
745 memset (buf, 0, sizeof buf);
746 memcpy (buf, valbuf + j * 8, min (len, 8));
747 regcache_raw_write_part (regcache, regnum, offset, 8, buf);
752 /* Allocate space for the arguments on the stack. */
753 sp -= num_elements * 8;
755 /* The psABI says that "The end of the input argument area shall be
756 aligned on a 16 byte boundary." */
759 /* Write out the arguments to the stack. */
760 for (i = 0; i < num_stack_args; i++)
762 struct type *type = value_type (stack_args[i]);
763 const gdb_byte *valbuf = value_contents (stack_args[i]);
764 int len = TYPE_LENGTH (type);
765 CORE_ADDR arg_addr = sp + element * 8;
767 write_memory (arg_addr, valbuf, len);
768 if (arg_addr_regno[i] >= 0)
770 /* We also need to store the address of that argument in
771 the given register. */
773 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
775 store_unsigned_integer (buf, 8, byte_order, arg_addr);
776 regcache_cooked_write (regcache, arg_addr_regno[i], buf);
778 element += ((len + 7) / 8);
781 /* The psABI says that "For calls that may call functions that use
782 varargs or stdargs (prototype-less calls or calls to functions
783 containing ellipsis (...) in the declaration) %al is used as
784 hidden argument to specify the number of SSE registers used. */
785 regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
790 amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
791 struct regcache *regcache, CORE_ADDR bp_addr,
792 int nargs, struct value **args, CORE_ADDR sp,
793 int struct_return, CORE_ADDR struct_addr)
795 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
796 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
799 /* Pass arguments. */
800 sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);
802 /* Pass "hidden" argument". */
805 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
806 /* The "hidden" argument is passed throught the first argument
808 const int arg_regnum = tdep->call_dummy_integer_regs[0];
810 store_unsigned_integer (buf, 8, byte_order, struct_addr);
811 regcache_cooked_write (regcache, arg_regnum, buf);
814 /* Reserve some memory on the stack for the integer-parameter registers,
815 if required by the ABI. */
816 if (tdep->integer_param_regs_saved_in_caller_frame)
817 sp -= tdep->call_dummy_num_integer_regs * 8;
819 /* Store return address. */
821 store_unsigned_integer (buf, 8, byte_order, bp_addr);
822 write_memory (sp, buf, 8);
824 /* Finally, update the stack pointer... */
825 store_unsigned_integer (buf, 8, byte_order, sp);
826 regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
828 /* ...and fake a frame pointer. */
829 regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
834 /* Displaced instruction handling. */
836 /* A partially decoded instruction.
837 This contains enough details for displaced stepping purposes. */
841 /* The number of opcode bytes. */
843 /* The offset of the rex prefix or -1 if not present. */
845 /* The offset to the first opcode byte. */
847 /* The offset to the modrm byte or -1 if not present. */
850 /* The raw instruction. */
854 struct displaced_step_closure
856 /* For rip-relative insns, saved copy of the reg we use instead of %rip. */
861 /* Details of the instruction. */
862 struct amd64_insn insn_details;
864 /* Amount of space allocated to insn_buf. */
867 /* The possibly modified insn.
868 This is a variable-length field. */
869 gdb_byte insn_buf[1];
/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

static const unsigned char onebyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};
static const unsigned char twobyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};

/* Forward declaration; used by the displaced-stepping machinery.  */
static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
925 rex_prefix_p (gdb_byte pfx)
927 return REX_PREFIX_P (pfx);
930 /* Skip the legacy instruction prefixes in INSN.
931 We assume INSN is properly sentineled so we don't have to worry
932 about falling off the end of the buffer. */
935 amd64_skip_prefixes (gdb_byte *insn)
941 case DATA_PREFIX_OPCODE:
942 case ADDR_PREFIX_OPCODE:
943 case CS_PREFIX_OPCODE:
944 case DS_PREFIX_OPCODE:
945 case ES_PREFIX_OPCODE:
946 case FS_PREFIX_OPCODE:
947 case GS_PREFIX_OPCODE:
948 case SS_PREFIX_OPCODE:
949 case LOCK_PREFIX_OPCODE:
950 case REPE_PREFIX_OPCODE:
951 case REPNE_PREFIX_OPCODE:
963 /* fprintf-function for amd64_insn_length.
964 This function is a nop, we don't want to print anything, we just want to
965 compute the length of the insn. */
967 static int ATTR_FORMAT (printf, 2, 3)
968 amd64_insn_length_fprintf (void *stream, const char *format, ...)
973 /* Initialize a struct disassemble_info for amd64_insn_length. */
976 amd64_insn_length_init_dis (struct gdbarch *gdbarch,
977 struct disassemble_info *di,
978 const gdb_byte *insn, int max_len,
981 init_disassemble_info (di, NULL, amd64_insn_length_fprintf);
983 /* init_disassemble_info installs buffer_read_memory, etc.
984 so we don't need to do that here.
985 The cast is necessary until disassemble_info is const-ified. */
986 di->buffer = (gdb_byte *) insn;
987 di->buffer_length = max_len;
988 di->buffer_vma = addr;
990 di->arch = gdbarch_bfd_arch_info (gdbarch)->arch;
991 di->mach = gdbarch_bfd_arch_info (gdbarch)->mach;
992 di->endian = gdbarch_byte_order (gdbarch);
993 di->endian_code = gdbarch_byte_order_for_code (gdbarch);
995 disassemble_init_for_target (di);
998 /* Return the length in bytes of INSN.
999 MAX_LEN is the size of the buffer containing INSN.
1000 libopcodes currently doesn't export a utility to compute the
1001 instruction length, so use the disassembler until then. */
1004 amd64_insn_length (struct gdbarch *gdbarch,
1005 const gdb_byte *insn, int max_len, CORE_ADDR addr)
1007 struct disassemble_info di;
1009 amd64_insn_length_init_dis (gdbarch, &di, insn, max_len, addr);
1011 return gdbarch_print_insn (gdbarch, addr, &di);
1014 /* Return an integer register (other than RSP) that is unused as an input
1016 In order to not require adding a rex prefix if the insn doesn't already
1017 have one, the result is restricted to RAX ... RDI, sans RSP.
1018 The register numbering of the result follows architecture ordering,
1022 amd64_get_unused_input_int_reg (const struct amd64_insn *details)
1024 /* 1 bit for each reg */
1025 int used_regs_mask = 0;
1027 /* There can be at most 3 int regs used as inputs in an insn, and we have
1028 7 to choose from (RAX ... RDI, sans RSP).
1029 This allows us to take a conservative approach and keep things simple.
1030 E.g. By avoiding RAX, we don't have to specifically watch for opcodes
1031 that implicitly specify RAX. */
1034 used_regs_mask |= 1 << EAX_REG_NUM;
1035 /* Similarily avoid RDX, implicit operand in divides. */
1036 used_regs_mask |= 1 << EDX_REG_NUM;
1038 used_regs_mask |= 1 << ESP_REG_NUM;
1040 /* If the opcode is one byte long and there's no ModRM byte,
1041 assume the opcode specifies a register. */
1042 if (details->opcode_len == 1 && details->modrm_offset == -1)
1043 used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
1045 /* Mark used regs in the modrm/sib bytes. */
1046 if (details->modrm_offset != -1)
1048 int modrm = details->raw_insn[details->modrm_offset];
1049 int mod = MODRM_MOD_FIELD (modrm);
1050 int reg = MODRM_REG_FIELD (modrm);
1051 int rm = MODRM_RM_FIELD (modrm);
1052 int have_sib = mod != 3 && rm == 4;
1054 /* Assume the reg field of the modrm byte specifies a register. */
1055 used_regs_mask |= 1 << reg;
1059 int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
1060 int index = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
1061 used_regs_mask |= 1 << base;
1062 used_regs_mask |= 1 << index;
1066 used_regs_mask |= 1 << rm;
1070 gdb_assert (used_regs_mask < 256);
1071 gdb_assert (used_regs_mask != 255);
1073 /* Finally, find a free reg. */
1077 for (i = 0; i < 8; ++i)
1079 if (! (used_regs_mask & (1 << i)))
1083 /* We shouldn't get here. */
1084 internal_error (__FILE__, __LINE__, _("unable to find free reg"));
1088 /* Extract the details of INSN that we need. */
1091 amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
1093 gdb_byte *start = insn;
1096 details->raw_insn = insn;
1098 details->opcode_len = -1;
1099 details->rex_offset = -1;
1100 details->opcode_offset = -1;
1101 details->modrm_offset = -1;
1103 /* Skip legacy instruction prefixes. */
1104 insn = amd64_skip_prefixes (insn);
1106 /* Skip REX instruction prefix. */
1107 if (rex_prefix_p (*insn))
1109 details->rex_offset = insn - start;
1113 details->opcode_offset = insn - start;
1115 if (*insn == TWO_BYTE_OPCODE_ESCAPE)
1117 /* Two or three-byte opcode. */
1119 need_modrm = twobyte_has_modrm[*insn];
1121 /* Check for three-byte opcode. */
1131 details->opcode_len = 3;
1134 details->opcode_len = 2;
1140 /* One-byte opcode. */
1141 need_modrm = onebyte_has_modrm[*insn];
1142 details->opcode_len = 1;
1148 details->modrm_offset = insn - start;
/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.

   NOTE(review): this block appears to have lost several lines (the opening
   brace, the declarations of `disp', `insn_length' and `rip_base', and the
   statement advancing `insn' past the ModRM byte) — reconstruct from the
   upstream GDB sources before building.  */

static void
fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
const struct amd64_insn *insn_details = &dsc->insn_details;
int modrm_offset = insn_details->modrm_offset;
gdb_byte *insn = insn_details->raw_insn + modrm_offset;
int arch_tmp_regno, tmp_regno;
ULONGEST orig_value;

/* %rip+disp32 addressing mode, displacement follows ModRM byte.  */

/* Compute the rip-relative address.  */
disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
insn_length = amd64_insn_length (gdbarch, dsc->insn_buf, dsc->max_len, from);
rip_base = from + insn_length;

/* We need a register to hold the address.
   Pick one not used in the insn.
   NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

/* REX.B should be unset as we were using rip-relative addressing,
   but ensure it's unset anyway, tmp_regno is not r8-r15.  */
if (insn_details->rex_offset != -1)
dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;

/* Save the original value of the scratch register so the fixup phase
   can restore it after the single-step.  */
regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
dsc->tmp_regno = tmp_regno;
dsc->tmp_save = orig_value;

/* Convert the ModRM field to be base+disp.  */
dsc->insn_buf[modrm_offset] &= ~0xc7;
dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

/* Point the scratch register at pc + insn_length so the unchanged
   disp32 still resolves to the original target address.  */
regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

if (debug_displaced)
fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
"displaced: using temp reg %d, old value %s, new value %s\n",
dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
paddress (gdbarch, rip_base));
1212 fixup_displaced_copy (struct gdbarch *gdbarch,
1213 struct displaced_step_closure *dsc,
1214 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1216 const struct amd64_insn *details = &dsc->insn_details;
1218 if (details->modrm_offset != -1)
1220 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1222 if ((modrm & 0xc7) == 0x05)
1224 /* The insn uses rip-relative addressing.
1226 fixup_riprel (gdbarch, dsc, from, to, regs);
1231 struct displaced_step_closure *
1232 amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
1233 CORE_ADDR from, CORE_ADDR to,
1234 struct regcache *regs)
1236 int len = gdbarch_max_insn_length (gdbarch);
1237 /* Extra space for sentinels so fixup_{riprel,displaced_copy don't have to
1238 continually watch for running off the end of the buffer. */
1239 int fixup_sentinel_space = len;
1240 struct displaced_step_closure *dsc =
1241 xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
1242 gdb_byte *buf = &dsc->insn_buf[0];
1243 struct amd64_insn *details = &dsc->insn_details;
1246 dsc->max_len = len + fixup_sentinel_space;
1248 read_memory (from, buf, len);
1250 /* Set up the sentinel space so we don't have to worry about running
1251 off the end of the buffer. An excessive number of leading prefixes
1252 could otherwise cause this. */
1253 memset (buf + len, 0, fixup_sentinel_space);
1255 amd64_get_insn_details (buf, details);
1257 /* GDB may get control back after the insn after the syscall.
1258 Presumably this is a kernel bug.
1259 If this is a syscall, make sure there's a nop afterwards. */
1263 if (amd64_syscall_p (details, &syscall_length))
1264 buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
1267 /* Modify the insn to cope with the address where it will be executed from.
1268 In particular, handle any rip-relative addressing. */
1269 fixup_displaced_copy (gdbarch, dsc, from, to, regs);
1271 write_memory (to, buf, len);
1273 if (debug_displaced)
1275 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
1276 paddress (gdbarch, from), paddress (gdbarch, to));
1277 displaced_step_dump_bytes (gdb_stdlog, buf, len);
1284 amd64_absolute_jmp_p (const struct amd64_insn *details)
1286 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1288 if (insn[0] == 0xff)
1290 /* jump near, absolute indirect (/4) */
1291 if ((insn[1] & 0x38) == 0x20)
1294 /* jump far, absolute indirect (/5) */
1295 if ((insn[1] & 0x38) == 0x28)
1303 amd64_absolute_call_p (const struct amd64_insn *details)
1305 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1307 if (insn[0] == 0xff)
1309 /* Call near, absolute indirect (/2) */
1310 if ((insn[1] & 0x38) == 0x10)
1313 /* Call far, absolute indirect (/3) */
1314 if ((insn[1] & 0x38) == 0x18)
1322 amd64_ret_p (const struct amd64_insn *details)
1324 /* NOTE: gcc can emit "repz ; ret". */
1325 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1329 case 0xc2: /* ret near, pop N bytes */
1330 case 0xc3: /* ret near */
1331 case 0xca: /* ret far, pop N bytes */
1332 case 0xcb: /* ret far */
1333 case 0xcf: /* iret */
1342 amd64_call_p (const struct amd64_insn *details)
1344 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1346 if (amd64_absolute_call_p (details))
1349 /* call near, relative */
1350 if (insn[0] == 0xe8)
1356 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
1357 length in bytes. Otherwise, return zero. */
1360 amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1362 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1364 if (insn[0] == 0x0f && insn[1] == 0x05)
1373 /* Fix up the state of registers and memory after having single-stepped
1374 a displaced instruction. */
1377 amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1378 struct displaced_step_closure *dsc,
1379 CORE_ADDR from, CORE_ADDR to,
1380 struct regcache *regs)
1382 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1383 /* The offset we applied to the instruction's address. */
1384 ULONGEST insn_offset = to - from;
1385 gdb_byte *insn = dsc->insn_buf;
1386 const struct amd64_insn *insn_details = &dsc->insn_details;
1388 if (debug_displaced)
1389 fprintf_unfiltered (gdb_stdlog,
1390 "displaced: fixup (%s, %s), "
1391 "insn = 0x%02x 0x%02x ...\n",
1392 paddress (gdbarch, from), paddress (gdbarch, to),
1395 /* If we used a tmp reg, restore it. */
1399 if (debug_displaced)
1400 fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
1401 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
1402 regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1405 /* The list of issues to contend with here is taken from
1406 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1407 Yay for Free Software! */
1409 /* Relocate the %rip back to the program's instruction stream,
1412 /* Except in the case of absolute or indirect jump or call
1413 instructions, or a return instruction, the new rip is relative to
1414 the displaced instruction; make it relative to the original insn.
1415 Well, signal handler returns don't need relocation either, but we use the
1416 value of %rip to recognize those; see below. */
1417 if (! amd64_absolute_jmp_p (insn_details)
1418 && ! amd64_absolute_call_p (insn_details)
1419 && ! amd64_ret_p (insn_details))
1424 regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1426 /* A signal trampoline system call changes the %rip, resuming
1427 execution of the main program after the signal handler has
1428 returned. That makes them like 'return' instructions; we
1429 shouldn't relocate %rip.
1431 But most system calls don't, and we do need to relocate %rip.
1433 Our heuristic for distinguishing these cases: if stepping
1434 over the system call instruction left control directly after
1435 the instruction, the we relocate --- control almost certainly
1436 doesn't belong in the displaced copy. Otherwise, we assume
1437 the instruction has put control where it belongs, and leave
1438 it unrelocated. Goodness help us if there are PC-relative
1440 if (amd64_syscall_p (insn_details, &insn_len)
1441 && orig_rip != to + insn_len
1442 /* GDB can get control back after the insn after the syscall.
1443 Presumably this is a kernel bug.
1444 Fixup ensures its a nop, we add one to the length for it. */
1445 && orig_rip != to + insn_len + 1)
1447 if (debug_displaced)
1448 fprintf_unfiltered (gdb_stdlog,
1449 "displaced: syscall changed %%rip; "
1450 "not relocating\n");
1454 ULONGEST rip = orig_rip - insn_offset;
1456 /* If we just stepped over a breakpoint insn, we don't backup
1457 the pc on purpose; this is to match behaviour without
1460 regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1462 if (debug_displaced)
1463 fprintf_unfiltered (gdb_stdlog,
1465 "relocated %%rip from %s to %s\n",
1466 paddress (gdbarch, orig_rip),
1467 paddress (gdbarch, rip));
1471 /* If the instruction was PUSHFL, then the TF bit will be set in the
1472 pushed value, and should be cleared. We'll leave this for later,
1473 since GDB already messes up the TF flag when stepping over a
1476 /* If the instruction was a call, the return address now atop the
1477 stack is the address following the copied instruction. We need
1478 to make it the address following the original instruction. */
1479 if (amd64_call_p (insn_details))
1483 const ULONGEST retaddr_len = 8;
1485 regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
1486 retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
1487 retaddr = (retaddr - insn_offset) & 0xffffffffUL;
1488 write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
1490 if (debug_displaced)
1491 fprintf_unfiltered (gdb_stdlog,
1492 "displaced: relocated return addr at %s "
1494 paddress (gdbarch, rsp),
1495 paddress (gdbarch, retaddr));
1499 /* The maximum number of saved registers. This should include %rip. */
1500 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1502 struct amd64_frame_cache
1506 CORE_ADDR sp_offset;
1509 /* Saved registers. */
1510 CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
1514 /* Do we have a frame? */
1518 /* Initialize a frame cache. */
1521 amd64_init_frame_cache (struct amd64_frame_cache *cache)
1527 cache->sp_offset = -8;
1530 /* Saved registers. We initialize these to -1 since zero is a valid
1531 offset (that's where %rbp is supposed to be stored).
1532 The values start out as being offsets, and are later converted to
1533 addresses (at which point -1 is interpreted as an address, still meaning
1535 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1536 cache->saved_regs[i] = -1;
1537 cache->saved_sp = 0;
1538 cache->saved_sp_reg = -1;
1540 /* Frameless until proven otherwise. */
1541 cache->frameless_p = 1;
1544 /* Allocate and initialize a frame cache. */
1546 static struct amd64_frame_cache *
1547 amd64_alloc_frame_cache (void)
1549 struct amd64_frame_cache *cache;
1551 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1552 amd64_init_frame_cache (cache);
1556 /* GCC 4.4 and later, can put code in the prologue to realign the
1557 stack pointer. Check whether PC points to such code, and update
1558 CACHE accordingly. Return the first instruction after the code
1559 sequence or CURRENT_PC, whichever is smaller. If we don't
1560 recognize the code, return PC. */
1563 amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
1564 struct amd64_frame_cache *cache)
1566 /* There are 2 code sequences to re-align stack before the frame
1569 1. Use a caller-saved saved register:
1575 2. Use a callee-saved saved register:
1582 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
1584 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
1585 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
1590 int offset, offset_and;
1592 if (target_read_memory (pc, buf, sizeof buf))
1595 /* Check caller-saved saved register. The first instruction has
1596 to be "leaq 8(%rsp), %reg". */
1597 if ((buf[0] & 0xfb) == 0x48
1602 /* MOD must be binary 10 and R/M must be binary 100. */
1603 if ((buf[2] & 0xc7) != 0x44)
1606 /* REG has register number. */
1607 reg = (buf[2] >> 3) & 7;
1609 /* Check the REX.R bit. */
1617 /* Check callee-saved saved register. The first instruction
1618 has to be "pushq %reg". */
1620 if ((buf[0] & 0xf8) == 0x50)
1622 else if ((buf[0] & 0xf6) == 0x40
1623 && (buf[1] & 0xf8) == 0x50)
1625 /* Check the REX.B bit. */
1626 if ((buf[0] & 1) != 0)
1635 reg += buf[offset] & 0x7;
1639 /* The next instruction has to be "leaq 16(%rsp), %reg". */
1640 if ((buf[offset] & 0xfb) != 0x48
1641 || buf[offset + 1] != 0x8d
1642 || buf[offset + 3] != 0x24
1643 || buf[offset + 4] != 0x10)
1646 /* MOD must be binary 10 and R/M must be binary 100. */
1647 if ((buf[offset + 2] & 0xc7) != 0x44)
1650 /* REG has register number. */
1651 r = (buf[offset + 2] >> 3) & 7;
1653 /* Check the REX.R bit. */
1654 if (buf[offset] == 0x4c)
1657 /* Registers in pushq and leaq have to be the same. */
1664 /* Rigister can't be %rsp nor %rbp. */
1665 if (reg == 4 || reg == 5)
1668 /* The next instruction has to be "andq $-XXX, %rsp". */
1669 if (buf[offset] != 0x48
1670 || buf[offset + 2] != 0xe4
1671 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
1674 offset_and = offset;
1675 offset += buf[offset + 1] == 0x81 ? 7 : 4;
1677 /* The next instruction has to be "pushq -8(%reg)". */
1679 if (buf[offset] == 0xff)
1681 else if ((buf[offset] & 0xf6) == 0x40
1682 && buf[offset + 1] == 0xff)
1684 /* Check the REX.B bit. */
1685 if ((buf[offset] & 0x1) != 0)
1692 /* 8bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
1694 if (buf[offset + 1] != 0xf8
1695 || (buf[offset] & 0xf8) != 0x70)
1698 /* R/M has register. */
1699 r += buf[offset] & 7;
1701 /* Registers in leaq and pushq have to be the same. */
1705 if (current_pc > pc + offset_and)
1706 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
1708 return min (pc + offset + 2, current_pc);
1711 /* Do a limited analysis of the prologue at PC and update CACHE
1712 accordingly. Bail out early if CURRENT_PC is reached. Return the
1713 address where the analysis stopped.
1715 We will handle only functions beginning with:
1718 movq %rsp, %rbp 0x48 0x89 0xe5
1720 Any function that doesn't start with this sequence will be assumed
1721 to have no prologue and thus no valid frame pointer in %rbp. */
1724 amd64_analyze_prologue (struct gdbarch *gdbarch,
1725 CORE_ADDR pc, CORE_ADDR current_pc,
1726 struct amd64_frame_cache *cache)
1728 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1729 static gdb_byte proto[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */
1733 if (current_pc <= pc)
1736 pc = amd64_analyze_stack_align (pc, current_pc, cache);
1738 op = read_memory_unsigned_integer (pc, 1, byte_order);
1740 if (op == 0x55) /* pushq %rbp */
1742 /* Take into account that we've executed the `pushq %rbp' that
1743 starts this instruction sequence. */
1744 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
1745 cache->sp_offset += 8;
1747 /* If that's all, return now. */
1748 if (current_pc <= pc + 1)
1751 /* Check for `movq %rsp, %rbp'. */
1752 read_memory (pc + 1, buf, 3);
1753 if (memcmp (buf, proto, 3) != 0)
1756 /* OK, we actually have a frame. */
1757 cache->frameless_p = 0;
1764 /* Return PC of first real instruction. */
1767 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
1769 struct amd64_frame_cache cache;
1772 amd64_init_frame_cache (&cache);
1773 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
1775 if (cache.frameless_p)
1782 /* Normal frames. */
1784 static struct amd64_frame_cache *
1785 amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
1787 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1788 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1789 struct amd64_frame_cache *cache;
1796 cache = amd64_alloc_frame_cache ();
1797 *this_cache = cache;
1799 cache->pc = get_frame_func (this_frame);
1801 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
1804 if (cache->saved_sp_reg != -1)
1806 /* Stack pointer has been saved. */
1807 get_frame_register (this_frame, cache->saved_sp_reg, buf);
1808 cache->saved_sp = extract_unsigned_integer(buf, 8, byte_order);
1811 if (cache->frameless_p)
1813 /* We didn't find a valid frame. If we're at the start of a
1814 function, or somewhere half-way its prologue, the function's
1815 frame probably hasn't been fully setup yet. Try to
1816 reconstruct the base address for the stack frame by looking
1817 at the stack pointer. For truly "frameless" functions this
1820 if (cache->saved_sp_reg != -1)
1822 /* We're halfway aligning the stack. */
1823 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
1824 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
1826 /* This will be added back below. */
1827 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
1831 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1832 cache->base = extract_unsigned_integer (buf, 8, byte_order)
1838 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
1839 cache->base = extract_unsigned_integer (buf, 8, byte_order);
1842 /* Now that we have the base address for the stack frame we can
1843 calculate the value of %rsp in the calling frame. */
1844 cache->saved_sp = cache->base + 16;
1846 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
1847 frame we find it at the same offset from the reconstructed base
1848 address. If we're halfway aligning the stack, %rip is handled
1849 differently (see above). */
1850 if (!cache->frameless_p || cache->saved_sp_reg == -1)
1851 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
1853 /* Adjust all the saved registers such that they contain addresses
1854 instead of offsets. */
1855 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1856 if (cache->saved_regs[i] != -1)
1857 cache->saved_regs[i] += cache->base;
1863 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1864 struct frame_id *this_id)
1866 struct amd64_frame_cache *cache =
1867 amd64_frame_cache (this_frame, this_cache);
1869 /* This marks the outermost frame. */
1870 if (cache->base == 0)
1873 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
1876 static struct value *
1877 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1880 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1881 struct amd64_frame_cache *cache =
1882 amd64_frame_cache (this_frame, this_cache);
1884 gdb_assert (regnum >= 0);
1886 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
1887 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
1889 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
1890 return frame_unwind_got_memory (this_frame, regnum,
1891 cache->saved_regs[regnum]);
1893 return frame_unwind_got_register (this_frame, regnum, regnum);
1896 static const struct frame_unwind amd64_frame_unwind =
1899 amd64_frame_this_id,
1900 amd64_frame_prev_register,
1902 default_frame_sniffer
1906 /* Signal trampolines. */
1908 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
1909 64-bit variants. This would require using identical frame caches
1910 on both platforms. */
1912 static struct amd64_frame_cache *
1913 amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
1915 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1916 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1917 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1918 struct amd64_frame_cache *cache;
1926 cache = amd64_alloc_frame_cache ();
1928 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1929 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
1931 addr = tdep->sigcontext_addr (this_frame);
1932 gdb_assert (tdep->sc_reg_offset);
1933 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
1934 for (i = 0; i < tdep->sc_num_regs; i++)
1935 if (tdep->sc_reg_offset[i] != -1)
1936 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
1938 *this_cache = cache;
1943 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
1944 void **this_cache, struct frame_id *this_id)
1946 struct amd64_frame_cache *cache =
1947 amd64_sigtramp_frame_cache (this_frame, this_cache);
1949 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
/* Return the value of REGNUM in the frame that called this signal
   trampoline frame.  */

static struct value *
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
				    void **this_cache, int regnum)
{
  /* Make sure we've initialized the cache.  */
  amd64_sigtramp_frame_cache (this_frame, this_cache);

  return amd64_frame_prev_register (this_frame, this_cache, regnum);
}
1963 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
1964 struct frame_info *this_frame,
1967 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
1969 /* We shouldn't even bother if we don't have a sigcontext_addr
1971 if (tdep->sigcontext_addr == NULL)
1974 if (tdep->sigtramp_p != NULL)
1976 if (tdep->sigtramp_p (this_frame))
1980 if (tdep->sigtramp_start != 0)
1982 CORE_ADDR pc = get_frame_pc (this_frame);
1984 gdb_assert (tdep->sigtramp_end != 0);
1985 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
1992 static const struct frame_unwind amd64_sigtramp_frame_unwind =
1995 amd64_sigtramp_frame_this_id,
1996 amd64_sigtramp_frame_prev_register,
1998 amd64_sigtramp_frame_sniffer
2003 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2005 struct amd64_frame_cache *cache =
2006 amd64_frame_cache (this_frame, this_cache);
2011 static const struct frame_base amd64_frame_base =
2013 &amd64_frame_unwind,
2014 amd64_frame_base_address,
2015 amd64_frame_base_address,
2016 amd64_frame_base_address
2019 /* Normal frames, but in a function epilogue. */
2021 /* The epilogue is defined here as the 'ret' instruction, which will
2022 follow any instruction such as 'leave' or 'pop %ebp' that destroys
2023 the function's stack frame. */
2026 amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2030 if (target_read_memory (pc, &insn, 1))
2031 return 0; /* Can't read memory at pc. */
2033 if (insn != 0xc3) /* 'ret' instruction. */
/* Accept THIS_FRAME for the epilogue unwinder only when the innermost
   frame's PC sits on a 'ret' instruction.  */

static int
amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
			      struct frame_info *this_frame,
			      void **this_prologue_cache)
{
  if (frame_relative_level (this_frame) == 0)
    return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
					 get_frame_pc (this_frame));
  else
    return 0;
}
2051 static struct amd64_frame_cache *
2052 amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
2054 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2055 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2056 struct amd64_frame_cache *cache;
2062 cache = amd64_alloc_frame_cache ();
2063 *this_cache = cache;
2065 /* Cache base will be %esp plus cache->sp_offset (-8). */
2066 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2067 cache->base = extract_unsigned_integer (buf, 8,
2068 byte_order) + cache->sp_offset;
2070 /* Cache pc will be the frame func. */
2071 cache->pc = get_frame_pc (this_frame);
2073 /* The saved %esp will be at cache->base plus 16. */
2074 cache->saved_sp = cache->base + 16;
2076 /* The saved %eip will be at cache->base plus 8. */
2077 cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
2083 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2085 struct frame_id *this_id)
2087 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2090 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
2093 static const struct frame_unwind amd64_epilogue_frame_unwind =
2096 amd64_epilogue_frame_this_id,
2097 amd64_frame_prev_register,
2099 amd64_epilogue_frame_sniffer
2102 static struct frame_id
2103 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2107 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2109 return frame_id_build (fp + 16, get_frame_pc (this_frame));
2112 /* 16 byte align the SP per frame requirements. */
2115 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2117 return sp & -(CORE_ADDR)16;
2121 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
2122 in the floating-point register set REGSET to register cache
2123 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
2126 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2127 int regnum, const void *fpregs, size_t len)
2129 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
2131 gdb_assert (len == tdep->sizeof_fpregset);
2132 amd64_supply_fxsave (regcache, regnum, fpregs);
2135 /* Collect register REGNUM from the register cache REGCACHE and store
2136 it in the buffer specified by FPREGS and LEN as described by the
2137 floating-point register set REGSET. If REGNUM is -1, do this for
2138 all registers in REGSET. */
2141 amd64_collect_fpregset (const struct regset *regset,
2142 const struct regcache *regcache,
2143 int regnum, void *fpregs, size_t len)
2145 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
2147 gdb_assert (len == tdep->sizeof_fpregset);
2148 amd64_collect_fxsave (regcache, regnum, fpregs);
2151 /* Return the appropriate register set for the core section identified
2152 by SECT_NAME and SECT_SIZE. */
2154 static const struct regset *
2155 amd64_regset_from_core_section (struct gdbarch *gdbarch,
2156 const char *sect_name, size_t sect_size)
2158 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2160 if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
2162 if (tdep->fpregset == NULL)
2163 tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
2164 amd64_collect_fpregset);
2166 return tdep->fpregset;
2169 return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
2173 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
2174 %rdi. We expect its value to be a pointer to the jmp_buf structure
2175 from which we extract the address that we will land at. This
2176 address is copied into PC. This routine returns non-zero on
2180 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2184 struct gdbarch *gdbarch = get_frame_arch (frame);
2185 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
2186 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
2188 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2189 longjmp will land. */
2190 if (jb_pc_offset == -1)
2193 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
2194 jb_addr= extract_typed_address
2195 (buf, builtin_type (gdbarch)->builtin_data_ptr);
2196 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2199 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
2204 static const int amd64_record_regmap[] =
2206 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
2207 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
2208 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
2209 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
2210 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
2211 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
2215 amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
2217 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2218 const struct target_desc *tdesc = info.target_desc;
2220 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
2221 floating-point registers. */
2222 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
2224 if (! tdesc_has_registers (tdesc))
2225 tdesc = tdesc_amd64;
2226 tdep->tdesc = tdesc;
2228 tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
2229 tdep->register_names = amd64_register_names;
2231 tdep->num_byte_regs = 16;
2232 tdep->num_word_regs = 16;
2233 tdep->num_dword_regs = 16;
2234 /* Avoid wiring in the MMX registers for now. */
2235 tdep->num_mmx_regs = 0;
2237 set_gdbarch_pseudo_register_read (gdbarch,
2238 amd64_pseudo_register_read);
2239 set_gdbarch_pseudo_register_write (gdbarch,
2240 amd64_pseudo_register_write);
2242 set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);
2244 /* AMD64 has an FPU and 16 SSE registers. */
2245 tdep->st0_regnum = AMD64_ST0_REGNUM;
2246 tdep->num_xmm_regs = 16;
2248 /* This is what all the fuss is about. */
2249 set_gdbarch_long_bit (gdbarch, 64);
2250 set_gdbarch_long_long_bit (gdbarch, 64);
2251 set_gdbarch_ptr_bit (gdbarch, 64);
2253 /* In contrast to the i386, on AMD64 a `long double' actually takes
2254 up 128 bits, even though it's still based on the i387 extended
2255 floating-point format which has only 80 significant bits. */
2256 set_gdbarch_long_double_bit (gdbarch, 128);
2258 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
2260 /* Register numbers of various important registers. */
2261 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
2262 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
2263 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
2264 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
2266 /* The "default" register numbering scheme for AMD64 is referred to
2267 as the "DWARF Register Number Mapping" in the System V psABI.
2268 The preferred debugging format for all known AMD64 targets is
2269 actually DWARF2, and GCC doesn't seem to support DWARF (that is
2270 DWARF-1), but we provide the same mapping just in case. This
2271 mapping is also used for stabs, which GCC does support. */
2272 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
2273 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
2275 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
2276 be in use on any of the supported AMD64 targets. */
2278 /* Call dummy code. */
2279 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
2280 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
2281 set_gdbarch_frame_red_zone_size (gdbarch, 128);
2282 tdep->call_dummy_num_integer_regs =
2283 ARRAY_SIZE (amd64_dummy_call_integer_regs);
2284 tdep->call_dummy_integer_regs = amd64_dummy_call_integer_regs;
2285 tdep->classify = amd64_classify;
2287 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
2288 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
2289 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
2291 set_gdbarch_return_value (gdbarch, amd64_return_value);
2293 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
2295 tdep->record_regmap = amd64_record_regmap;
2297 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
2299 /* Hook the function epilogue frame unwinder. This unwinder is
2300 appended to the list first, so that it supercedes the other
2301 unwinders in function epilogues. */
2302 frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
2304 /* Hook the prologue-based frame unwinders. */
2305 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
2306 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
2307 frame_base_set_default (gdbarch, &amd64_frame_base);
2309 /* If we have a register mapping, enable the generic core file support. */
2310 if (tdep->gregset_reg_offset)
2311 set_gdbarch_regset_from_core_section (gdbarch,
2312 amd64_regset_from_core_section);
2314 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_amd64_tdep (void);

void
_initialize_amd64_tdep (void)
{
  initialize_tdesc_amd64 ();
}
2327 /* The 64-bit FXSAVE format differs from the 32-bit format in the
2328 sense that the instruction pointer and data pointer are simply
2329 64-bit offsets into the code segment and the data segment instead
2330 of a selector offset pair. The functions below store the upper 32
2331 bits of these pointers (instead of just the 16-bits of the segment
2334 /* Fill register REGNUM in REGCACHE with the appropriate
2335 floating-point or SSE register value from *FXSAVE. If REGNUM is
2336 -1, do this for all registers. This function masks off any of the
2337 reserved bits in *FXSAVE. */
2340 amd64_supply_fxsave (struct regcache *regcache, int regnum,
2343 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2344 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2346 i387_supply_fxsave (regcache, regnum, fxsave);
2348 if (fxsave && gdbarch_ptr_bit (gdbarch) == 64)
2350 const gdb_byte *regs = fxsave;
2352 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2353 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2354 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2355 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
2359 /* Fill register REGNUM (if it is a floating-point or SSE register) in
2360 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
2361 all registers. This function doesn't touch any of the reserved
2365 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
2368 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2369 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2370 gdb_byte *regs = fxsave;
2372 i387_collect_fxsave (regcache, regnum, fxsave);
2374 if (gdbarch_ptr_bit (gdbarch) == 64)
2376 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2377 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2378 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2379 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);