/* Target-dependent code for AMD64.

   Copyright (C) 2001-2014 Free Software Foundation, Inc.

   Contributed by Jiri Smid, SuSE Labs.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "defs.h"
#include "opcode/i386.h"
#include "arch-utils.h"
#include "dummy-frame.h"
#include "frame-base.h"
#include "frame-unwind.h"
#include "amd64-tdep.h"
#include "i387-tdep.h"

#include "features/i386/amd64.c"
#include "features/i386/amd64-avx.c"
#include "features/i386/amd64-mpx.c"
#include "features/i386/amd64-avx512.c"

#include "features/i386/x32.c"
#include "features/i386/x32-avx.c"
#include "features/i386/x32-avx512.c"
/* Note that the AMD64 architecture was previously known as x86-64.
   The latter is (forever) engraved into the canonical system name as
   returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSDs have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */
/* Register information.  */

static const char *amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};
static const char *amd64_ymm_names[] =
{
  "ymm0", "ymm1", "ymm2", "ymm3",
  "ymm4", "ymm5", "ymm6", "ymm7",
  "ymm8", "ymm9", "ymm10", "ymm11",
  "ymm12", "ymm13", "ymm14", "ymm15"
};

static const char *amd64_ymm_avx512_names[] =
{
  "ymm16", "ymm17", "ymm18", "ymm19",
  "ymm20", "ymm21", "ymm22", "ymm23",
  "ymm24", "ymm25", "ymm26", "ymm27",
  "ymm28", "ymm29", "ymm30", "ymm31"
};

static const char *amd64_ymmh_names[] =
{
  "ymm0h", "ymm1h", "ymm2h", "ymm3h",
  "ymm4h", "ymm5h", "ymm6h", "ymm7h",
  "ymm8h", "ymm9h", "ymm10h", "ymm11h",
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
};

static const char *amd64_ymmh_avx512_names[] =
{
  "ymm16h", "ymm17h", "ymm18h", "ymm19h",
  "ymm20h", "ymm21h", "ymm22h", "ymm23h",
  "ymm24h", "ymm25h", "ymm26h", "ymm27h",
  "ymm28h", "ymm29h", "ymm30h", "ymm31h"
};

static const char *amd64_mpx_names[] =
{
  "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
};

static const char *amd64_k_names[] =
{
  "k0", "k1", "k2", "k3",
  "k4", "k5", "k6", "k7"
};

static const char *amd64_zmmh_names[] =
{
  "zmm0h", "zmm1h", "zmm2h", "zmm3h",
  "zmm4h", "zmm5h", "zmm6h", "zmm7h",
  "zmm8h", "zmm9h", "zmm10h", "zmm11h",
  "zmm12h", "zmm13h", "zmm14h", "zmm15h",
  "zmm16h", "zmm17h", "zmm18h", "zmm19h",
  "zmm20h", "zmm21h", "zmm22h", "zmm23h",
  "zmm24h", "zmm25h", "zmm26h", "zmm27h",
  "zmm28h", "zmm29h", "zmm30h", "zmm31h"
};

static const char *amd64_zmm_names[] =
{
  "zmm0", "zmm1", "zmm2", "zmm3",
  "zmm4", "zmm5", "zmm6", "zmm7",
  "zmm8", "zmm9", "zmm10", "zmm11",
  "zmm12", "zmm13", "zmm14", "zmm15",
  "zmm16", "zmm17", "zmm18", "zmm19",
  "zmm20", "zmm21", "zmm22", "zmm23",
  "zmm24", "zmm25", "zmm26", "zmm27",
  "zmm28", "zmm29", "zmm30", "zmm31"
};

static const char *amd64_xmm_avx512_names[] = {
  "xmm16", "xmm17", "xmm18", "xmm19",
  "xmm20", "xmm21", "xmm22", "xmm23",
  "xmm24", "xmm25", "xmm26", "xmm27",
  "xmm28", "xmm29", "xmm30", "xmm31"
};
/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  AMD64_R8_REGNUM,		/* %r8 */
  AMD64_R9_REGNUM,		/* %r9 */
  AMD64_R10_REGNUM,		/* %r10 */
  AMD64_R11_REGNUM,		/* %r11 */
  AMD64_R12_REGNUM,		/* %r12 */
  AMD64_R13_REGNUM,		/* %r13 */
  AMD64_R14_REGNUM,		/* %r14 */
  AMD64_R15_REGNUM,		/* %r15 */

  /* Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,

  /* Control and Status Flags Register.  */
  AMD64_EFLAGS_REGNUM,

  /* Selector Registers.  */
  AMD64_ES_REGNUM,
  AMD64_CS_REGNUM,
  AMD64_SS_REGNUM,
  AMD64_DS_REGNUM,
  AMD64_FS_REGNUM,
  AMD64_GS_REGNUM,
  -1,
  -1,

  /* Segment Base Address Registers.  */
  -1,
  -1,
  -1,
  -1,

  /* Special Selector Registers.  */
  -1,
  -1,

  /* Floating Point Control Registers.  */
  AMD64_MXCSR_REGNUM,
  AMD64_FCTRL_REGNUM,
  AMD64_FSTAT_REGNUM
};

static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
/* Convert DWARF register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  int ymm0_regnum = tdep->ymm0_regnum;
  int regnum = -1;

  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    regnum = amd64_dwarf_regmap[reg];

  if (regnum == -1)
    warning (_("Unmapped DWARF Register #%d encountered."), reg);
  else if (ymm0_regnum >= 0
	   && i386_xmm_regnum_p (gdbarch, regnum))
    regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);

  return regnum;
}
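/* For example (illustrative): DWARF register number 5 is %rdi in the
   psABI mapping above, so amd64_dwarf_reg_to_regnum (gdbarch, 5)
   yields AMD64_RDI_REGNUM.  The table is what makes this explicit;
   GDB's own register order (%rax, %rbx, %rcx, %rdx, ...) does not
   match the DWARF order (%rax, %rdx, %rcx, %rbx, ...).  */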
/* Map architectural register numbers to gdb register numbers.  */

static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};

static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));

/* Convert architectural register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_arch_reg_to_regnum (int reg)
{
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);

  return amd64_arch_regmap[reg];
}
/* Register names for byte pseudo-registers.  */

static const char *amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16

/* Register names for word pseudo-registers.  */

static const char *amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "",
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};

/* Register names for dword pseudo-registers.  */

static const char *amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
  "eip"
};
/* Return the name of register REGNUM.  */

static const char *
amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    return amd64_byte_names[regnum - tdep->al_regnum];
  else if (i386_zmm_regnum_p (gdbarch, regnum))
    return amd64_zmm_names[regnum - tdep->zmm0_regnum];
  else if (i386_ymm_regnum_p (gdbarch, regnum))
    return amd64_ymm_names[regnum - tdep->ymm0_regnum];
  else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
    return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
  else if (i386_word_regnum_p (gdbarch, regnum))
    return amd64_word_names[regnum - tdep->ax_regnum];
  else if (i386_dword_regnum_p (gdbarch, regnum))
    return amd64_dword_names[regnum - tdep->eax_regnum];

  return i386_pseudo_register_name (gdbarch, regnum);
}
static struct value *
amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
				  struct regcache *regcache,
				  int regnum)
{
  gdb_byte raw_buf[MAX_REGISTER_SIZE];
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum register_status status;
  struct value *result_value;
  gdb_byte *buf;

  result_value = allocate_value (register_type (gdbarch, regnum));
  VALUE_LVAL (result_value) = lval_register;
  VALUE_REGNUM (result_value) = regnum;
  buf = value_contents_raw (result_value);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      /* Extract (always little endian).  */
      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  /* Special handling for AH, BH, CH, DH.  */
	  status = regcache_raw_read (regcache,
				      gpnum - AMD64_NUM_LOWER_BYTE_REGS,
				      raw_buf);
	  if (status == REG_VALID)
	    memcpy (buf, raw_buf + 1, 1);
	  else
	    mark_value_bytes_unavailable (result_value, 0,
					  TYPE_LENGTH (value_type (result_value)));
	}
      else
	{
	  status = regcache_raw_read (regcache, gpnum, raw_buf);
	  if (status == REG_VALID)
	    memcpy (buf, raw_buf, 1);
	  else
	    mark_value_bytes_unavailable (result_value, 0,
					  TYPE_LENGTH (value_type (result_value)));
	}
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;
      /* Extract (always little endian).  */
      status = regcache_raw_read (regcache, gpnum, raw_buf);
      if (status == REG_VALID)
	memcpy (buf, raw_buf, 4);
      else
	mark_value_bytes_unavailable (result_value, 0,
				      TYPE_LENGTH (value_type (result_value)));
    }
  else
    i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
					  result_value);

  return result_value;
}
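/* For example (illustrative): reading the byte pseudo-register %ah
   (gpnum >= AMD64_NUM_LOWER_BYTE_REGS above) fetches raw %rax and
   copies out byte 1 (bits 8-15), whereas reading %al copies byte 0;
   both are single-byte views of the same 8-byte raw register.  */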
static void
amd64_pseudo_register_write (struct gdbarch *gdbarch,
			     struct regcache *regcache,
			     int regnum, const gdb_byte *buf)
{
  gdb_byte raw_buf[MAX_REGISTER_SIZE];
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  /* Read ...  AH, BH, CH, DH.  */
	  regcache_raw_read (regcache,
			     gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
	  /* ... Modify ... (always little endian).  */
	  memcpy (raw_buf + 1, buf, 1);
	  /* ... Write.  */
	  regcache_raw_write (regcache,
			      gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
	}
      else
	{
	  /* Read ...  */
	  regcache_raw_read (regcache, gpnum, raw_buf);
	  /* ... Modify ... (always little endian).  */
	  memcpy (raw_buf, buf, 1);
	  /* ... Write.  */
	  regcache_raw_write (regcache, gpnum, raw_buf);
	}
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;

      /* Read ...  */
      regcache_raw_read (regcache, gpnum, raw_buf);
      /* ... Modify ... (always little endian).  */
      memcpy (raw_buf, buf, 4);
      /* ... Write.  */
      regcache_raw_write (regcache, gpnum, raw_buf);
    }
  else
    i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
}
/* Register classes as defined in the psABI.  */

enum amd64_reg_class
{
  AMD64_INTEGER,
  AMD64_SSE,
  AMD64_SSEUP,
  AMD64_X87,
  AMD64_X87UP,
  AMD64_COMPLEX_X87,
  AMD64_NO_CLASS,
  AMD64_MEMORY
};
/* Return the union class of CLASS1 and CLASS2.  See the psABI for
   details.  */

static enum amd64_reg_class
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
{
  /* Rule (a): If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule (b): If one of the classes is NO_CLASS, the resulting class
     is the other class.  */
  if (class1 == AMD64_NO_CLASS)
    return class2;
  if (class2 == AMD64_NO_CLASS)
    return class1;

  /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
    return AMD64_MEMORY;

  /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    return AMD64_INTEGER;

  /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
     MEMORY is used as class.  */
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
    return AMD64_MEMORY;

  /* Rule (f): Otherwise class SSE is used.  */
  return AMD64_SSE;
}
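/* For example (illustrative): merging AMD64_INTEGER with AMD64_SSE
   yields AMD64_INTEGER by rule (d), while merging AMD64_SSE with
   AMD64_SSEUP falls through to rule (f) and yields AMD64_SSE.  */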
static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);
/* Return non-zero if TYPE is a non-POD structure or union type.  */

static int
amd64_non_pod_p (struct type *type)
{
  /* ??? A class with a base class certainly isn't POD, but does this
     catch all non-POD structure types?  */
  if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
    return 1;

  return 0;
}
/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
{
  /* 1. If the size of an object is larger than two eightbytes, or in
        C++, is a non-POD structure or union type, or contains
        unaligned fields, it has class memory.  */
  if (TYPE_LENGTH (type) > 16 || amd64_non_pod_p (type))
    {
      class[0] = class[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  class[0] = class[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
        always two fields are considered.  The resulting class is
        calculated according to the classes of the fields in the
        eightbyte:  */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, class);
      if (TYPE_LENGTH (type) > 8 && class[1] == AMD64_NO_CLASS)
	class[1] = class[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
		  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
	  int pos = TYPE_FIELD_BITPOS (type, i) / 64;
	  enum amd64_reg_class subclass[2];
	  int bitsize = TYPE_FIELD_BITSIZE (type, i);
	  int endpos;

	  if (bitsize == 0)
	    bitsize = TYPE_LENGTH (subtype) * 8;
	  endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;

	  /* Ignore static fields.  */
	  if (field_is_static (&TYPE_FIELD (type, i)))
	    continue;

	  gdb_assert (pos == 0 || pos == 1);

	  amd64_classify (subtype, subclass);
	  class[pos] = amd64_merge_classes (class[pos], subclass[0]);
	  if (bitsize <= 64 && pos == 0 && endpos == 1)
	    /* This is a bit of an odd case:  We have a field that would
	       normally fit in one of the two eightbytes, except that
	       it is placed in a way that this field straddles them.
	       This has been seen with a structure containing an array.

	       The ABI is a bit unclear in this case, but we assume that
	       this field's class (stored in subclass[0]) must also be merged
	       into class[1].  In other words, our field has a piece stored
	       in the second eightbyte, and thus its class applies to
	       the second eightbyte as well.

	       In the case where the field length exceeds 8 bytes,
	       it should not be necessary to merge the field class
	       into class[1].  As LEN > 8, subclass[1] is necessarily
	       different from AMD64_NO_CLASS.  If subclass[1] is equal
	       to subclass[0], then the normal class[1]/subclass[1]
	       merging will take care of everything.  For subclass[1]
	       to be different from subclass[0], I can only see the case
	       where we have a SSE/SSEUP or X87/X87UP pair, which both
	       use up all 16 bytes of the aggregate, and are already
	       handled just fine (because each portion sits on its own
	       eightbyte).  */
	    class[1] = amd64_merge_classes (class[1], subclass[0]);
	  else
	    class[1] = amd64_merge_classes (class[1], subclass[1]);
	}
    }

  /* 4. Then a post merger cleanup is done:  */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
    class[0] = class[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (class[0] == AMD64_SSEUP)
    class[0] = AMD64_SSE;
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
    class[1] = AMD64_SSE;
}
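/* For example (illustrative):

     struct { long l; double d; };

   is 16 bytes, so it escapes the memory test above; its first
   eightbyte classifies as INTEGER and its second as SSE, meaning a
   value of this type travels in one general-purpose register and one
   SSE register rather than in memory.  */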
/* Classify TYPE, and store the result in CLASS.  */

static void
amd64_classify (struct type *type, enum amd64_reg_class class[2])
{
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);

  class[0] = class[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
      && (len == 1 || len == 2 || len == 4 || len == 8))
    class[0] = AMD64_INTEGER;

  /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
     are in class SSE.  */
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
	   && (len == 4 || len == 8))
    /* FIXME: __m64 .  */
    class[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    class[0] = AMD64_X87, class[1] = AMD64_X87UP;

  /* Arguments of complex T where T is one of the types float or
     double get treated as if they are implemented as:

     struct complexT {
       T real;
       T imag;
     };

  */
  else if (code == TYPE_CODE_COMPLEX && len == 8)
    class[0] = AMD64_SSE;
  else if (code == TYPE_CODE_COMPLEX && len == 16)
    class[0] = class[1] = AMD64_SSE;

  /* A variable of type complex long double is classified as type
     COMPLEX_X87.  */
  else if (code == TYPE_CODE_COMPLEX && len == 32)
    class[0] = AMD64_COMPLEX_X87;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
	   || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, class);
}
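/* For example (illustrative): an 8-byte `long' classifies as
   {INTEGER, NO_CLASS}, a `double' as {SSE, NO_CLASS}, and a 16-byte
   `long double' as {X87, X87UP}.  */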
static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct value *function,
		    struct type *type, struct regcache *regcache,
		    gdb_byte *readbuf, const gdb_byte *writebuf)
{
  enum amd64_reg_class class[2];
  int len = TYPE_LENGTH (type);
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, class);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (class[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
	 can always find the return value just after the function has
	 returned.  */

      if (readbuf)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
	  read_memory (addr, readbuf, TYPE_LENGTH (type));
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  /* 8. If the class is COMPLEX_X87, the real part of the value is
        returned in %st0 and the imaginary part in %st1.  */
  if (class[0] == AMD64_COMPLEX_X87)
    {
      if (readbuf)
	{
	  regcache_raw_read (regcache, AMD64_ST0_REGNUM, readbuf);
	  regcache_raw_read (regcache, AMD64_ST1_REGNUM, readbuf + 16);
	}

      if (writebuf)
	{
	  i387_return_value (gdbarch, regcache);
	  regcache_raw_write (regcache, AMD64_ST0_REGNUM, writebuf);
	  regcache_raw_write (regcache, AMD64_ST1_REGNUM, writebuf + 16);

	  /* Fix up the tag word such that both %st(0) and %st(1) are
	     marked as valid.  */
	  regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
	}

      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  gdb_assert (class[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (class[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
	     of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
	     stack in %st0 as 80-bit x87 number.  */
	  regnum = AMD64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
	     with the previous X87 value in %st0.  */
	  gdb_assert (i > 0 && class[0] == AMD64_X87);
	  regnum = AMD64_ST0_REGNUM;
	  offset = 8;
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      if (readbuf)
	regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
				readbuf + i * 8);
      if (writebuf)
	regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
				 writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
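/* For example (illustrative): the 16-byte `struct { long l; double d; }'
   discussed above comes back with `l' in %rax and `d' in %xmm0, while
   a 32-byte aggregate classifies as MEMORY and is reached through the
   hidden pointer returned in %rax.  */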
static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs,
		      struct value **args, CORE_ADDR sp, int struct_return)
{
  static int integer_regnum[] =
  {
    AMD64_RDI_REGNUM,		/* %rdi */
    AMD64_RSI_REGNUM,		/* %rsi */
    AMD64_RDX_REGNUM,		/* %rdx */
    AMD64_RCX_REGNUM,		/* %rcx */
    AMD64_R8_REGNUM,		/* %r8 */
    AMD64_R9_REGNUM		/* %r9 */
  };
  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = alloca (nargs * sizeof (struct value *));
  int num_stack_args = 0;
  int num_elements = 0;
  int element = 0;
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  /* Reserve a register for the "hidden" argument.  */
  if (struct_return)
    integer_reg++;

  for (i = 0; i < nargs; i++)
    {
      struct type *type = value_type (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class class[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      amd64_classify (type, class);

      /* Calculate the number of integer and SSE registers needed for
	 this argument.  */
      for (j = 0; j < 2; j++)
	{
	  if (class[j] == AMD64_INTEGER)
	    needed_integer_regs++;
	  else if (class[j] == AMD64_SSE)
	    needed_sse_regs++;
	}

      /* Check whether enough registers are available, and if the
	 argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
	{
	  /* The argument will be passed on the stack.  */
	  num_elements += ((len + 7) / 8);
	  stack_args[num_stack_args++] = args[i];
	}
      else
	{
	  /* The argument will be passed in registers.  */
	  const gdb_byte *valbuf = value_contents (args[i]);
	  gdb_byte buf[8];

	  gdb_assert (len <= 16);

	  for (j = 0; len > 0; j++, len -= 8)
	    {
	      int regnum = -1;
	      int offset = 0;

	      switch (class[j])
		{
		case AMD64_INTEGER:
		  regnum = integer_regnum[integer_reg++];
		  break;

		case AMD64_SSE:
		  regnum = sse_regnum[sse_reg++];
		  break;

		case AMD64_SSEUP:
		  gdb_assert (sse_reg > 0);
		  regnum = sse_regnum[sse_reg - 1];
		  offset = 8;
		  break;

		default:
		  gdb_assert (!"Unexpected register class.");
		}

	      gdb_assert (regnum != -1);
	      memset (buf, 0, sizeof buf);
	      memcpy (buf, valbuf + j * 8, min (len, 8));
	      regcache_raw_write_part (regcache, regnum, offset, 8, buf);
	    }
	}
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = value_type (stack_args[i]);
      const gdb_byte *valbuf = value_contents (stack_args[i]);
      int len = TYPE_LENGTH (type);

      write_memory (sp + element * 8, valbuf, len);
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used."  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);

  return sp;
}
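/* For example (illustrative): for a prototype-less call f (1, 2.5),
   the code above puts 1 in %rdi, 2.5 in %xmm0, and then stores 1 in
   %al because a single SSE register was used.  */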
static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		       struct regcache *regcache, CORE_ADDR bp_addr,
		       int nargs, struct value **args, CORE_ADDR sp,
		       int struct_return, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);

  /* Pass "hidden" argument.  */
  if (struct_return)
    {
      store_unsigned_integer (buf, 8, byte_order, struct_addr);
      regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
    }

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, byte_order, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, byte_order, sp);
  regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  */
  regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);

  return sp + 16;
}
/* Displaced instruction handling.  */

/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.  */

struct amd64_insn
{
  /* The number of opcode bytes.  */
  int opcode_len;
  /* The offset of the rex prefix or -1 if not present.  */
  int rex_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  */
  gdb_byte *raw_insn;
};

struct displaced_step_closure
{
  /* For rip-relative insns, saved copy of the reg we use instead of %rip.  */
  int tmp_used;
  int tmp_regno;
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* Amount of space allocated to insn_buf.  */
  int max_len;

  /* The possibly modified insn.
     This is a variable-length field.  */
  gdb_byte insn_buf[1];
};
/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

static const unsigned char onebyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};

static const unsigned char twobyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};
static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);

static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}
/* Skip the legacy instruction prefixes in INSN.
   We assume INSN is properly sentineled so we don't have to worry
   about falling off the end of the buffer.  */

static gdb_byte *
amd64_skip_prefixes (gdb_byte *insn)
{
  while (1)
    {
      switch (*insn)
	{
	case DATA_PREFIX_OPCODE:
	case ADDR_PREFIX_OPCODE:
	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	case LOCK_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	case REPNE_PREFIX_OPCODE:
	  ++insn;
	  continue;
	default:
	  break;
	}
      break;
    }

  return insn;
}
/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarly avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << idx;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }

    /* We shouldn't get here.  */
    internal_error (__FILE__, __LINE__, _("unable to find free reg"));
  }
}
/* Extract the details of INSN that we need.  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  details->opcode_len = -1;
  details->rex_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX instruction prefix.  */
  if (rex_prefix_p (*insn))
    {
      details->rex_offset = insn - start;
      ++insn;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];
      /* Check for three-byte opcode.  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	case 0x7a:
	case 0x7b:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      ++insn;
      details->modrm_offset = insn - start;
    }
}
/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.  */

static void
fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
  CORE_ADDR rip_base;
  int32_t disp;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
  ++insn;

  /* Compute the rip-relative address.  */
  disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
  insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf,
					  dsc->max_len, from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* REX.B should be unset as we were using rip-relative addressing,
     but ensure it's unset anyway, tmp_regno is not r8-r15.  */
  if (insn_details->rex_offset != -1)
    dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;

  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
			"displaced: using temp reg %d, old value %s, new value %s\n",
			dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			paddress (gdbarch, rip_base));
}
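/* For example (illustrative): if the copied insn is

      8b 05 d3 01 00 00	mov    0x1d3(%rip),%eax

   and %rdi (architectural number 7) is the chosen scratch register,
   the ModRM byte 0x05 (mod=00, r/m=101: %rip+disp32) is rewritten to
   0x80 + 7 = 0x87 (mod=10, r/m=111: disp32(%rdi)), giving

      8b 87 d3 01 00 00	mov    0x1d3(%rdi),%eax

   with %rdi preloaded to the original insn's address plus its length,
   so the unchanged displacement still reaches the intended data.  */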
static void
fixup_displaced_copy (struct gdbarch *gdbarch,
		      struct displaced_step_closure *dsc,
		      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *details = &dsc->insn_details;

  if (details->modrm_offset != -1)
    {
      gdb_byte modrm = details->raw_insn[details->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
	{
	  /* The insn uses rip-relative addressing.
	     Deal with it.  */
	  fixup_riprel (gdbarch, dsc, from, to, regs);
	}
    }
}
struct displaced_step_closure *
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  struct displaced_step_closure *dsc =
    xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  dsc->tmp_used = 0;
  dsc->max_len = len + fixup_sentinel_space;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc, from, to, regs);

  write_memory (to, buf, len);

  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			  paddress (gdbarch, from), paddress (gdbarch, to));
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
    }

  return dsc;
}
static int
amd64_absolute_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* jump near, absolute indirect (/4) */
      if ((insn[1] & 0x38) == 0x20)
	return 1;

      /* jump far, absolute indirect (/5) */
      if ((insn[1] & 0x38) == 0x28)
	return 1;
    }

  return 0;
}

/* Return non-zero if the instruction DETAILS is a jump, zero otherwise.  */

static int
amd64_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  /* jump short, relative.  */
  if (insn[0] == 0xeb)
    return 1;

  /* jump near, relative.  */
  if (insn[0] == 0xe9)
    return 1;

  return amd64_absolute_jmp_p (details);
}
static int
amd64_absolute_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* Call near, absolute indirect (/2) */
      if ((insn[1] & 0x38) == 0x10)
	return 1;

      /* Call far, absolute indirect (/3) */
      if ((insn[1] & 0x38) == 0x18)
	return 1;
    }

  return 0;
}

static int
amd64_ret_p (const struct amd64_insn *details)
{
  /* NOTE: gcc can emit "repz ; ret".  */
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  switch (insn[0])
    {
    case 0xc2: /* ret near, pop N bytes */
    case 0xc3: /* ret near */
    case 0xca: /* ret far, pop N bytes */
    case 0xcb: /* ret far */
    case 0xcf: /* iret */
      return 1;

    default:
      return 0;
    }
}

static int
amd64_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (amd64_absolute_call_p (details))
    return 1;

  /* call near, relative */
  if (insn[0] == 0xe8)
    return 1;

  return 0;
}
/* Return non-zero if INSN is a system call, and set *LENGTHP to its
   length in bytes.  Otherwise, return zero.  */

static int
amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0x0f && insn[1] == 0x05)
    {
      *lengthp = 2;
      return 1;
    }

  return 0;
}

/* Classify the instruction at ADDR using PRED.
   Throw an error if the memory can't be read.  */

static int
amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
			int (*pred) (const struct amd64_insn *))
{
  struct amd64_insn details;
  gdb_byte *buf;
  int len, classification;

  len = gdbarch_max_insn_length (gdbarch);
  buf = alloca (len);

  read_code (addr, buf, len);
  amd64_get_insn_details (buf, &details);

  classification = pred (&details);

  return classification;
}
/* The gdbarch insn_is_call method.  */

static int
amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
}

/* The gdbarch insn_is_ret method.  */

static int
amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
}

/* The gdbarch insn_is_jump method.  */

static int
amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
}
/* Fix up the state of registers and memory after having single-stepped
   a displaced instruction.  */

void
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
			    struct displaced_step_closure *dsc,
			    CORE_ADDR from, CORE_ADDR to,
			    struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The offset we applied to the instruction's address.  */
  ULONGEST insn_offset = to - from;
  gdb_byte *insn = dsc->insn_buf;
  const struct amd64_insn *insn_details = &dsc->insn_details;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: fixup (%s, %s), "
			"insn = 0x%02x 0x%02x ...\n",
			paddress (gdbarch, from), paddress (gdbarch, to),
			insn[0], insn[1]);

  /* If we used a tmp reg, restore it.  */

  if (dsc->tmp_used)
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
			    dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
      regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
    }

  /* The list of issues to contend with here is taken from
     resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
     Yay for Free Software!  */

  /* Relocate the %rip back to the program's instruction stream,
     if necessary.  */

  /* Except in the case of absolute or indirect jump or call
     instructions, or a return instruction, the new rip is relative to
     the displaced instruction; make it relative to the original insn.
     Well, signal handler returns don't need relocation either, but we use the
     value of %rip to recognize those; see below.  */
  if (! amd64_absolute_jmp_p (insn_details)
      && ! amd64_absolute_call_p (insn_details)
      && ! amd64_ret_p (insn_details))
    {
      ULONGEST orig_rip;
      int insn_len;

      regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);

      /* A signal trampoline system call changes the %rip, resuming
	 execution of the main program after the signal handler has
	 returned.  That makes them like 'return' instructions; we
	 shouldn't relocate %rip.

	 But most system calls don't, and we do need to relocate %rip.

	 Our heuristic for distinguishing these cases: if stepping
	 over the system call instruction left control directly after
	 the instruction, then we relocate --- control almost certainly
	 doesn't belong in the displaced copy.  Otherwise, we assume
	 the instruction has put control where it belongs, and leave
	 it unrelocated.  Goodness help us if there are PC-relative
	 system calls.  */
      if (amd64_syscall_p (insn_details, &insn_len)
	  && orig_rip != to + insn_len
	  /* GDB can get control back after the insn after the syscall.
	     Presumably this is a kernel bug.
	     Fixup ensures it's a nop, we add one to the length for it.  */
	  && orig_rip != to + insn_len + 1)
	{
	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: syscall changed %%rip; "
				"not relocating\n");
	}
      else
	{
	  ULONGEST rip = orig_rip - insn_offset;

	  /* If we just stepped over a breakpoint insn, we don't backup
	     the pc on purpose; this is to match behaviour without
	     stepping.  */

	  regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);

	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: "
				"relocated %%rip from %s to %s\n",
				paddress (gdbarch, orig_rip),
				paddress (gdbarch, rip));
	}
    }

  /* If the instruction was PUSHFL, then the TF bit will be set in the
     pushed value, and should be cleared.  We'll leave this for later,
     since GDB already messes up the TF flag when stepping over a
     pushfl.  */

  /* If the instruction was a call, the return address now atop the
     stack is the address following the copied instruction.  We need
     to make it the address following the original instruction.  */
  if (amd64_call_p (insn_details))
    {
      ULONGEST rsp;
      ULONGEST retaddr;
      const ULONGEST retaddr_len = 8;

      regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
      retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
      retaddr = retaddr - insn_offset;
      write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);

      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: relocated return addr at %s "
			    "to %s\n",
			    paddress (gdbarch, rsp),
			    paddress (gdbarch, retaddr));
    }
}
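/* For example (illustrative): a 5-byte `callq' copied from FROM to
   TO = FROM + 0x1000 pushes TO + 5 as the return address when
   single-stepped; subtracting INSN_OFFSET (0x1000) above rewrites it
   to FROM + 5, where execution really should resume.  */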
/* If the instruction INSN uses RIP-relative addressing, return the
   offset into the raw INSN where the displacement to be adjusted is
   found.  Returns 0 if the instruction doesn't use RIP-relative
   addressing.  */

static int
rip_relative_offset (struct amd64_insn *insn)
{
  if (insn->modrm_offset != -1)
    {
      gdb_byte modrm = insn->raw_insn[insn->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
	{
	  /* The displacement is found right after the ModRM byte.  */
	  return insn->modrm_offset + 1;
	}
    }

  return 0;
}
static void
append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
static void
amd64_relocate_instruction (struct gdbarch *gdbarch,
			    CORE_ADDR *to, CORE_ADDR oldloc)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels.  */
  int fixup_sentinel_space = len;
  gdb_byte *buf = xmalloc (len + fixup_sentinel_space);
  struct amd64_insn insn_details;
  int offset = 0;
  LONGEST rel32, newrel;
  gdb_byte *insn;
  int insn_length;

  read_memory (oldloc, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  insn = buf;
  amd64_get_insn_details (insn, &insn_details);

  insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Adjust calls with 32-bit relative addresses as push/jump, with
     the address pushed being the location where the original call in
     the user program would return to.  */
  if (insn[0] == 0xe8)
    {
      gdb_byte push_buf[16];
      unsigned int ret_addr;

      /* Where "ret" in the original code will return to.  */
      ret_addr = oldloc + insn_length;
      push_buf[0] = 0x68; /* pushq $...  */
      store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
      /* Push the push.  */
      append_insns (to, 5, push_buf);

      /* Convert the relative call to a relative jump.  */
      insn[0] = 0xe9;

      /* Adjust the destination offset.  */
      rel32 = extract_signed_integer (insn + 1, 4, byte_order);
      newrel = (oldloc - *to) + rel32;
      store_signed_integer (insn + 1, 4, byte_order, newrel);

      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "Adjusted insn rel32=%s at %s to"
			    " rel32=%s at %s\n",
			    hex_string (rel32), paddress (gdbarch, oldloc),
			    hex_string (newrel), paddress (gdbarch, *to));

      /* Write the adjusted jump into its displaced location.  */
      append_insns (to, 5, insn);
      return;
    }

  offset = rip_relative_offset (&insn_details);
  if (!offset)
    {
      /* Adjust jumps with 32-bit relative addresses.  Calls are
	 already handled above.  */
      if (insn[0] == 0xe9)
	offset = 1;
      /* Adjust conditional jumps.  */
      else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
	offset = 2;
    }

  if (offset)
    {
      rel32 = extract_signed_integer (insn + offset, 4, byte_order);
      newrel = (oldloc - *to) + rel32;
      store_signed_integer (insn + offset, 4, byte_order, newrel);
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "Adjusted insn rel32=%s at %s to"
			    " rel32=%s at %s\n",
			    hex_string (rel32), paddress (gdbarch, oldloc),
			    hex_string (newrel), paddress (gdbarch, *to));
    }

  /* Write the adjusted instruction into its displaced location.  */
  append_insns (to, insn_length, buf);
}
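/* For example (illustrative): relocating a 5-byte `e8 xx xx xx xx'
   (callq) found at OLDLOC emits at *TO a `68 <oldloc+5>' (push of the
   original return address) followed by `e9 <rel32>' (jmpq), with
   rel32 rebiased by OLDLOC - *TO so the jump still lands on the
   original call target.  */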
/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS	AMD64_NUM_GREGS

struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  int base_p;
  CORE_ADDR sp_offset;
  CORE_ADDR pc;

  /* Saved registers.  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  CORE_ADDR saved_sp;
  int saved_sp_reg;

  /* Do we have a frame?  */
  int frameless_p;
};

/* Initialize a frame cache.  */

static void
amd64_init_frame_cache (struct amd64_frame_cache *cache)
{
  int i;

  /* Base address.  */
  cache->base = 0;
  cache->base_p = 0;
  cache->sp_offset = -8;
  cache->pc = 0;

  /* Saved registers.  We initialize these to -1 since zero is a valid
     offset (that's where %rbp is supposed to be stored).
     The values start out as being offsets, and are later converted to
     addresses (at which point -1 is interpreted as an address, still meaning
     "invalid").  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    cache->saved_regs[i] = -1;
  cache->saved_sp = 0;
  cache->saved_sp_reg = -1;

  /* Frameless until proven otherwise.  */
  cache->frameless_p = 1;
}
/* Allocate and initialize a frame cache.  */

static struct amd64_frame_cache *
amd64_alloc_frame_cache (void)
{
  struct amd64_frame_cache *cache;

  cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
  amd64_init_frame_cache (cache);
  return cache;
}
/* GCC 4.4 and later can put code in the prologue to realign the
   stack pointer.  Check whether PC points to such code, and update
   CACHE accordingly.  Return the first instruction after the code
   sequence or CURRENT_PC, whichever is smaller.  If we don't
   recognize the code, return PC.  */

static CORE_ADDR
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			   struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

	1. Use a caller-saved saved register:

		leaq  8(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	2. Use a callee-saved saved register:

		pushq %reg
		leaq  16(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp
   */

  gdb_byte buf[18];
  int reg, r;
  int offset, offset_and;

  if (target_read_code (pc, buf, sizeof buf))
    return pc;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
  if ((buf[0] & 0xfb) == 0x48
      && buf[1] == 0x8d
      && buf[3] == 0x24
      && buf[4] == 0x8)
    {
      /* MOD must be binary 01 and R/M must be binary 100.  */
      if ((buf[2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[0] == 0x4c)
	reg += 8;

      offset = 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[0] & 0xf8) == 0x50)
	offset = 0;
      else if ((buf[0] & 0xf6) == 0x40
	       && (buf[1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[0] & 1) != 0)
	    reg = 8;

	  offset = 1;
	}
      else
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
      if ((buf[offset] & 0xfb) != 0x48
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 01 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[offset] == 0x4c)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction has to be "andq $-XXX, %rsp".  */
  if (buf[offset] != 0x48
      || buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return min (pc + offset + 2, current_pc);
}
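/* For example (illustrative): sequence 1 above, using %rcx as the
   temporary register, assembles to

      48 8d 4c 24 08	leaq   0x8(%rsp),%rcx
      48 83 e4 f0	andq   $-16,%rsp
      ff 71 f8		pushq  -0x8(%rcx)

   which is exactly what the byte checks above accept (ModRM 0x4c:
   mod=01, reg=%rcx, r/m=100 with SIB 0x24 for %rsp).  */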
/* Similar to amd64_analyze_stack_align for x32.  */

static CORE_ADDR
amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			       struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

	1. Use a caller-saved saved register:

		leaq  8(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	   or

		[addr32] leal  8(%rsp), %reg
		andl  $-XXX, %esp
		[addr32] pushq -8(%reg)

	2. Use a callee-saved saved register:

		pushq %reg
		leaq  16(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	   or

		pushq %reg
		[addr32] leal  16(%rsp), %reg
		andl  $-XXX, %esp
		[addr32] pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp

     "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:

	0x83 0xe4 0xf0			andl $-16, %esp
	0x81 0xe4 0x00 0xff 0xff 0xff	andl $-256, %esp
   */

  gdb_byte buf[19];
  int reg, r;
  int offset, offset_and;

  if (target_read_memory (pc, buf, sizeof buf))
    return pc;

  /* Skip optional addr32 prefix.  */
  offset = buf[0] == 0x67 ? 1 : 0;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg".  */
  if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
      && buf[offset + 1] == 0x8d
      && buf[offset + 3] == 0x24
      && buf[offset + 4] == 0x8)
    {
      /* MOD must be binary 01 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if ((buf[offset] & 0x4) != 0)
	reg += 8;

      offset += 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[offset] & 0xf6) == 0x40
	  && (buf[offset + 1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[offset] & 1) != 0)
	    reg = 8;

	  offset += 1;
	}
      else if ((buf[offset] & 0xf8) != 0x50)
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* Skip optional addr32 prefix.  */
      if (buf[offset] == 0x67)
	offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg" or
	 "leal 16(%rsp), %reg".  */
      if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 01 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if ((buf[offset] & 0x4) != 0)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction may be "andq $-XXX, %rsp" or
     "andl $-XXX, %esp".  */
  if (buf[offset] != 0x48)
    offset--;

  if (buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* Skip optional addr32 prefix.  */
  if (buf[offset] == 0x67)
    offset++;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return min (pc + offset + 2, current_pc);
}
2190 /* Do a limited analysis of the prologue at PC and update CACHE
2191 accordingly. Bail out early if CURRENT_PC is reached. Return the
2192 address where the analysis stopped.
2194 We will handle only functions beginning with:
2197 movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
2199 or (for the X32 ABI):
2202 movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec)
2204 Any function that doesn't start with one of these sequences will be
2205 assumed to have no prologue and thus no valid frame pointer in
2209 amd64_analyze_prologue (struct gdbarch *gdbarch,
2210 CORE_ADDR pc, CORE_ADDR current_pc,
2211 struct amd64_frame_cache *cache)
2213 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2214 /* There are two variations of movq %rsp, %rbp. */
2215 static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
2216 static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
2217 /* Ditto for movl %esp, %ebp. */
2218 static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
2219 static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };
2224 if (current_pc <= pc)
2227 if (gdbarch_ptr_bit (gdbarch) == 32)
2228 pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
2230 pc = amd64_analyze_stack_align (pc, current_pc, cache);
2232 op = read_code_unsigned_integer (pc, 1, byte_order);
2234 if (op == 0x55) /* pushq %rbp */
2236 /* Take into account that we've executed the `pushq %rbp' that
2237 starts this instruction sequence. */
2238 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
2239 cache->sp_offset += 8;
2241 /* If that's all, return now. */
2242 if (current_pc <= pc + 1)
2245 read_code (pc + 1, buf, 3);
2247 /* Check for `movq %rsp, %rbp'. */
2248 if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
2249 || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
2251 /* OK, we actually have a frame. */
2252 cache->frameless_p = 0;
2256 /* For X32, also check for `movl %esp, %ebp'. */
2257 if (gdbarch_ptr_bit (gdbarch) == 32)
2259 if (memcmp (buf, mov_esp_ebp_1, 2) == 0
2260 || memcmp (buf, mov_esp_ebp_2, 2) == 0)
2262 /* OK, we actually have a frame. */
2263 cache->frameless_p = 0;
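/* Editorial aside, not part of the GDB sources: the prologue forms
   accepted above reduce to a byte-pattern match.  A self-contained
   matcher (function name invented) under the same assumptions:  */
#if 0
#include <stdint.h>
#include <string.h>

/* Return the number of prologue bytes recognized at BUF, 0 if none.  */
static size_t
match_canonical_prologue (const uint8_t *buf, size_t len, int x32)
{
  static const uint8_t mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
  static const uint8_t mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
  static const uint8_t mov_esp_ebp_1[2] = { 0x89, 0xe5 };
  static const uint8_t mov_esp_ebp_2[2] = { 0x8b, 0xec };

  if (len < 1 || buf[0] != 0x55)        /* pushq %rbp */
    return 0;
  if (len >= 4
      && (memcmp (buf + 1, mov_rsp_rbp_1, 3) == 0
          || memcmp (buf + 1, mov_rsp_rbp_2, 3) == 0))
    return 4;                           /* pushq %rbp; movq %rsp, %rbp */
  if (x32 && len >= 3
      && (memcmp (buf + 1, mov_esp_ebp_1, 2) == 0
          || memcmp (buf + 1, mov_esp_ebp_2, 2) == 0))
    return 3;                           /* pushq %rbp; movl %esp, %ebp */
  return 1;                             /* Bare pushq %rbp.  */
}
#endif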
2274 /* Work around false termination of prologue - GCC PR debug/48827.
2276 START_PC is the first instruction of a function, PC is the most
2277 advanced address within its prologue already determined. The
2278 function returns PC if it has nothing to do.
2279 84 c0 test %al,%al
2280 74 23 je after
2281 <-- here is 0 lines advance - the false prologue end marker.
2282 0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
2283 0f 29 4d 80 movaps %xmm1,-0x80(%rbp)
2284 0f 29 55 90 movaps %xmm2,-0x70(%rbp)
2285 0f 29 5d a0 movaps %xmm3,-0x60(%rbp)
2286 0f 29 65 b0 movaps %xmm4,-0x50(%rbp)
2287 0f 29 6d c0 movaps %xmm5,-0x40(%rbp)
2288 0f 29 75 d0 movaps %xmm6,-0x30(%rbp)
2289 0f 29 7d e0 movaps %xmm7,-0x20(%rbp)
2290 after: */
2293 amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
2295 struct symtab_and_line start_pc_sal, next_sal;
2296 gdb_byte buf[4 + 8 * 7];
2302 start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
2303 if (start_pc_sal.symtab == NULL
2304 || producer_is_gcc_ge_4 (COMPUNIT_PRODUCER
2305 (SYMTAB_COMPUNIT (start_pc_sal.symtab))) < 6
2306 || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
2309 next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
2310 if (next_sal.line != start_pc_sal.line)
2313 /* START_PC can be from overlaid memory, ignored here. */
2314 if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
2318 if (buf[0] != 0x84 || buf[1] != 0xc0)
2325 for (xmmreg = 0; xmmreg < 8; xmmreg++)
2327 /* 0x0f 0x29 0b??rrr101 movaps %xmmN,-0x??(%rbp), where rrr encodes N. */
2328 if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
2329 || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
2333 if ((buf[offset + 2] & 0xc0) == 0x40)
2335 /* 8-bit displacement. */
2339 else if ((buf[offset + 2] & 0xc0) == 0x80)
2341 /* 32-bit displacement. */
2349 if (offset - 4 != buf[3])
2352 return next_sal.end;
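/* Editorial aside, not part of the GDB sources: each of the movaps
   stores scanned above is "0f 29" plus a ModRM byte with R/M = 101
   (%rbp base) and MOD selecting an 8- or 32-bit displacement.  A
   stand-alone length check (helper name invented):  */
#if 0
#include <stdint.h>

/* Return the length of "movaps %xmm<XMMREG>, disp(%rbp)" at INSN,
   or 0 if INSN is not that instruction.  */
static int
movaps_to_rbp_length (const uint8_t *insn, int xmmreg)
{
  if (insn[0] != 0x0f || insn[1] != 0x29
      || (insn[2] & 0x3f) != ((xmmreg << 3) | 0x5))
    return 0;
  if ((insn[2] & 0xc0) == 0x40)
    return 2 + 1 + 1;                   /* Opcode, ModRM, disp8.  */
  if ((insn[2] & 0xc0) == 0x80)
    return 2 + 1 + 4;                   /* Opcode, ModRM, disp32.  */
  return 0;                             /* Some other addressing mode.  */
}
#endif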
2355 /* Return PC of first real instruction. */
2358 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
2360 struct amd64_frame_cache cache;
2362 CORE_ADDR func_addr;
2364 if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
2366 CORE_ADDR post_prologue_pc
2367 = skip_prologue_using_sal (gdbarch, func_addr);
2368 struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
2370 /* Clang always emits a line note before the prologue and another
2371 one after. We trust clang to emit usable line notes. */
2372 if (post_prologue_pc
2373 && (cust != NULL
2374 && COMPUNIT_PRODUCER (cust) != NULL
2375 && strncmp (COMPUNIT_PRODUCER (cust), "clang ",
2376 sizeof ("clang ") - 1) == 0))
2377 return max (start_pc, post_prologue_pc);
2380 amd64_init_frame_cache (&cache);
2381 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
2383 if (cache.frameless_p)
2386 return amd64_skip_xmm_prologue (pc, start_pc);
2390 /* Normal frames. */
2393 amd64_frame_cache_1 (struct frame_info *this_frame,
2394 struct amd64_frame_cache *cache)
2396 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2397 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2401 cache->pc = get_frame_func (this_frame);
2403 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
2406 if (cache->frameless_p)
2408 /* We didn't find a valid frame. If we're at the start of a
2409 function, or somewhere halfway through its prologue, the function's
2410 frame probably hasn't been fully set up yet. Try to
2411 reconstruct the base address for the stack frame by looking
2412 at the stack pointer. For truly "frameless" functions this
2413 might work too. */
2415 if (cache->saved_sp_reg != -1)
2417 /* Stack pointer has been saved. */
2418 get_frame_register (this_frame, cache->saved_sp_reg, buf);
2419 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
2421 /* We're in the middle of aligning the stack. */
2422 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
2423 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
2425 /* This will be added back below. */
2426 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
2430 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2431 cache->base = extract_unsigned_integer (buf, 8, byte_order)
2437 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
2438 cache->base = extract_unsigned_integer (buf, 8, byte_order);
2441 /* Now that we have the base address for the stack frame we can
2442 calculate the value of %rsp in the calling frame. */
2443 cache->saved_sp = cache->base + 16;
2445 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
2446 frame we find it at the same offset from the reconstructed base
2447 address. If we're in the middle of aligning the stack, %rip is handled
2448 differently (see above). */
2449 if (!cache->frameless_p || cache->saved_sp_reg == -1)
2450 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
2452 /* Adjust all the saved registers such that they contain addresses
2453 instead of offsets. */
2454 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
2455 if (cache->saved_regs[i] != -1)
2456 cache->saved_regs[i] += cache->base;
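/* Editorial aside, not part of the GDB sources: for the canonical
   prologue the offsets used above correspond to this layout, with
   CACHE->base equal to %rbp once the prologue has completed:

     base + 16   %rsp in the calling frame (the CFA; also the stack
                 address used for the frame ID below)
     base +  8   saved %rip, i.e. the return address pushed by `call'
     base +  0   saved %rbp, pushed by `pushq %rbp'

   hence saved_sp = base + 16, and %rip recorded at offset 8 before
   the offsets are rebased into absolute addresses.  */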
2461 static struct amd64_frame_cache *
2462 amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
2464 volatile struct gdb_exception ex;
2465 struct amd64_frame_cache *cache;
2470 cache = amd64_alloc_frame_cache ();
2471 *this_cache = cache;
2473 TRY_CATCH (ex, RETURN_MASK_ERROR)
2475 amd64_frame_cache_1 (this_frame, cache);
2477 if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
2478 throw_exception (ex);
2483 static enum unwind_stop_reason
2484 amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
2487 struct amd64_frame_cache *cache =
2488 amd64_frame_cache (this_frame, this_cache);
2491 return UNWIND_UNAVAILABLE;
2493 /* This marks the outermost frame. */
2494 if (cache->base == 0)
2495 return UNWIND_OUTERMOST;
2497 return UNWIND_NO_REASON;
2501 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
2502 struct frame_id *this_id)
2504 struct amd64_frame_cache *cache =
2505 amd64_frame_cache (this_frame, this_cache);
2508 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2509 else if (cache->base == 0)
2511 /* This marks the outermost frame. */
2515 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
2518 static struct value *
2519 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
2522 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2523 struct amd64_frame_cache *cache =
2524 amd64_frame_cache (this_frame, this_cache);
2526 gdb_assert (regnum >= 0);
2528 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
2529 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
2531 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
2532 return frame_unwind_got_memory (this_frame, regnum,
2533 cache->saved_regs[regnum]);
2535 return frame_unwind_got_register (this_frame, regnum, regnum);
2538 static const struct frame_unwind amd64_frame_unwind =
2541 amd64_frame_unwind_stop_reason,
2542 amd64_frame_this_id,
2543 amd64_frame_prev_register,
2545 default_frame_sniffer
2548 /* Generate a bytecode expression to get the value of the saved PC. */
2551 amd64_gen_return_address (struct gdbarch *gdbarch,
2552 struct agent_expr *ax, struct axs_value *value,
2555 /* The following sequence assumes the traditional use of the base
2556 register. */
2557 ax_reg (ax, AMD64_RBP_REGNUM);
2558 ax_const_l (ax, 8);
2559 ax_simple (ax, aop_add);
2560 value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
2561 value->kind = axs_lvalue_memory;
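/* Editorial note, not from the GDB sources: given the frame layout
   sketched earlier, the bytecode above pushes %rbp and the constant
   8, adds them, and reports %rbp + 8 -- the return-address slot --
   as a memory lvalue with the type of %rip; evaluating that lvalue
   yields the saved PC.  */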
2565 /* Signal trampolines. */
2567 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
2568 64-bit variants. This would require using identical frame caches
2569 on both platforms. */
2571 static struct amd64_frame_cache *
2572 amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2574 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2575 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2576 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2577 volatile struct gdb_exception ex;
2578 struct amd64_frame_cache *cache;
2586 cache = amd64_alloc_frame_cache ();
2588 TRY_CATCH (ex, RETURN_MASK_ERROR)
2590 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2591 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
2593 addr = tdep->sigcontext_addr (this_frame);
2594 gdb_assert (tdep->sc_reg_offset);
2595 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
2596 for (i = 0; i < tdep->sc_num_regs; i++)
2597 if (tdep->sc_reg_offset[i] != -1)
2598 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
2602 if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
2603 throw_exception (ex);
2605 *this_cache = cache;
2609 static enum unwind_stop_reason
2610 amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
2613 struct amd64_frame_cache *cache =
2614 amd64_sigtramp_frame_cache (this_frame, this_cache);
2617 return UNWIND_UNAVAILABLE;
2619 return UNWIND_NO_REASON;
2623 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
2624 void **this_cache, struct frame_id *this_id)
2626 struct amd64_frame_cache *cache =
2627 amd64_sigtramp_frame_cache (this_frame, this_cache);
2630 (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
2631 else if (cache->base == 0)
2633 /* This marks the outermost frame. */
2637 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
2640 static struct value *
2641 amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2642 void **this_cache, int regnum)
2644 /* Make sure we've initialized the cache. */
2645 amd64_sigtramp_frame_cache (this_frame, this_cache);
2647 return amd64_frame_prev_register (this_frame, this_cache, regnum);
2651 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2652 struct frame_info *this_frame,
2655 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2657 /* We shouldn't even bother if we don't have a sigcontext_addr
2658 handler. */
2659 if (tdep->sigcontext_addr == NULL)
2662 if (tdep->sigtramp_p != NULL)
2664 if (tdep->sigtramp_p (this_frame))
2668 if (tdep->sigtramp_start != 0)
2670 CORE_ADDR pc = get_frame_pc (this_frame);
2672 gdb_assert (tdep->sigtramp_end != 0);
2673 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
2680 static const struct frame_unwind amd64_sigtramp_frame_unwind =
2683 amd64_sigtramp_frame_unwind_stop_reason,
2684 amd64_sigtramp_frame_this_id,
2685 amd64_sigtramp_frame_prev_register,
2687 amd64_sigtramp_frame_sniffer
2692 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2694 struct amd64_frame_cache *cache =
2695 amd64_frame_cache (this_frame, this_cache);
2700 static const struct frame_base amd64_frame_base =
2702 &amd64_frame_unwind,
2703 amd64_frame_base_address,
2704 amd64_frame_base_address,
2705 amd64_frame_base_address
2708 /* Normal frames, but in a function epilogue. */
2710 /* The epilogue is defined here as the 'ret' instruction, which will
2711 follow any instruction such as 'leave' or 'pop %rbp' that destroys
2712 the function's stack frame. */
2715 amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2718 struct compunit_symtab *cust;
2720 cust = find_pc_compunit_symtab (pc);
2721 if (cust != NULL && COMPUNIT_EPILOGUE_UNWIND_VALID (cust))
2724 if (target_read_memory (pc, &insn, 1))
2725 return 0; /* Can't read memory at pc. */
2727 if (insn != 0xc3) /* 'ret' instruction. */
2734 amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
2735 struct frame_info *this_frame,
2736 void **this_prologue_cache)
2738 if (frame_relative_level (this_frame) == 0)
2739 return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
2740 get_frame_pc (this_frame));
2745 static struct amd64_frame_cache *
2746 amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
2748 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2749 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2750 volatile struct gdb_exception ex;
2751 struct amd64_frame_cache *cache;
2757 cache = amd64_alloc_frame_cache ();
2758 *this_cache = cache;
2760 TRY_CATCH (ex, RETURN_MASK_ERROR)
2762 /* Cache base will be %rsp plus cache->sp_offset (-8). */
2763 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2764 cache->base = extract_unsigned_integer (buf, 8,
2765 byte_order) + cache->sp_offset;
2767 /* Cache pc will be the frame func. */
2768 cache->pc = get_frame_pc (this_frame);
2770 /* The saved %rsp will be at cache->base plus 16. */
2771 cache->saved_sp = cache->base + 16;
2773 /* The saved %rip will be at cache->base plus 8. */
2774 cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
2778 if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
2779 throw_exception (ex);
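/* Editorial aside, not part of the GDB sources: when stopped on the
   `ret' itself the frame pointer is already gone and %rsp points at
   the return address, so the layout is:

     %rsp + 8   %rsp in the calling frame (cache->base + 16)
     %rsp + 0   return address (cache->base + 8)

   with cache->base = %rsp + sp_offset = %rsp - 8, matching the
   computations above.  */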
2784 static enum unwind_stop_reason
2785 amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
2788 struct amd64_frame_cache *cache
2789 = amd64_epilogue_frame_cache (this_frame, this_cache);
2792 return UNWIND_UNAVAILABLE;
2794 return UNWIND_NO_REASON;
2798 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2800 struct frame_id *this_id)
2802 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2806 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2808 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
2811 static const struct frame_unwind amd64_epilogue_frame_unwind =
2814 amd64_epilogue_frame_unwind_stop_reason,
2815 amd64_epilogue_frame_this_id,
2816 amd64_frame_prev_register,
2818 amd64_epilogue_frame_sniffer
2821 static struct frame_id
2822 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2826 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2828 return frame_id_build (fp + 16, get_frame_pc (this_frame));
2831 /* 16-byte align the SP per frame requirements. */
2834 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2836 return sp & -(CORE_ADDR)16;
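/* Editorial aside, not part of the GDB sources: masking with -16
   clears the low four bits, rounding the stack pointer down to the
   next 16-byte boundary:  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
frame_align_example (void)
{
  uint64_t sp = 0x7fffffffe3c8;         /* Arbitrary unaligned value.  */

  assert ((sp & -(uint64_t) 16) == 0x7fffffffe3c0);
  /* Already-aligned values are left untouched.  */
  assert ((0x7fffffffe3c0ULL & -(uint64_t) 16) == 0x7fffffffe3c0);
}
#endif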
2840 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
2841 in the floating-point register set REGSET to register cache
2842 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
2845 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2846 int regnum, const void *fpregs, size_t len)
2848 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2849 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2851 gdb_assert (len == tdep->sizeof_fpregset);
2852 amd64_supply_fxsave (regcache, regnum, fpregs);
2855 /* Collect register REGNUM from the register cache REGCACHE and store
2856 it in the buffer specified by FPREGS and LEN as described by the
2857 floating-point register set REGSET. If REGNUM is -1, do this for
2858 all registers in REGSET. */
2861 amd64_collect_fpregset (const struct regset *regset,
2862 const struct regcache *regcache,
2863 int regnum, void *fpregs, size_t len)
2865 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2866 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2868 gdb_assert (len == tdep->sizeof_fpregset);
2869 amd64_collect_fxsave (regcache, regnum, fpregs);
2872 const struct regset amd64_fpregset =
2874 NULL, amd64_supply_fpregset, amd64_collect_fpregset
2878 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
2879 %rdi. We expect its value to be a pointer to the jmp_buf structure
2880 from which we extract the address that we will land at. This
2881 address is copied into PC. This routine returns non-zero on
2882 success. */
2885 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2889 struct gdbarch *gdbarch = get_frame_arch (frame);
2890 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
2891 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
2893 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2894 longjmp will land. */
2895 if (jb_pc_offset == -1)
2898 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
2899 jb_addr = extract_typed_address
2900 (buf, builtin_type (gdbarch)->builtin_data_ptr);
2901 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2904 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
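/* Editorial aside, not part of the GDB sources: the lookup above is
   two dereferences -- %rdi holds a pointer to the jmp_buf, and the
   saved PC sits at the target-specific byte offset JB_PC_OFFSET
   inside it.  In plain C terms (names invented):  */
#if 0
#include <stdint.h>
#include <string.h>

static uint64_t
longjmp_target (const uint8_t *jmp_buf_bytes, int jb_pc_offset)
{
  uint64_t pc;

  memcpy (&pc, jmp_buf_bytes + jb_pc_offset, sizeof pc);
  return pc;
}
#endif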
2909 static const int amd64_record_regmap[] =
2911 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
2912 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
2913 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
2914 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
2915 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
2916 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
2920 amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
2922 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2923 const struct target_desc *tdesc = info.target_desc;
2924 static const char *const stap_integer_prefixes[] = { "$", NULL };
2925 static const char *const stap_register_prefixes[] = { "%", NULL };
2926 static const char *const stap_register_indirection_prefixes[] = { "(",
2928 static const char *const stap_register_indirection_suffixes[] = { ")",
2931 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
2932 floating-point registers. */
2933 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
2934 tdep->fpregset = &amd64_fpregset;
2936 if (! tdesc_has_registers (tdesc))
2937 tdesc = tdesc_amd64;
2938 tdep->tdesc = tdesc;
2940 tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
2941 tdep->register_names = amd64_register_names;
2943 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
2945 tdep->zmmh_register_names = amd64_zmmh_names;
2946 tdep->k_register_names = amd64_k_names;
2947 tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
2948 tdep->ymm16h_register_names = amd64_ymmh_avx512_names;
2950 tdep->num_zmm_regs = 32;
2951 tdep->num_xmm_avx512_regs = 16;
2952 tdep->num_ymm_avx512_regs = 16;
2954 tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
2955 tdep->k0_regnum = AMD64_K0_REGNUM;
2956 tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
2957 tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
2960 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
2962 tdep->ymmh_register_names = amd64_ymmh_names;
2963 tdep->num_ymm_regs = 16;
2964 tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
2967 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
2969 tdep->mpx_register_names = amd64_mpx_names;
2970 tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
2971 tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
2974 tdep->num_byte_regs = 20;
2975 tdep->num_word_regs = 16;
2976 tdep->num_dword_regs = 16;
2977 /* Avoid wiring in the MMX registers for now. */
2978 tdep->num_mmx_regs = 0;
2980 set_gdbarch_pseudo_register_read_value (gdbarch,
2981 amd64_pseudo_register_read_value);
2982 set_gdbarch_pseudo_register_write (gdbarch,
2983 amd64_pseudo_register_write);
2985 set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);
2987 /* AMD64 has an FPU and 16 SSE registers. */
2988 tdep->st0_regnum = AMD64_ST0_REGNUM;
2989 tdep->num_xmm_regs = 16;
2991 /* This is what all the fuss is about. */
2992 set_gdbarch_long_bit (gdbarch, 64);
2993 set_gdbarch_long_long_bit (gdbarch, 64);
2994 set_gdbarch_ptr_bit (gdbarch, 64);
2996 /* In contrast to the i386, on AMD64 a `long double' actually takes
2997 up 128 bits, even though it's still based on the i387 extended
2998 floating-point format which has only 80 significant bits. */
2999 set_gdbarch_long_double_bit (gdbarch, 128);
3001 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
3003 /* Register numbers of various important registers. */
3004 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
3005 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
3006 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
3007 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
3009 /* The "default" register numbering scheme for AMD64 is referred to
3010 as the "DWARF Register Number Mapping" in the System V psABI.
3011 The preferred debugging format for all known AMD64 targets is
3012 actually DWARF2, and GCC doesn't seem to support DWARF (that is
3013 DWARF-1), but we provide the same mapping just in case. This
3014 mapping is also used for stabs, which GCC does support. */
3015 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3016 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3018 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
3019 be in use on any of the supported AMD64 targets. */
3021 /* Call dummy code. */
3022 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
3023 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
3024 set_gdbarch_frame_red_zone_size (gdbarch, 128);
3026 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
3027 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
3028 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
3030 set_gdbarch_return_value (gdbarch, amd64_return_value);
3032 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
3034 tdep->record_regmap = amd64_record_regmap;
3036 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
3038 /* Hook the function epilogue frame unwinder. This unwinder is
3039 appended to the list first, so that it supersedes the other
3040 unwinders in function epilogues. */
3041 frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
3043 /* Hook the prologue-based frame unwinders. */
3044 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
3045 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
3046 frame_base_set_default (gdbarch, &amd64_frame_base);
3048 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
3050 set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);
3052 set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);
3054 /* SystemTap variables and functions. */
3055 set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
3056 set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
3057 set_gdbarch_stap_register_indirection_prefixes (gdbarch,
3058 stap_register_indirection_prefixes);
3059 set_gdbarch_stap_register_indirection_suffixes (gdbarch,
3060 stap_register_indirection_suffixes);
3061 set_gdbarch_stap_is_single_operand (gdbarch,
3062 i386_stap_is_single_operand);
3063 set_gdbarch_stap_parse_special_token (gdbarch,
3064 i386_stap_parse_special_token);
3065 set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
3066 set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
3067 set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
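/* Editorial aside, not part of the GDB sources: the type sizes
   configured in amd64_init_abi above match what a typical LP64
   compiler reports; the X32 variant below narrows `long' and
   pointers back to 32 bits.  */
#if 0
#include <assert.h>

static void
amd64_type_size_examples (void)
{
  assert (sizeof (long) == 8);
  assert (sizeof (long long) == 8);
  assert (sizeof (void *) == 8);
  /* Stored in 16 bytes, although only 80 bits of the i387 extended
     format are significant.  */
  assert (sizeof (long double) == 16);
}
#endif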
3071 static struct type *
3072 amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3074 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3076 switch (regnum - tdep->eax_regnum)
3078 case AMD64_RBP_REGNUM: /* %ebp */
3079 case AMD64_RSP_REGNUM: /* %esp */
3080 return builtin_type (gdbarch)->builtin_data_ptr;
3081 case AMD64_RIP_REGNUM: /* %eip */
3082 return builtin_type (gdbarch)->builtin_func_ptr;
3085 return i386_pseudo_register_type (gdbarch, regnum);
3089 amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
3091 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3092 const struct target_desc *tdesc = info.target_desc;
3094 amd64_init_abi (info, gdbarch);
3096 if (! tdesc_has_registers (tdesc))
3098 tdep->tdesc = tdesc;
3100 tdep->num_dword_regs = 17;
3101 set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);
3103 set_gdbarch_long_bit (gdbarch, 32);
3104 set_gdbarch_ptr_bit (gdbarch, 32);
3107 /* Provide a prototype to silence -Wmissing-prototypes. */
3108 void _initialize_amd64_tdep (void);
3111 _initialize_amd64_tdep (void)
3113 initialize_tdesc_amd64 ();
3114 initialize_tdesc_amd64_avx ();
3115 initialize_tdesc_amd64_mpx ();
3116 initialize_tdesc_amd64_avx512 ();
3118 initialize_tdesc_x32 ();
3119 initialize_tdesc_x32_avx ();
3120 initialize_tdesc_x32_avx512 ();
3124 /* The 64-bit FXSAVE format differs from the 32-bit format in the
3125 sense that the instruction pointer and data pointer are simply
3126 64-bit offsets into the code segment and the data segment instead
3127 of a selector offset pair. The functions below store the upper 32
3128 bits of these pointers (instead of just the 16 bits of the segment
3129 selector) in the fiseg and foseg registers. */
3131 /* Fill register REGNUM in REGCACHE with the appropriate
3132 floating-point or SSE register value from *FXSAVE. If REGNUM is
3133 -1, do this for all registers. This function masks off any of the
3134 reserved bits in *FXSAVE. */
3137 amd64_supply_fxsave (struct regcache *regcache, int regnum,
3140 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3141 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3143 i387_supply_fxsave (regcache, regnum, fxsave);
3145 if (fxsave
3146 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3148 const gdb_byte *regs = fxsave;
3150 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3151 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3152 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3153 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
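/* Editorial aside, not part of the GDB sources: in the 64-bit FXSAVE
   image the FPU instruction pointer occupies bytes 8-15 and the FPU
   operand pointer bytes 16-23, so the upper halves supplied above
   live at byte offsets 12 and 20 (helper name invented):  */
#if 0
#include <stdint.h>
#include <string.h>

static void
split_fxsave_pointers (const uint8_t *fxsave,
                       uint32_t *fioff, uint32_t *fiseg,
                       uint32_t *fooff, uint32_t *foseg)
{
  memcpy (fioff, fxsave + 8, 4);        /* Low half of the 64-bit FIP.  */
  memcpy (fiseg, fxsave + 12, 4);       /* High half, kept in "fiseg".  */
  memcpy (fooff, fxsave + 16, 4);       /* Low half of the 64-bit FDP.  */
  memcpy (foseg, fxsave + 20, 4);       /* High half, kept in "foseg".  */
}
#endif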
3157 /* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3160 amd64_supply_xsave (struct regcache *regcache, int regnum,
3163 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3164 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3166 i387_supply_xsave (regcache, regnum, xsave);
3168 if (xsave
3169 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3171 const gdb_byte *regs = xsave;
3173 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3174 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep),
3176 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3177 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep),
3182 /* Fill register REGNUM (if it is a floating-point or SSE register) in
3183 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3184 all registers. This function doesn't touch any of the reserved
3185 bits in *FXSAVE. */
3188 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
3191 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3192 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3193 gdb_byte *regs = fxsave;
3195 i387_collect_fxsave (regcache, regnum, fxsave);
3197 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3199 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3200 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3201 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3202 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
3206 /* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
3209 amd64_collect_xsave (const struct regcache *regcache, int regnum,
3210 void *xsave, int gcore)
3212 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3213 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3214 gdb_byte *regs = xsave;
3216 i387_collect_xsave (regcache, regnum, xsave, gcore);
3218 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3220 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3221 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep),
3223 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3224 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep),