1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2019 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
31 #include "arch-utils.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
47 #include "common/selftest.h"
49 #include "aarch64-tdep.h"
50 #include "aarch64-ravenscar-thread.h"
53 #include "elf/aarch64.h"
55 #include "common/vec.h"
58 #include "record-full.h"
59 #include "arch/aarch64-insn.h"
61 #include "opcode/aarch64.h"
/* Bit-field extraction helpers used when decoding AArch64 instructions.

   submask(x)       - mask covering bits [0, x] inclusive.
   bit(obj, st)     - extract single bit ST of OBJ.
   bits(obj, st, fn)- extract bits [st, fn] inclusive of OBJ.

   Use an unsigned 64-bit constant for the shift: with the original
   '1L', hosts where 'long' is 32-bit (LLP64, e.g. Windows) invoke
   undefined behavior for any field reaching bit 31 or above.  Field
   widths up to 63 bits are well defined with 1ULL.  */
#define submask(x) ((1ULL << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4
72 /* All possible aarch64 target descriptors. */
/* Cache of target descriptions, indexed by SVE vector quotient
   (index 0 = no SVE) and by presence of the pointer-authentication
   feature.  NOTE(review): presumably filled lazily by the description
   lookup code elsewhere in this file -- confirm.  */
73 struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];
75 /* The standard register names, and all the valid aliases for them. */
78 const char *const name;
80 } aarch64_register_aliases[] =
/* Alternate user-visible names mapped onto raw register numbers.  */
82 /* 64-bit register names. */
/* fp and lr are the procedure-call-standard roles of x29 and x30.  */
83 {"fp", AARCH64_FP_REGNUM},
84 {"lr", AARCH64_LR_REGNUM},
85 {"sp", AARCH64_SP_REGNUM},
87 /* 32-bit register names. */
/* wN is the low 32-bit view of xN; both alias the same raw number.  */
88 {"w0", AARCH64_X0_REGNUM + 0},
89 {"w1", AARCH64_X0_REGNUM + 1},
90 {"w2", AARCH64_X0_REGNUM + 2},
91 {"w3", AARCH64_X0_REGNUM + 3},
92 {"w4", AARCH64_X0_REGNUM + 4},
93 {"w5", AARCH64_X0_REGNUM + 5},
94 {"w6", AARCH64_X0_REGNUM + 6},
95 {"w7", AARCH64_X0_REGNUM + 7},
96 {"w8", AARCH64_X0_REGNUM + 8},
97 {"w9", AARCH64_X0_REGNUM + 9},
98 {"w10", AARCH64_X0_REGNUM + 10},
99 {"w11", AARCH64_X0_REGNUM + 11},
100 {"w12", AARCH64_X0_REGNUM + 12},
101 {"w13", AARCH64_X0_REGNUM + 13},
102 {"w14", AARCH64_X0_REGNUM + 14},
103 {"w15", AARCH64_X0_REGNUM + 15},
104 {"w16", AARCH64_X0_REGNUM + 16},
105 {"w17", AARCH64_X0_REGNUM + 17},
106 {"w18", AARCH64_X0_REGNUM + 18},
107 {"w19", AARCH64_X0_REGNUM + 19},
108 {"w20", AARCH64_X0_REGNUM + 20},
109 {"w21", AARCH64_X0_REGNUM + 21},
110 {"w22", AARCH64_X0_REGNUM + 22},
111 {"w23", AARCH64_X0_REGNUM + 23},
112 {"w24", AARCH64_X0_REGNUM + 24},
113 {"w25", AARCH64_X0_REGNUM + 25},
114 {"w26", AARCH64_X0_REGNUM + 26},
115 {"w27", AARCH64_X0_REGNUM + 27},
116 {"w28", AARCH64_X0_REGNUM + 28},
117 {"w29", AARCH64_X0_REGNUM + 29},
118 {"w30", AARCH64_X0_REGNUM + 30},
/* ip0/ip1 are the AAPCS64 intra-procedure-call scratch registers.  */
121 {"ip0", AARCH64_X0_REGNUM + 16},
122 {"ip1", AARCH64_X0_REGNUM + 17}
125 /* The required core 'R' registers. */
126 static const char *const aarch64_r_register_names[] =
128 /* These registers must appear in consecutive RAW register number
129 order and they must begin with AARCH64_X0_REGNUM! */
/* 31 general-purpose registers followed by the stack pointer.  */
130 "x0", "x1", "x2", "x3",
131 "x4", "x5", "x6", "x7",
132 "x8", "x9", "x10", "x11",
133 "x12", "x13", "x14", "x15",
134 "x16", "x17", "x18", "x19",
135 "x20", "x21", "x22", "x23",
136 "x24", "x25", "x26", "x27",
137 "x28", "x29", "x30", "sp",
141 /* The FP/SIMD 'V' registers. */
142 static const char *const aarch64_v_register_names[] =
144 /* These registers must appear in consecutive RAW register number
145 order and they must begin with AARCH64_V0_REGNUM! */
/* The 32 128-bit FP/Advanced-SIMD vector registers.  */
146 "v0", "v1", "v2", "v3",
147 "v4", "v5", "v6", "v7",
148 "v8", "v9", "v10", "v11",
149 "v12", "v13", "v14", "v15",
150 "v16", "v17", "v18", "v19",
151 "v20", "v21", "v22", "v23",
152 "v24", "v25", "v26", "v27",
153 "v28", "v29", "v30", "v31",
158 /* The SVE 'Z' and 'P' registers. */
159 static const char *const aarch64_sve_register_names[] =
161 /* These registers must appear in consecutive RAW register number
162 order and they must begin with AARCH64_SVE_Z0_REGNUM! */
/* 32 scalable vector registers ...  */
163 "z0", "z1", "z2", "z3",
164 "z4", "z5", "z6", "z7",
165 "z8", "z9", "z10", "z11",
166 "z12", "z13", "z14", "z15",
167 "z16", "z17", "z18", "z19",
168 "z20", "z21", "z22", "z23",
169 "z24", "z25", "z26", "z27",
170 "z28", "z29", "z30", "z31",
/* ... followed by the 16 scalable predicate registers.  */
172 "p0", "p1", "p2", "p3",
173 "p4", "p5", "p6", "p7",
174 "p8", "p9", "p10", "p11",
175 "p12", "p13", "p14", "p15",
/* Names of the pointer-authentication pseudo registers exposed by the
   target description (mask registers for stripping PAC signatures).  */
179 static const char *const aarch64_pauth_register_names[] =
181 /* Authentication mask for data pointer. */
183 /* Authentication mask for code pointer. */
187 /* AArch64 prologue cache structure. */
/* Per-frame scratch state built up by the prologue analyzer and
   consumed by the prologue/stub unwinders below.  Allocated on the
   frame obstack by aarch64_make_prologue_cache.  */
188 struct aarch64_prologue_cache
190 /* The program counter at the start of the function. It is used to
191 identify this frame as a prologue frame. */
194 /* The program counter at the time this frame was created; i.e. where
195 this function was called from. It is used to identify this frame as a
199 /* The stack pointer at the time this frame was created; i.e. the
200 caller's stack pointer when this function was called. It is used
201 to identify this frame. */
204 /* Is the target available to read from? */
207 /* The frame base for this frame is just prev_sp - frame size.
208 FRAMESIZE is the distance from the frame pointer to the
209 initial stack pointer. */
212 /* The register used to hold the frame pointer for this frame. */
/* -1 here means "no frame could be identified".  */
215 /* Saved register offsets. */
216 struct trad_frame_saved_reg *saved_regs;
/* "show debug aarch64" callback: report the current state of the
   aarch64 debugging flag to FILE.  */
220 show_aarch64_debug (struct ui_file *file, int from_tty,
221 struct cmd_list_element *c, const char *value)
223 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
228 /* Abstract instruction reader. */
/* Abstraction over instruction fetch so the prologue analyzer can be
   driven either from target memory or from canned instruction arrays
   in the self tests below.  */
230 class abstract_instruction_reader
233 /* Read in one instruction. */
/* Returns the LEN-byte value at MEMADDR interpreted in BYTE_ORDER.  */
234 virtual ULONGEST read (CORE_ADDR memaddr, int len,
235 enum bfd_endian byte_order) = 0;
238 /* Instruction reader from real target. */
240 class instruction_reader : public abstract_instruction_reader
243 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
/* Reads through the code cache; may throw if memory is unreadable.  */
246 return read_code_unsigned_integer (memaddr, len, byte_order);
252 /* If address signing is enabled, mask off the signature bits from ADDR, using
253 the register values in THIS_FRAME. */
256 aarch64_frame_unmask_address (struct gdbarch_tdep *tdep,
257 struct frame_info *this_frame,
/* Only strip the PAC when the target has pauth and the unwound
   RA_STATE pseudo register says the return address is signed.  */
260 if (tdep->has_pauth ()
261 && frame_unwind_register_unsigned (this_frame,
262 tdep->pauth_ra_state_regnum))
264 int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
265 CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
/* Clearing the code-mask bits recovers the canonical address.  */
266 addr = addr & ~cmask;
272 /* Analyze a prologue, looking for a recognizable stack frame
273 and frame pointer. Scan until we encounter a store that could
274 clobber the stack frame unexpectedly, or an unknown instruction. */
/* Core prologue scanner.  Symbolically executes instructions from
   START up to (not including) LIMIT using the prologue-value (pv)
   machinery, tracking the X registers, the low 64 bits of the V
   registers (as D registers) and stores to the stack.  On success,
   fills CACHE (if non-NULL) with the frame register, frame size and
   saved-register offsets, and returns the address after the last
   instruction recognized as part of the prologue.  Instructions are
   fetched through READER so self tests can supply canned opcodes.  */
277 aarch64_analyze_prologue (struct gdbarch *gdbarch,
278 CORE_ADDR start, CORE_ADDR limit,
279 struct aarch64_prologue_cache *cache,
280 abstract_instruction_reader& reader)
282 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
284 /* Track X registers and D registers in prologue. */
285 pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
/* Initially every register symbolically holds its own entry value.  */
287 for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
288 regs[i] = pv_register (i, 0);
289 pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
/* AArch64 instructions are fixed 4 bytes wide.  */
291 for (; start < limit; start += 4)
296 insn = reader.read (start, 4, byte_order_for_code);
298 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
/* ADD/SUB immediate: typically "sub sp, sp, #frame" or frame
   pointer setup; propagate as a constant offset.  */
301 if (inst.opcode->iclass == addsub_imm
302 && (inst.opcode->op == OP_ADD
303 || strcmp ("sub", inst.opcode->name) == 0))
305 unsigned rd = inst.operands[0].reg.regno;
306 unsigned rn = inst.operands[1].reg.regno;
308 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
309 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
310 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
311 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
313 if (inst.opcode->op == OP_ADD)
315 regs[rd] = pv_add_constant (regs[rn],
316 inst.operands[2].imm.value);
320 regs[rd] = pv_add_constant (regs[rn],
321 -inst.operands[2].imm.value);
/* ADRP produces a PC-relative page address we cannot track.  */
324 else if (inst.opcode->iclass == pcreladdr
325 && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
327 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
328 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
330 regs[inst.operands[0].reg.regno] = pv_unknown ();
332 else if (inst.opcode->iclass == branch_imm)
334 /* Stop analysis on branch. */
337 else if (inst.opcode->iclass == condbranch)
339 /* Stop analysis on branch. */
342 else if (inst.opcode->iclass == branch_reg)
344 /* Stop analysis on branch. */
347 else if (inst.opcode->iclass == compbranch)
349 /* Stop analysis on branch. */
352 else if (inst.opcode->op == OP_MOVZ)
354 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd)
355 regs[inst.operands[0].reg.regno] = pv_unknown ();
/* "orr xd, xzr/xn, xm" with no shift is a register move; the
   common case is "mov x29, sp" establishing the frame pointer.  */
357 else if (inst.opcode->iclass == log_shift
358 && strcmp (inst.opcode->name, "orr") == 0)
360 unsigned rd = inst.operands[0].reg.regno;
361 unsigned rn = inst.operands[1].reg.regno;
362 unsigned rm = inst.operands[2].reg.regno;
364 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
365 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
366 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
368 if (inst.operands[2].shifter.amount == 0
369 && rn == AARCH64_SP_REGNUM)
375 debug_printf ("aarch64: prologue analysis gave up "
376 "addr=%s opcode=0x%x (orr x register)\n",
377 core_addr_to_string_nz (start), insn);
/* STUR: record an unscaled-offset store into the tracked area.  */
382 else if (inst.opcode->op == OP_STUR)
384 unsigned rt = inst.operands[0].reg.regno;
385 unsigned rn = inst.operands[1].addr.base_regno;
387 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
389 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
390 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
391 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
392 gdb_assert (!inst.operands[1].addr.offset.is_reg);
394 stack.store (pv_add_constant (regs[rn],
395 inst.operands[1].addr.offset.imm),
396 is64 ? 8 : 4, regs[rt]);
398 else if ((inst.opcode->iclass == ldstpair_off
399 || (inst.opcode->iclass == ldstpair_indexed
400 && inst.operands[2].addr.preind))
401 && strcmp ("stp", inst.opcode->name) == 0)
403 /* STP with addressing mode Pre-indexed and Base register. */
406 unsigned rn = inst.operands[2].addr.base_regno;
407 int32_t imm = inst.operands[2].addr.offset.imm;
/* Rt/Rt2 are X registers, Ft/Ft2 are V registers.  */
409 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
410 || inst.operands[0].type == AARCH64_OPND_Ft);
411 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
412 || inst.operands[1].type == AARCH64_OPND_Ft2);
413 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
414 gdb_assert (!inst.operands[2].addr.offset.is_reg);
416 /* If recording this store would invalidate the store area
417 (perhaps because rn is not known) then we should abandon
418 further prologue analysis. */
419 if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
422 if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
425 rt1 = inst.operands[0].reg.regno;
426 rt2 = inst.operands[1].reg.regno;
427 if (inst.operands[0].type == AARCH64_OPND_Ft)
429 /* Only bottom 64-bit of each V register (D register) need
431 gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
/* V registers are tracked after the X registers in REGS.  */
432 rt1 += AARCH64_X_REGISTER_COUNT;
433 rt2 += AARCH64_X_REGISTER_COUNT;
436 stack.store (pv_add_constant (regs[rn], imm), 8,
438 stack.store (pv_add_constant (regs[rn], imm + 8), 8,
441 if (inst.operands[2].addr.writeback)
442 regs[rn] = pv_add_constant (regs[rn], imm);
445 else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */
446 || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */
447 && (inst.opcode->op == OP_STR_POS
448 || inst.opcode->op == OP_STRF_POS)))
449 && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
450 && strcmp ("str", inst.opcode->name) == 0)
452 /* STR (immediate) */
453 unsigned int rt = inst.operands[0].reg.regno;
454 int32_t imm = inst.operands[1].addr.offset.imm;
455 unsigned int rn = inst.operands[1].addr.base_regno;
457 = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
458 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
459 || inst.operands[0].type == AARCH64_OPND_Ft);
461 if (inst.operands[0].type == AARCH64_OPND_Ft)
463 /* Only bottom 64-bit of each V register (D register) need
465 gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
466 rt += AARCH64_X_REGISTER_COUNT;
469 stack.store (pv_add_constant (regs[rn], imm),
470 is64 ? 8 : 4, regs[rt]);
471 if (inst.operands[1].addr.writeback)
472 regs[rn] = pv_add_constant (regs[rn], imm);
474 else if (inst.opcode->iclass == testbranch)
476 /* Stop analysis on branch. */
/* System instructions: only the pointer-authentication hint
   instructions are understood; anything else stops the scan.  */
479 else if (inst.opcode->iclass == ic_system)
481 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
482 int ra_state_val = 0;
484 if (insn == 0xd503233f /* paciasp. */
485 || insn == 0xd503237f /* pacibsp. */)
487 /* Return addresses are mangled. */
490 else if (insn == 0xd50323bf /* autiasp. */
491 || insn == 0xd50323ff /* autibsp. */)
493 /* Return addresses are not mangled. */
499 debug_printf ("aarch64: prologue analysis gave up addr=%s"
500 " opcode=0x%x (iclass)\n",
501 core_addr_to_string_nz (start), insn);
/* Record the RA_STATE value so the unwinder knows whether to
   strip the PAC from the saved return address.  */
505 if (tdep->has_pauth () && cache != nullptr)
506 trad_frame_set_value (cache->saved_regs,
507 tdep->pauth_ra_state_regnum,
514 debug_printf ("aarch64: prologue analysis gave up addr=%s"
516 core_addr_to_string_nz (start), insn);
/* Scan finished: decide which register anchors the frame.  Prefer
   the frame pointer, fall back to the stack pointer.  */
525 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
527 /* Frame pointer is fp. Frame size is constant. */
528 cache->framereg = AARCH64_FP_REGNUM;
529 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
531 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
533 /* Try the stack pointer. */
534 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
535 cache->framereg = AARCH64_SP_REGNUM;
539 /* We're just out of luck. We don't know where the frame is. */
540 cache->framereg = -1;
541 cache->framesize = 0;
/* Translate the symbolic stack contents into per-register save
   offsets (relative to the entry SP) in the cache.  */
544 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
548 if (stack.find_reg (gdbarch, i, &offset))
549 cache->saved_regs[i].addr = offset;
552 for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
554 int regnum = gdbarch_num_regs (gdbarch);
/* D registers live in the pseudo register space, past the raw
   registers, hence the gdbarch_num_regs offset.  */
557 if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
559 cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
/* Convenience overload: analyze the prologue reading instructions
   from the real target.  */
566 aarch64_analyze_prologue (struct gdbarch *gdbarch,
567 CORE_ADDR start, CORE_ADDR limit,
568 struct aarch64_prologue_cache *cache)
570 instruction_reader reader;
572 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
578 namespace selftests {
580 /* Instruction reader from manually cooked instruction sequences. */
/* Feeds the prologue analyzer from an in-memory uint32_t array so the
   self tests below run without a live target.  */
582 class instruction_reader_test : public abstract_instruction_reader
585 template<size_t SIZE>
586 explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
587 : m_insns (insns), m_insns_size (SIZE)
590 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
/* The analyzer must only ever ask for aligned 4-byte fetches
   within the supplied array.  */
593 SELF_CHECK (len == 4);
594 SELF_CHECK (memaddr % 4 == 0);
595 SELF_CHECK (memaddr / 4 < m_insns_size);
597 return m_insns[memaddr / 4];
601 const uint32_t *m_insns;
/* Unit test for aarch64_analyze_prologue: exercises the FP-based,
   SP-based and pointer-authentication prologue shapes.  */
606 aarch64_analyze_prologue_test (void)
608 struct gdbarch_info info;
610 gdbarch_info_init (&info);
611 info.bfd_arch_info = bfd_scan_arch ("aarch64");
613 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
614 SELF_CHECK (gdbarch != NULL);
616 struct aarch64_prologue_cache cache;
617 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
619 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
621 /* Test the simple prologue in which frame pointer is used. */
623 static const uint32_t insns[] = {
624 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
625 0x910003fd, /* mov x29, sp */
626 0x97ffffe6, /* bl 0x400580 */
628 instruction_reader_test reader (insns);
/* Analysis must stop at the bl, i.e. after two instructions.  */
630 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
631 SELF_CHECK (end == 4 * 2);
633 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
634 SELF_CHECK (cache.framesize == 272);
636 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
638 if (i == AARCH64_FP_REGNUM)
639 SELF_CHECK (cache.saved_regs[i].addr == -272);
640 else if (i == AARCH64_LR_REGNUM)
641 SELF_CHECK (cache.saved_regs[i].addr == -264);
643 SELF_CHECK (cache.saved_regs[i].addr == -1);
646 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
648 int regnum = gdbarch_num_regs (gdbarch);
650 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
655 /* Test a prologue in which STR is used and frame pointer is not
658 static const uint32_t insns[] = {
659 0xf81d0ff3, /* str x19, [sp, #-48]! */
660 0xb9002fe0, /* str w0, [sp, #44] */
661 0xf90013e1, /* str x1, [sp, #32]*/
662 0xfd000fe0, /* str d0, [sp, #24] */
663 0xaa0203f3, /* mov x19, x2 */
664 0xf94013e0, /* ldr x0, [sp, #32] */
666 instruction_reader_test reader (insns);
/* Reuse the cache across subtests; reset the saved-reg table.  */
668 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
669 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
671 SELF_CHECK (end == 4 * 5);
673 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
674 SELF_CHECK (cache.framesize == 48);
676 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
679 SELF_CHECK (cache.saved_regs[i].addr == -16);
681 SELF_CHECK (cache.saved_regs[i].addr == -48);
683 SELF_CHECK (cache.saved_regs[i].addr == -1);
686 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
688 int regnum = gdbarch_num_regs (gdbarch);
691 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
694 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
699 /* Test a prologue in which there is a return address signing instruction. */
700 if (tdep->has_pauth ())
702 static const uint32_t insns[] = {
703 0xd503233f, /* paciasp */
704 0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
705 0x910003fd, /* mov x29, sp */
706 0xf801c3f3, /* str x19, [sp, #28] */
707 0xb9401fa0, /* ldr x19, [x29, #28] */
709 instruction_reader_test reader (insns);
711 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
712 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
715 SELF_CHECK (end == 4 * 4);
716 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
717 SELF_CHECK (cache.framesize == 48);
719 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
722 SELF_CHECK (cache.saved_regs[i].addr == -20);
723 else if (i == AARCH64_FP_REGNUM)
724 SELF_CHECK (cache.saved_regs[i].addr == -48);
725 else if (i == AARCH64_LR_REGNUM)
726 SELF_CHECK (cache.saved_regs[i].addr == -40);
728 SELF_CHECK (cache.saved_regs[i].addr == -1);
/* After paciasp the RA_STATE pseudo register must be recorded as 1
   ("return address is mangled").  */
731 if (tdep->has_pauth ())
733 SELF_CHECK (trad_frame_value_p (cache.saved_regs,
734 tdep->pauth_ra_state_regnum));
735 SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1);
739 } // namespace selftests
740 #endif /* GDB_SELF_TEST */
742 /* Implement the "skip_prologue" gdbarch method. */
/* Return the address of the first instruction past the prologue of
   the function containing PC, preferring line-table information and
   falling back to instruction analysis.  */
745 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
747 CORE_ADDR func_addr, limit_pc;
749 /* See if we can determine the end of the prologue via the symbol
750 table. If so, then return either PC, or the PC after the
751 prologue, whichever is greater. */
752 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
754 CORE_ADDR post_prologue_pc
755 = skip_prologue_using_sal (gdbarch, func_addr);
757 if (post_prologue_pc != 0)
758 return std::max (pc, post_prologue_pc);
761 /* Can't determine prologue from the symbol table, need to examine
764 /* Find an upper limit on the function prologue using the debug
765 information. If the debug information could not be used to
766 provide that bound, then use an arbitrary large number as the
768 limit_pc = skip_prologue_using_sal (gdbarch, pc);
/* 128 bytes = 32 instructions; an arbitrary but generous bound.  */
770 limit_pc = pc + 128; /* Magic. */
772 /* Try disassembling prologue. */
773 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
776 /* Scan the function prologue for THIS_FRAME and populate the prologue
/* Driver for the prologue analyzer on a live frame: bounds the scan
   using symbol/line information, then falls back to assuming a
   standard "stp x29, x30" frame when no symbols are available.  */
780 aarch64_scan_prologue (struct frame_info *this_frame,
781 struct aarch64_prologue_cache *cache)
783 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
784 CORE_ADDR prologue_start;
785 CORE_ADDR prologue_end;
786 CORE_ADDR prev_pc = get_frame_pc (this_frame);
787 struct gdbarch *gdbarch = get_frame_arch (this_frame);
789 cache->prev_pc = prev_pc;
791 /* Assume we do not find a frame. */
792 cache->framereg = -1;
793 cache->framesize = 0;
795 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
798 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
802 /* No line info so use the current PC. */
803 prologue_end = prev_pc;
805 else if (sal.end < prologue_end)
807 /* The next line begins after the function end. */
808 prologue_end = sal.end;
/* Never scan past where the frame actually stopped.  */
811 prologue_end = std::min (prologue_end, prev_pc);
812 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
/* No symbol information: assume the conventional frame record at
   the frame pointer -- x29 at offset 0, x30 (lr) at offset 8.  */
818 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
822 cache->framereg = AARCH64_FP_REGNUM;
823 cache->framesize = 16;
824 cache->saved_regs[29].addr = 0;
825 cache->saved_regs[30].addr = 8;
829 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
830 function may throw an exception if the inferior's registers or memory is
834 aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
835 struct aarch64_prologue_cache *cache)
837 CORE_ADDR unwound_fp;
840 aarch64_scan_prologue (this_frame, cache);
/* framereg == -1 means the scan found no usable frame; leave the
   cache in its "unavailable" state.  */
842 if (cache->framereg == -1)
845 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
/* The caller's SP is the frame register plus the frame size.  */
849 cache->prev_sp = unwound_fp + cache->framesize;
851 /* Calculate actual addresses of saved registers using offsets
852 determined by aarch64_analyze_prologue. */
853 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
854 if (trad_frame_addr_p (cache->saved_regs, reg))
855 cache->saved_regs[reg].addr += cache->prev_sp;
857 cache->func = get_frame_func (this_frame);
859 cache->available_p = 1;
862 /* Allocate and fill in *THIS_CACHE with information about the prologue of
863 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
864 Return a pointer to the current aarch64_prologue_cache in
867 static struct aarch64_prologue_cache *
868 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
870 struct aarch64_prologue_cache *cache;
/* Memoized: the cache is built at most once per frame.  */
872 if (*this_cache != NULL)
873 return (struct aarch64_prologue_cache *) *this_cache;
875 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
876 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
881 aarch64_make_prologue_cache_1 (this_frame, cache);
/* NOT_AVAILABLE_ERROR is expected (e.g. truncated core files) and
   leaves available_p clear; anything else propagates.  */
883 catch (const gdb_exception_error &ex)
885 if (ex.error != NOT_AVAILABLE_ERROR)
892 /* Implement the "stop_reason" frame_unwind method. */
894 static enum unwind_stop_reason
895 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
898 struct aarch64_prologue_cache *cache
899 = aarch64_make_prologue_cache (this_frame, this_cache);
901 if (!cache->available_p)
902 return UNWIND_UNAVAILABLE;
904 /* Halt the backtrace at "_start". */
905 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
906 return UNWIND_OUTERMOST;
908 /* We've hit a wall, stop. */
/* A zero caller SP indicates no meaningful outer frame.  */
909 if (cache->prev_sp == 0)
910 return UNWIND_OUTERMOST;
912 return UNWIND_NO_REASON;
915 /* Our frame ID for a normal frame is the current function's starting
916 PC and the caller's SP when we were called. */
919 aarch64_prologue_this_id (struct frame_info *this_frame,
920 void **this_cache, struct frame_id *this_id)
922 struct aarch64_prologue_cache *cache
923 = aarch64_make_prologue_cache (this_frame, this_cache);
/* With an unreadable target we can still identify the frame by its
   function entry, just not by its stack address.  */
925 if (!cache->available_p)
926 *this_id = frame_id_build_unavailable_stack (cache->func);
928 *this_id = frame_id_build (cache->prev_sp, cache->func);
931 /* Implement the "prev_register" frame_unwind method. */
933 static struct value *
934 aarch64_prologue_prev_register (struct frame_info *this_frame,
935 void **this_cache, int prev_regnum)
937 struct aarch64_prologue_cache *cache
938 = aarch64_make_prologue_cache (this_frame, this_cache);
940 /* If we are asked to unwind the PC, then we need to return the LR
941 instead. The prologue may save PC, but it will point into this
942 frame's prologue, not the next frame's resume location. */
943 if (prev_regnum == AARCH64_PC_REGNUM)
946 struct gdbarch *gdbarch = get_frame_arch (this_frame);
947 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
949 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
/* Strip the pointer-authentication code if the prologue scan saw
   a paciasp/pacibsp for this frame.  */
951 if (tdep->has_pauth ()
952 && trad_frame_value_p (cache->saved_regs,
953 tdep->pauth_ra_state_regnum))
954 lr = aarch64_frame_unmask_address (tdep, this_frame, lr);
956 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
959 /* SP is generally not saved to the stack, but this frame is
960 identified by the next frame's stack pointer at the time of the
961 call. The value was already reconstructed into PREV_SP. */
974 if (prev_regnum == AARCH64_SP_REGNUM)
975 return frame_unwind_got_constant (this_frame, prev_regnum,
/* Everything else: look it up in the saved-register table.  */
978 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
982 /* AArch64 prologue unwinder. */
/* Fallback unwinder driven by prologue analysis; used when no DWARF
   CFI is available.  Registered with the default sniffer, so it
   accepts any frame.  */
983 struct frame_unwind aarch64_prologue_unwind =
986 aarch64_prologue_frame_unwind_stop_reason,
987 aarch64_prologue_this_id,
988 aarch64_prologue_prev_register,
990 default_frame_sniffer
993 /* Allocate and fill in *THIS_CACHE with information about the prologue of
994 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
995 Return a pointer to the current aarch64_prologue_cache in
998 static struct aarch64_prologue_cache *
999 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
1001 struct aarch64_prologue_cache *cache;
1003 if (*this_cache != NULL)
1004 return (struct aarch64_prologue_cache *) *this_cache;
1006 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1007 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1008 *this_cache = cache;
/* Stubs build no frame of their own: the caller's SP/PC are simply
   the current values.  */
1012 cache->prev_sp = get_frame_register_unsigned (this_frame,
1014 cache->prev_pc = get_frame_pc (this_frame);
1015 cache->available_p = 1;
/* As with the prologue cache, tolerate unavailable targets.  */
1017 catch (const gdb_exception_error &ex)
1019 if (ex.error != NOT_AVAILABLE_ERROR)
1026 /* Implement the "stop_reason" frame_unwind method. */
1028 static enum unwind_stop_reason
1029 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
1032 struct aarch64_prologue_cache *cache
1033 = aarch64_make_stub_cache (this_frame, this_cache);
1035 if (!cache->available_p)
1036 return UNWIND_UNAVAILABLE;
1038 return UNWIND_NO_REASON;
1041 /* Our frame ID for a stub frame is the current SP and LR. */
1044 aarch64_stub_this_id (struct frame_info *this_frame,
1045 void **this_cache, struct frame_id *this_id)
1047 struct aarch64_prologue_cache *cache
1048 = aarch64_make_stub_cache (this_frame, this_cache);
1050 if (cache->available_p)
1051 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc)
1053 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
1056 /* Implement the "sniffer" frame_unwind method. */
/* Accept the stub unwinder for PLT entries and for frames whose code
   cannot be read (so the prologue unwinder never tries to).  */
1059 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1060 struct frame_info *this_frame,
1061 void **this_prologue_cache)
1063 CORE_ADDR addr_in_block;
1066 addr_in_block = get_frame_address_in_block (this_frame);
1067 if (in_plt_section (addr_in_block)
1068 /* We also use the stub winder if the target memory is unreadable
1069 to avoid having the prologue unwinder trying to read it. */
1070 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1076 /* AArch64 stub unwinder. */
/* Shares prev_register with the prologue unwinder; only the frame ID
   and sniffer differ.  */
1077 struct frame_unwind aarch64_stub_unwind =
1080 aarch64_stub_frame_unwind_stop_reason,
1081 aarch64_stub_this_id,
1082 aarch64_prologue_prev_register,
1084 aarch64_stub_unwind_sniffer
1087 /* Return the frame base address of *THIS_FRAME. */
1090 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1092 struct aarch64_prologue_cache *cache
1093 = aarch64_make_prologue_cache (this_frame, this_cache);
/* Frame base = caller's SP minus this frame's size.  */
1095 return cache->prev_sp - cache->framesize;
1098 /* AArch64 default frame base information. */
/* The same address serves as frame base, locals base and args base.  */
1099 struct frame_base aarch64_normal_base =
1101 &aarch64_prologue_unwind,
1102 aarch64_normal_frame_base,
1103 aarch64_normal_frame_base,
1104 aarch64_normal_frame_base
1107 /* Return the value of the REGNUM register in the previous frame of
/* DWARF unwinder helper installed by aarch64_dwarf2_frame_init_reg:
   unwinds PC as the (PAC-stripped) link register.  */
1110 static struct value *
1111 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1112 void **this_cache, int regnum)
1114 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
1119 case AARCH64_PC_REGNUM:
1120 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1121 lr = aarch64_frame_unmask_address (tdep, this_frame, lr);
1122 return frame_unwind_got_constant (this_frame, regnum, lr);
/* Only PC is ever registered with this function; anything else is
   an internal logic error.  */
1125 internal_error (__FILE__, __LINE__,
1126 _("Unexpected register %d"), regnum);
/* One-byte DWARF expressions used as RA_STATE values (0 = return
   address not mangled, 1 = mangled).  */
1130 static const unsigned char op_lit0 = DW_OP_lit0;
1131 static const unsigned char op_lit1 = DW_OP_lit1;
1133 /* Implement the "init_reg" dwarf2_frame_ops method. */
1136 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1137 struct dwarf2_frame_state_reg *reg,
1138 struct frame_info *this_frame)
1140 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1144 case AARCH64_PC_REGNUM:
/* PC is recovered from LR via aarch64_dwarf2_prev_register.  */
1145 reg->how = DWARF2_FRAME_REG_FN;
1146 reg->loc.fn = aarch64_dwarf2_prev_register;
1149 case AARCH64_SP_REGNUM:
1150 reg->how = DWARF2_FRAME_REG_CFA;
1154 /* Init pauth registers. */
1155 if (tdep->has_pauth ())
1157 if (regnum == tdep->pauth_ra_state_regnum)
1159 /* Initialize RA_STATE to zero. */
1160 reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
1161 reg->loc.exp.start = &op_lit0;
1162 reg->loc.exp.len = 1;
/* The PAC mask registers are invariant across frames.  */
1165 else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
1166 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
1168 reg->how = DWARF2_FRAME_REG_SAME_VALUE;
1174 /* Implement the execute_dwarf_cfa_vendor_op method. */
/* Handle the vendor CFA opcode DW_CFA_AARCH64_negate_ra_state, which
   toggles whether the return address carries a PAC signature.  */
1177 aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
1178 struct dwarf2_frame_state *fs)
1180 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1181 struct dwarf2_frame_state_reg *ra_state;
1183 if (op == DW_CFA_AARCH64_negate_ra_state)
1185 /* On systems without pauth, treat as a nop. */
1186 if (!tdep->has_pauth ())
1189 /* Allocate RA_STATE column if it's not allocated yet. */
1190 fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);
1192 /* Toggle the status of RA_STATE between 0 and 1. */
1193 ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
1194 ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
/* An unset column defaults to 0 (op_lit0 via init_reg), so flip
   nullptr the same way as an explicit lit0.  */
1196 if (ra_state->loc.exp.start == nullptr
1197 || ra_state->loc.exp.start == &op_lit0)
1198 ra_state->loc.exp.start = &op_lit1;
1200 ra_state->loc.exp.start = &op_lit0;
1202 ra_state->loc.exp.len = 1;
1210 /* When arguments must be pushed onto the stack, they go on in reverse
1211 order. The code below implements a FILO (stack) to do this. */
1215 /* Value to pass on stack. It can be NULL if this item is for stack
/* NULL data means "reserve LEN bytes of padding/alignment only".  */
1217 const gdb_byte *data;
1219 /* Size in bytes of value to pass on stack. */
1223 /* Implement the gdbarch type alignment method, overrides the generic
1224 alignment algorithm for anything that is aarch64 specific. */
1227 aarch64_type_align (gdbarch *gdbarch, struct type *t)
1229 t = check_typedef (t);
1230 if (TYPE_CODE (t) == TYPE_CODE_ARRAY && TYPE_VECTOR (t))
1232 /* Use the natural alignment for vector types (the same for
1233 scalar type), but the maximum alignment is 128-bit. */
/* Vectors longer than 16 bytes still align to 16 bytes.  */
1234 if (TYPE_LENGTH (t) > 16)
1237 return TYPE_LENGTH (t);
1240 /* Allow the common code to calculate the alignment. */
1244 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
1246 Return the number of registers required, or -1 on failure.
1248 When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
1249 to the element, else fail if the type of this element does not match the
1253 aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
1254 struct type **fundamental_type)
1256 if (type == nullptr)
1259 switch (TYPE_CODE (type))
/* Scalar floating point: a base element, at most 16 bytes wide.  */
1262 if (TYPE_LENGTH (type) > 16)
1265 if (*fundamental_type == nullptr)
1266 *fundamental_type = type;
1267 else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1268 || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
1273 case TYPE_CODE_COMPLEX:
/* A complex counts as two base elements of its component type.  */
1275 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1276 if (TYPE_LENGTH (target_type) > 16)
1279 if (*fundamental_type == nullptr)
1280 *fundamental_type = target_type;
1281 else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
1282 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
1288 case TYPE_CODE_ARRAY:
1290 if (TYPE_VECTOR (type))
/* Short vectors must be exactly 8 or 16 bytes (AAPCS64 4.3.5.2).  */
1292 if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
1295 if (*fundamental_type == nullptr)
1296 *fundamental_type = type;
1297 else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1298 || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
/* Non-vector array: recurse on the element type and scale by the
   number of elements.  */
1305 struct type *target_type = TYPE_TARGET_TYPE (type);
1306 int count = aapcs_is_vfp_call_or_return_candidate_1
1307 (target_type, fundamental_type);
1312 count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
1317 case TYPE_CODE_STRUCT:
1318 case TYPE_CODE_UNION:
/* Composite: every non-static member must itself be a candidate with
   the same fundamental type; counts accumulate.  */
1322 for (int i = 0; i < TYPE_NFIELDS (type); i++)
1324 /* Ignore any static fields. */
1325 if (field_is_static (&TYPE_FIELD (type, i)))
1328 struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));
1330 int sub_count = aapcs_is_vfp_call_or_return_candidate_1
1331 (member, fundamental_type);
1332 if (sub_count == -1)
1337 /* Ensure there is no padding between the fields (allowing for empty
1338 zero length structs). */
1339 int ftype_length = (*fundamental_type == nullptr)
1340 ? 0 : TYPE_LENGTH (*fundamental_type);
1341 if (count * ftype_length != TYPE_LENGTH (type))
1354 /* Return true if an argument, whose type is described by TYPE, can be passed or
1355 returned in simd/fp registers, providing enough parameter passing registers
1356 are available. This is as described in the AAPCS64.
1358 Upon successful return, *COUNT returns the number of needed registers,
1359 *FUNDAMENTAL_TYPE contains the type of those registers.
1361 Candidate as per the AAPCS64 5.4.2.C is either a:
1364 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1365 all the members are floats and has at most 4 members.
1366 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1367 all the members are short vectors and has at most 4 members.
1370 Note that HFAs and HVAs can include nested structures and arrays. */
1373 aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
1374 struct type **fundamental_type)
1376 if (type == nullptr)
/* Reset the out-parameter before the recursive walk fills it in.  */
1379 *fundamental_type = nullptr;
1381 int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
/* Reject candidates needing more than HA_MAX_NUM_FLDS (4) registers.  */
1384 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
1393 /* AArch64 function call information structure. */
/* Tracks the AAPCS64 allocation state (argument number, NGRN, NSRN, NSAA,
   pending stack items) while marshalling arguments for a dummy call.  */
1394 struct aarch64_call_info
1396 /* The current argument number. */
1397 unsigned argnum = 0;
1399 /* The next general purpose register number, equivalent to NGRN as
1400 described in the AArch64 Procedure Call Standard. */
1403 /* The next SIMD and floating point register number, equivalent to
1404 NSRN as described in the AArch64 Procedure Call Standard. */
1407 /* The next stacked argument address, equivalent to NSAA as
1408 described in the AArch64 Procedure Call Standard. */
1411 /* Stack item vector. */
1412 std::vector<stack_item_t> si;
1415 /* Pass a value in a sequence of consecutive X registers. The caller
1416 is responsible for ensuring sufficient registers are available. */
1419 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1420 struct aarch64_call_info *info, struct type *type,
1423 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1424 int len = TYPE_LENGTH (type);
1425 enum type_code typecode = TYPE_CODE (type);
/* First X register for this argument, per the current NGRN.  */
1426 int regnum = AARCH64_X0_REGNUM + info->ngrn;
1427 const bfd_byte *buf = value_contents (arg);
/* Copy up to X_REGISTER_SIZE bytes per register.  */
1433 int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1434 CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1438 /* Adjust sub-word struct/union args when big-endian. */
1439 if (byte_order == BFD_ENDIAN_BIG
1440 && partial_len < X_REGISTER_SIZE
1441 && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1442 regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1446 debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
1447 gdbarch_register_name (gdbarch, regnum),
1448 phex (regval, X_REGISTER_SIZE));
1450 regcache_cooked_write_unsigned (regcache, regnum, regval);
1457 /* Attempt to marshall a value in a V register. Return 1 if
1458 successful, or 0 if insufficient registers are available. This
1459 function, unlike the equivalent pass_in_x() function does not
1460 handle arguments spread across multiple registers. */
1463 pass_in_v (struct gdbarch *gdbarch,
1464 struct regcache *regcache,
1465 struct aarch64_call_info *info,
1466 int len, const bfd_byte *buf)
/* Next V register per the current NSRN.  */
1470 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1471 /* Enough space for a full vector register. */
1472 gdb_byte reg[register_size (gdbarch, regnum)];
1473 gdb_assert (len <= sizeof (reg));
/* Zero-fill so the unused high bytes of the register are defined.  */
1478 memset (reg, 0, sizeof (reg));
1479 /* PCS C.1, the argument is allocated to the least significant
1480 bits of V register. */
1481 memcpy (reg, buf, len);
1482 regcache->cooked_write (regnum, reg);
1486 debug_printf ("arg %d in %s\n", info->argnum,
1487 gdbarch_register_name (gdbarch, regnum));
1495 /* Marshall an argument onto the stack. */
/* Appends the value (and, when needed, trailing alignment padding) to
   info->si and advances the NSAA accordingly.  */
1498 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1501 const bfd_byte *buf = value_contents (arg);
1502 int len = TYPE_LENGTH (type);
1508 align = type_align (type);
1510 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1511 Natural alignment of the argument's type. */
1512 align = align_up (align, 8);
1514 /* The AArch64 PCS requires at most doubleword alignment. */
1520 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1526 info->si.push_back (item);
/* If the new NSAA is misaligned for the next slot, queue a padding
   item after the data item.  */
1529 if (info->nsaa & (align - 1))
1531 /* Push stack alignment padding. */
1532 int pad = align - (info->nsaa & (align - 1));
1537 info->si.push_back (item);
1542 /* Marshall an argument into a sequence of one or more consecutive X
1543 registers or, if insufficient X registers are available then onto
1547 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1548 struct aarch64_call_info *info, struct type *type,
1551 int len = TYPE_LENGTH (type);
/* Number of X registers needed, rounding the length up.  */
1552 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1554 /* PCS C.13 - Pass in registers if we have enough spare */
1555 if (info->ngrn + nregs <= 8)
1557 pass_in_x (gdbarch, regcache, info, type, arg);
1558 info->ngrn += nregs;
1563 pass_on_stack (info, type, arg);
1567 /* Pass a value, which is of type arg_type, in a V register. Assumes value is a
1568 aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1569 registers. A return value of false is an error state as the value will have
1570 been partially passed to the stack. */
1572 pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
1573 struct aarch64_call_info *info, struct type *arg_type,
1576 switch (TYPE_CODE (arg_type))
/* Scalar float: one V register.  */
1579 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1580 value_contents (arg));
1583 case TYPE_CODE_COMPLEX:
/* Complex: real then imaginary component, one V register each.  */
1585 const bfd_byte *buf = value_contents (arg);
1586 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));
1588 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1592 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1593 buf + TYPE_LENGTH (target_type));
1596 case TYPE_CODE_ARRAY:
1597 if (TYPE_VECTOR (arg_type))
1598 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1599 value_contents (arg));
1602 case TYPE_CODE_STRUCT:
1603 case TYPE_CODE_UNION:
/* HFA/HVA: recurse into each non-static member.  */
1604 for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
1606 /* Don't include static fields. */
1607 if (field_is_static (&TYPE_FIELD (arg_type, i)))
1610 struct value *field = value_primitive_field (arg, 0, i, arg_type);
1611 struct type *field_type = check_typedef (value_type (field));
1613 if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
1624 /* Implement the "push_dummy_call" gdbarch method. */
1627 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1628 struct regcache *regcache, CORE_ADDR bp_addr,
1630 struct value **args, CORE_ADDR sp,
1631 function_call_return_method return_method,
1632 CORE_ADDR struct_addr)
1635 struct aarch64_call_info info;
1637 /* We need to know what the type of the called function is in order
1638 to determine the number of named/anonymous arguments for the
1639 actual argument placement, and the return type in order to handle
1640 return value correctly.
1642 The generic code above us views the decision of return in memory
1643 or return in registers as a two stage process. The language
1644 handler is consulted first and may decide to return in memory (eg
1645 class with copy constructor returned by value), this will cause
1646 the generic code to allocate space AND insert an initial leading
1649 If the language code does not decide to pass in memory then the
1650 target code is consulted.
1652 If the language code decides to pass in memory we want to move
1653 the pointer inserted as the initial argument from the argument
1654 list and into X8, the conventional AArch64 struct return pointer
1657 /* Set the return address. For the AArch64, the return breakpoint
1658 is always at BP_ADDR. */
1659 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1661 /* If we were given an initial argument for the return slot, lose it. */
1662 if (return_method == return_method_hidden_param)
1668 /* The struct_return pointer occupies X8. */
1669 if (return_method != return_method_normal)
1673 debug_printf ("struct return in %s = 0x%s\n",
1674 gdbarch_register_name (gdbarch,
1675 AARCH64_STRUCT_RETURN_REGNUM),
1676 paddress (gdbarch, struct_addr));
1678 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
/* Marshall each argument per the AAPCS64 allocation rules.  */
1682 for (argnum = 0; argnum < nargs; argnum++)
1684 struct value *arg = args[argnum];
1685 struct type *arg_type, *fundamental_type;
1688 arg_type = check_typedef (value_type (arg));
1689 len = TYPE_LENGTH (arg_type);
1691 /* If arg can be passed in v registers as per the AAPCS64, then do so
1692 if there are enough spare registers. */
1693 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1696 if (info.nsrn + elements <= 8)
1698 /* We know that we have sufficient registers available therefore
1699 this will never need to fallback to the stack. */
1700 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1702 gdb_assert_not_reached ("Failed to push args");
/* PCS C.3: once a candidate spills, it goes wholly on the stack.  */
1707 pass_on_stack (&info, arg_type, arg);
1712 switch (TYPE_CODE (arg_type))
1715 case TYPE_CODE_BOOL:
1716 case TYPE_CODE_CHAR:
1717 case TYPE_CODE_RANGE:
1718 case TYPE_CODE_ENUM:
1721 /* Promote to 32 bit integer. */
1722 if (TYPE_UNSIGNED (arg_type))
1723 arg_type = builtin_type (gdbarch)->builtin_uint32;
1725 arg_type = builtin_type (gdbarch)->builtin_int32;
1726 arg = value_cast (arg_type, arg);
1728 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1731 case TYPE_CODE_STRUCT:
1732 case TYPE_CODE_ARRAY:
1733 case TYPE_CODE_UNION:
1736 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1737 invisible reference. */
1739 /* Allocate aligned storage. */
1740 sp = align_down (sp - len, 16);
1742 /* Write the real data into the stack. */
1743 write_memory (sp, value_contents (arg), len);
1745 /* Construct the indirection. */
1746 arg_type = lookup_pointer_type (arg_type);
1747 arg = value_from_pointer (arg_type, sp);
1748 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1751 /* PCS C.15 / C.18 multiple values pass. */
1752 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1756 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1761 /* Make sure stack retains 16 byte alignment. */
1763 sp -= 16 - (info.nsaa & 15);
/* Pop queued items back-to-front so the first argument ends up at the
   lowest address (the queue was built in call order).  */
1765 while (!info.si.empty ())
1767 const stack_item_t &si = info.si.back ();
/* NULL data marks padding: just skip the bytes.  */
1770 if (si.data != NULL)
1771 write_memory (sp, si.data, si.len);
1772 info.si.pop_back ();
1775 /* Finally, update the SP register. */
1776 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1781 /* Implement the "frame_align" gdbarch method. */
1784 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1786 /* Align the stack to sixteen bytes. */
1787 return sp & ~(CORE_ADDR) 15;
1790 /* Return the type for an AdvSISD Q register. */
/* A union of unsigned/signed 128-bit views; lazily built once and then
   cached on the tdep.  */
1792 static struct type *
1793 aarch64_vnq_type (struct gdbarch *gdbarch)
1795 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1797 if (tdep->vnq_type == NULL)
1802 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1805 elem = builtin_type (gdbarch)->builtin_uint128;
1806 append_composite_type_field (t, "u", elem);
1808 elem = builtin_type (gdbarch)->builtin_int128;
1809 append_composite_type_field (t, "s", elem);
1814 return tdep->vnq_type;
1817 /* Return the type for an AdvSISD D register. */
/* A union of double/uint64/int64 views; lazily built and cached.  */
1819 static struct type *
1820 aarch64_vnd_type (struct gdbarch *gdbarch)
1822 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1824 if (tdep->vnd_type == NULL)
1829 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1832 elem = builtin_type (gdbarch)->builtin_double;
1833 append_composite_type_field (t, "f", elem);
1835 elem = builtin_type (gdbarch)->builtin_uint64;
1836 append_composite_type_field (t, "u", elem);
1838 elem = builtin_type (gdbarch)->builtin_int64;
1839 append_composite_type_field (t, "s", elem);
1844 return tdep->vnd_type;
1847 /* Return the type for an AdvSISD S register. */
/* A union of float/uint32/int32 views; lazily built and cached.  */
1849 static struct type *
1850 aarch64_vns_type (struct gdbarch *gdbarch)
1852 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1854 if (tdep->vns_type == NULL)
1859 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1862 elem = builtin_type (gdbarch)->builtin_float;
1863 append_composite_type_field (t, "f", elem);
1865 elem = builtin_type (gdbarch)->builtin_uint32;
1866 append_composite_type_field (t, "u", elem);
1868 elem = builtin_type (gdbarch)->builtin_int32;
1869 append_composite_type_field (t, "s", elem);
1874 return tdep->vns_type;
1877 /* Return the type for an AdvSISD H register. */
/* A union of half/uint16/int16 views; lazily built and cached.  */
1879 static struct type *
1880 aarch64_vnh_type (struct gdbarch *gdbarch)
1882 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1884 if (tdep->vnh_type == NULL)
1889 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1892 elem = builtin_type (gdbarch)->builtin_half;
1893 append_composite_type_field (t, "f", elem);
1895 elem = builtin_type (gdbarch)->builtin_uint16;
1896 append_composite_type_field (t, "u", elem);
1898 elem = builtin_type (gdbarch)->builtin_int16;
1899 append_composite_type_field (t, "s", elem);
1904 return tdep->vnh_type;
1907 /* Return the type for an AdvSISD B register. */
/* A union of uint8/int8 views (no float view at byte width); lazily
   built and cached.  */
1909 static struct type *
1910 aarch64_vnb_type (struct gdbarch *gdbarch)
1912 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1914 if (tdep->vnb_type == NULL)
1919 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1922 elem = builtin_type (gdbarch)->builtin_uint8;
1923 append_composite_type_field (t, "u", elem);
1925 elem = builtin_type (gdbarch)->builtin_int8;
1926 append_composite_type_field (t, "s", elem);
1931 return tdep->vnb_type;
1934 /* Return the type for an AdvSISD V register. */
/* A union of sub-unions, one per element width (d/s/h/b/q), each of
   which offers vector views with float/unsigned/signed element types.
   Lazily built once and cached on the tdep.  */
1936 static struct type *
1937 aarch64_vnv_type (struct gdbarch *gdbarch)
1939 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1941 if (tdep->vnv_type == NULL)
1943 /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
1944 slice from the non-pseudo vector registers. However NEON V registers
1945 are always vector registers, and need constructing as such. */
1946 const struct builtin_type *bt = builtin_type (gdbarch);
1948 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
/* 2 x 64-bit lanes.  */
1951 struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1953 append_composite_type_field (sub, "f",
1954 init_vector_type (bt->builtin_double, 2));
1955 append_composite_type_field (sub, "u",
1956 init_vector_type (bt->builtin_uint64, 2));
1957 append_composite_type_field (sub, "s",
1958 init_vector_type (bt->builtin_int64, 2));
1959 append_composite_type_field (t, "d", sub);
/* 4 x 32-bit lanes.  */
1961 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1963 append_composite_type_field (sub, "f",
1964 init_vector_type (bt->builtin_float, 4));
1965 append_composite_type_field (sub, "u",
1966 init_vector_type (bt->builtin_uint32, 4));
1967 append_composite_type_field (sub, "s",
1968 init_vector_type (bt->builtin_int32, 4));
1969 append_composite_type_field (t, "s", sub);
/* 8 x 16-bit lanes.  */
1971 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1973 append_composite_type_field (sub, "f",
1974 init_vector_type (bt->builtin_half, 8));
1975 append_composite_type_field (sub, "u",
1976 init_vector_type (bt->builtin_uint16, 8));
1977 append_composite_type_field (sub, "s",
1978 init_vector_type (bt->builtin_int16, 8));
1979 append_composite_type_field (t, "h", sub);
/* 16 x 8-bit lanes.  */
1981 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1983 append_composite_type_field (sub, "u",
1984 init_vector_type (bt->builtin_uint8, 16));
1985 append_composite_type_field (sub, "s",
1986 init_vector_type (bt->builtin_int8, 16));
1987 append_composite_type_field (t, "b", sub);
/* 1 x 128-bit lane.  */
1989 sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1991 append_composite_type_field (sub, "u",
1992 init_vector_type (bt->builtin_uint128, 1));
1993 append_composite_type_field (sub, "s",
1994 init_vector_type (bt->builtin_int128, 1));
1995 append_composite_type_field (t, "q", sub);
2000 return tdep->vnv_type;
2003 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
2006 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2008 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2010 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2011 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2013 if (reg == AARCH64_DWARF_SP)
2014 return AARCH64_SP_REGNUM;
2016 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2017 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2019 if (reg == AARCH64_DWARF_SVE_VG)
2020 return AARCH64_SVE_VG_REGNUM;
2022 if (reg == AARCH64_DWARF_SVE_FFR)
2023 return AARCH64_SVE_FFR_REGNUM;
2025 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2026 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2028 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2029 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2031 if (tdep->has_pauth ())
2033 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2034 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2036 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2037 return tdep->pauth_ra_state_regnum;
2043 /* Implement the "print_insn" gdbarch method. */
/* NOTE(review): clearing info->symbols presumably stops the opcodes
   disassembler from consulting symbol data for operand printing —
   confirm against opcodes/disassemble documentation.  */
2046 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2048 info->symbols = NULL;
2049 return default_print_insn (memaddr, info);
2052 /* AArch64 BRK software debug mode instruction.
2053 Note that AArch64 code is always little-endian.
2054 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
/* Stored in little-endian byte order, matching the instruction stream.  */
2055 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2057 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2059 /* Extract from an array REGS containing the (raw) register state a
2060 function return value of type TYPE, and copy that, in virtual
2061 format, into VALBUF. */
2064 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2067 struct gdbarch *gdbarch = regs->arch ();
2068 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2070 struct type *fundamental_type;
/* HFA/HVA: one element per V register, v0 upwards.  */
2072 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2075 int len = TYPE_LENGTH (fundamental_type);
2077 for (int i = 0; i < elements; i++)
2079 int regno = AARCH64_V0_REGNUM + i;
2080 /* Enough space for a full vector register. */
2081 gdb_byte buf[register_size (gdbarch, regno)];
2082 gdb_assert (len <= sizeof (buf));
2086 debug_printf ("read HFA or HVA return value element %d from %s\n",
2088 gdbarch_register_name (gdbarch, regno));
2090 regs->cooked_read (regno, buf);
2092 memcpy (valbuf, buf, len);
2096 else if (TYPE_CODE (type) == TYPE_CODE_INT
2097 || TYPE_CODE (type) == TYPE_CODE_CHAR
2098 || TYPE_CODE (type) == TYPE_CODE_BOOL
2099 || TYPE_CODE (type) == TYPE_CODE_PTR
2100 || TYPE_IS_REFERENCE (type)
2101 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2103 /* If the type is a plain integer, then the access is
2104 straight-forward. Otherwise we have to play around a bit
2106 int len = TYPE_LENGTH (type);
2107 int regno = AARCH64_X0_REGNUM;
2112 /* By using store_unsigned_integer we avoid having to do
2113 anything special for small big-endian values. */
2114 regcache_cooked_read_unsigned (regs, regno++, &tmp);
2115 store_unsigned_integer (valbuf,
2116 (len > X_REGISTER_SIZE
2117 ? X_REGISTER_SIZE : len), byte_order, tmp);
2118 len -= X_REGISTER_SIZE;
2119 valbuf += X_REGISTER_SIZE;
2124 /* For a structure or union the behaviour is as if the value had
2125 been stored to word-aligned memory and then loaded into
2126 registers with 64-bit load instruction(s). */
2127 int len = TYPE_LENGTH (type);
2128 int regno = AARCH64_X0_REGNUM;
2129 bfd_byte buf[X_REGISTER_SIZE];
2133 regs->cooked_read (regno++, buf);
2134 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2135 len -= X_REGISTER_SIZE;
2136 valbuf += X_REGISTER_SIZE;
2142 /* Will a function return an aggregate type in memory or in a
2143 register? Return 0 if an aggregate type can be returned in a
2144 register, 1 if it must be returned in memory. */
2147 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2149 type = check_typedef (type);
2151 struct type *fundamental_type;
2153 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2156 /* v0-v7 are used to return values and one register is allocated
2157 for one member. However, HFA or HVA has at most four members. */
2161 if (TYPE_LENGTH (type) > 16)
2163 /* PCS B.6 Aggregates larger than 16 bytes are passed by
2164 invisible reference. */
2172 /* Write into appropriate registers a function return value of type
2173 TYPE, given in virtual format. */
2176 aarch64_store_return_value (struct type *type, struct regcache *regs,
2177 const gdb_byte *valbuf)
2179 struct gdbarch *gdbarch = regs->arch ();
2180 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2182 struct type *fundamental_type;
/* HFA/HVA: one element per V register, v0 upwards (mirrors
   aarch64_extract_return_value).  */
2184 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2187 int len = TYPE_LENGTH (fundamental_type);
2189 for (int i = 0; i < elements; i++)
2191 int regno = AARCH64_V0_REGNUM + i;
2192 /* Enough space for a full vector register. */
2193 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2194 gdb_assert (len <= sizeof (tmpbuf));
2198 debug_printf ("write HFA or HVA return value element %d to %s\n",
2200 gdbarch_register_name (gdbarch, regno));
2203 memcpy (tmpbuf, valbuf,
2204 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2205 regs->cooked_write (regno, tmpbuf);
2209 else if (TYPE_CODE (type) == TYPE_CODE_INT
2210 || TYPE_CODE (type) == TYPE_CODE_CHAR
2211 || TYPE_CODE (type) == TYPE_CODE_BOOL
2212 || TYPE_CODE (type) == TYPE_CODE_PTR
2213 || TYPE_IS_REFERENCE (type)
2214 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2216 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2218 /* Values of one word or less are zero/sign-extended and
2220 bfd_byte tmpbuf[X_REGISTER_SIZE];
2221 LONGEST val = unpack_long (type, valbuf);
2223 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2224 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2228 /* Integral values greater than one word are stored in
2229 consecutive registers starting with x0. This will always
2230 be a multiple of the register size. */
2231 int len = TYPE_LENGTH (type);
2232 int regno = AARCH64_X0_REGNUM;
2236 regs->cooked_write (regno++, valbuf);
2237 len -= X_REGISTER_SIZE;
2238 valbuf += X_REGISTER_SIZE;
2244 /* For a structure or union the behaviour is as if the value had
2245 been stored to word-aligned memory and then loaded into
2246 registers with 64-bit load instruction(s). */
2247 int len = TYPE_LENGTH (type);
2248 int regno = AARCH64_X0_REGNUM;
2249 bfd_byte tmpbuf[X_REGISTER_SIZE];
2253 memcpy (tmpbuf, valbuf,
2254 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2255 regs->cooked_write (regno++, tmpbuf);
2256 len -= X_REGISTER_SIZE;
2257 valbuf += X_REGISTER_SIZE;
2262 /* Implement the "return_value" gdbarch method. */
/* Decides between struct-convention (in memory) and register-convention
   returns, and copies data via the store/extract helpers above when a
   buffer is supplied.  */
2264 static enum return_value_convention
2265 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2266 struct type *valtype, struct regcache *regcache,
2267 gdb_byte *readbuf, const gdb_byte *writebuf)
2270 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2271 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2272 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2274 if (aarch64_return_in_memory (gdbarch, valtype))
2277 debug_printf ("return value in memory\n");
2278 return RETURN_VALUE_STRUCT_CONVENTION;
2283 aarch64_store_return_value (valtype, regcache, writebuf);
2286 aarch64_extract_return_value (valtype, regcache, readbuf);
2289 debug_printf ("return value in registers\n");
2291 return RETURN_VALUE_REGISTER_CONVENTION;
2294 /* Implement the "get_longjmp_target" gdbarch method. */
/* Reads the saved PC out of the jmp_buf pointed to by x0, using the
   OS-specific slot index/size recorded in the tdep.  */
2297 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2300 gdb_byte buf[X_REGISTER_SIZE];
2301 struct gdbarch *gdbarch = get_frame_arch (frame);
2302 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2303 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
/* x0 holds the jmp_buf pointer at the longjmp call site.  */
2305 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2307 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2311 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2315 /* Implement the "gen_return_address" gdbarch method. */
/* For agent expressions, the return address is simply the LR register
   lvalue — no expression bytecode is needed.  */
2318 aarch64_gen_return_address (struct gdbarch *gdbarch,
2319 struct agent_expr *ax, struct axs_value *value,
2322 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2323 value->kind = axs_lvalue_register;
2324 value->u.reg = AARCH64_LR_REGNUM;
2328 /* Return the pseudo register name corresponding to register regnum. */
/* Pseudo registers are the Q/D/S/H/B slices of the V (or SVE Z)
   registers, plus "v" aliases on SVE targets, plus the nameless pauth
   RA_STATE column.  */
2331 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2333 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2335 static const char *const q_name[] =
2337 "q0", "q1", "q2", "q3",
2338 "q4", "q5", "q6", "q7",
2339 "q8", "q9", "q10", "q11",
2340 "q12", "q13", "q14", "q15",
2341 "q16", "q17", "q18", "q19",
2342 "q20", "q21", "q22", "q23",
2343 "q24", "q25", "q26", "q27",
2344 "q28", "q29", "q30", "q31",
2347 static const char *const d_name[] =
2349 "d0", "d1", "d2", "d3",
2350 "d4", "d5", "d6", "d7",
2351 "d8", "d9", "d10", "d11",
2352 "d12", "d13", "d14", "d15",
2353 "d16", "d17", "d18", "d19",
2354 "d20", "d21", "d22", "d23",
2355 "d24", "d25", "d26", "d27",
2356 "d28", "d29", "d30", "d31",
2359 static const char *const s_name[] =
2361 "s0", "s1", "s2", "s3",
2362 "s4", "s5", "s6", "s7",
2363 "s8", "s9", "s10", "s11",
2364 "s12", "s13", "s14", "s15",
2365 "s16", "s17", "s18", "s19",
2366 "s20", "s21", "s22", "s23",
2367 "s24", "s25", "s26", "s27",
2368 "s28", "s29", "s30", "s31",
2371 static const char *const h_name[] =
2373 "h0", "h1", "h2", "h3",
2374 "h4", "h5", "h6", "h7",
2375 "h8", "h9", "h10", "h11",
2376 "h12", "h13", "h14", "h15",
2377 "h16", "h17", "h18", "h19",
2378 "h20", "h21", "h22", "h23",
2379 "h24", "h25", "h26", "h27",
2380 "h28", "h29", "h30", "h31",
2383 static const char *const b_name[] =
2385 "b0", "b1", "b2", "b3",
2386 "b4", "b5", "b6", "b7",
2387 "b8", "b9", "b10", "b11",
2388 "b12", "b13", "b14", "b15",
2389 "b16", "b17", "b18", "b19",
2390 "b20", "b21", "b22", "b23",
2391 "b24", "b25", "b26", "b27",
2392 "b28", "b29", "b30", "b31",
/* Convert to a 0-based pseudo register index.  */
2395 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2397 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2398 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2400 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2401 return d_name[p_regnum - AARCH64_D0_REGNUM];
2403 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2404 return s_name[p_regnum - AARCH64_S0_REGNUM];
2406 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2407 return h_name[p_regnum - AARCH64_H0_REGNUM];
2409 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2410 return b_name[p_regnum - AARCH64_B0_REGNUM];
2412 if (tdep->has_sve ())
2414 static const char *const sve_v_name[] =
2416 "v0", "v1", "v2", "v3",
2417 "v4", "v5", "v6", "v7",
2418 "v8", "v9", "v10", "v11",
2419 "v12", "v13", "v14", "v15",
2420 "v16", "v17", "v18", "v19",
2421 "v20", "v21", "v22", "v23",
2422 "v24", "v25", "v26", "v27",
2423 "v28", "v29", "v30", "v31",
2426 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2427 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2428 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2431 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2432 prevents it from being read by methods such as
2433 mi_cmd_trace_frame_collected. */
2434 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2437 internal_error (__FILE__, __LINE__,
2438 _("aarch64_pseudo_register_name: bad register number %d"),
2442 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
/* Each pseudo register class gets the matching AdvSISD union type built
   by the aarch64_vn?_type helpers above; RA_STATE is a plain uint64.  */
2444 static struct type *
2445 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2447 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2449 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2451 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2452 return aarch64_vnq_type (gdbarch);
2454 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2455 return aarch64_vnd_type (gdbarch);
2457 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2458 return aarch64_vns_type (gdbarch);
2460 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2461 return aarch64_vnh_type (gdbarch);
2463 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2464 return aarch64_vnb_type (gdbarch);
2466 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2467 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2468 return aarch64_vnv_type (gdbarch);
2470 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2471 return builtin_type (gdbarch)->builtin_uint64;
2473 internal_error (__FILE__, __LINE__,
2474 _("aarch64_pseudo_register_type: bad register number %d"),
2478 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
/* Q/H/B and SVE "v" pseudos are vector-only; D and S additionally belong
   to the float group.  */
2481 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2482 struct reggroup *group)
2484 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2486 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2488 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2489 return group == all_reggroup || group == vector_reggroup;
2490 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2491 return (group == all_reggroup || group == vector_reggroup
2492 || group == float_reggroup);
2493 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2494 return (group == all_reggroup || group == vector_reggroup
2495 || group == float_reggroup);
2496 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2497 return group == all_reggroup || group == vector_reggroup;
2498 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2499 return group == all_reggroup || group == vector_reggroup;
2500 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2501 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2502 return group == all_reggroup || group == vector_reggroup;
2503 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2504 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2507 return group == all_reggroup;
2510 /* Helper for aarch64_pseudo_read_value. */
/* Extract a pseudo register from the raw V register bank: read the
   underlying V register (V0 plus REGNUM_OFFSET) from REGCACHE and copy
   its low REGSIZE bytes into RESULT_VALUE, which is returned.  If the
   raw read fails, RESULT_VALUE's bytes are marked unavailable.  */
2512 static struct value *
2513 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2514 readable_regcache *regcache, int regnum_offset,
2515 int regsize, struct value *result_value)
2517 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2519 /* Enough space for a full vector register. */
/* V0 and SVE Z0 share a register number, so sizing the buffer from
   AARCH64_V0_REGNUM also covers the (larger) Z register on SVE
   targets — the static assert below guards that equivalence.  */
2520 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2521 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2523 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2524 mark_value_bytes_unavailable (result_value, 0,
2525 TYPE_LENGTH (value_type (result_value)));
2527 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2529 return result_value;
2532 /* Implement the "pseudo_register_read_value" gdbarch method. */
2534 static struct value *
2535 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
/* Allocate the result up front; the helper fills it in.  */
2538 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2539 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2541 VALUE_LVAL (result_value) = lval_register;
2542 VALUE_REGNUM (result_value) = regnum;
/* Rebase to a zero-based pseudo index, then dispatch on which bank
   (Q/D/S/H/B, or SVE V) the pseudo register falls into, passing the
   bank-relative offset and that bank's element size.  */
2544 regnum -= gdbarch_num_regs (gdbarch);
2546 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2547 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2548 regnum - AARCH64_Q0_REGNUM,
2549 Q_REGISTER_SIZE, result_value);
2551 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2552 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2553 regnum - AARCH64_D0_REGNUM,
2554 D_REGISTER_SIZE, result_value);
2556 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2557 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2558 regnum - AARCH64_S0_REGNUM,
2559 S_REGISTER_SIZE, result_value);
2561 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2562 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2563 regnum - AARCH64_H0_REGNUM,
2564 H_REGISTER_SIZE, result_value);
2566 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2567 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2568 regnum - AARCH64_B0_REGNUM,
2569 B_REGISTER_SIZE, result_value);
/* SVE-only Vn pseudos map onto the low bytes of the Z registers.  */
2571 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2572 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2573 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2574 regnum - AARCH64_SVE_V0_REGNUM,
2575 V_REGISTER_SIZE, result_value);
2577 gdb_assert_not_reached ("regnum out of bound");
2580 /* Helper for aarch64_pseudo_write. */
/* Write the REGSIZE bytes at BUF into the low part of the raw V
   register V0 + REGNUM_OFFSET, zero-filling the rest of the register.  */
2583 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2584 int regnum_offset, int regsize, const gdb_byte *buf)
2586 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2588 /* Enough space for a full vector register. */
2589 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2590 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2592 /* Ensure the register buffer is zero, we want gdb writes of the
2593 various 'scalar' pseudo registers to behave like architectural
2594 writes, register width bytes are written the remainder are set to
2596 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2598 memcpy (reg_buf, buf, regsize);
2599 regcache->raw_write (v_regnum, reg_buf);
2602 /* Implement the "pseudo_register_write" gdbarch method. */
2605 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2606 int regnum, const gdb_byte *buf)
2608 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
/* Rebase REGNUM to a zero-based pseudo index, then dispatch on the
   pseudo bank exactly as aarch64_pseudo_read_value does.  */
2609 regnum -= gdbarch_num_regs (gdbarch);
2611 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2612 return aarch64_pseudo_write_1 (gdbarch, regcache,
2613 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2616 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2617 return aarch64_pseudo_write_1 (gdbarch, regcache,
2618 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2621 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2622 return aarch64_pseudo_write_1 (gdbarch, regcache,
2623 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2626 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2627 return aarch64_pseudo_write_1 (gdbarch, regcache,
2628 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2631 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2632 return aarch64_pseudo_write_1 (gdbarch, regcache,
2633 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
/* SVE-only Vn pseudos write through to the Z registers.  */
2636 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2637 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2638 return aarch64_pseudo_write_1 (gdbarch, regcache,
2639 regnum - AARCH64_SVE_V0_REGNUM,
2640 V_REGISTER_SIZE, buf)
2642 gdb_assert_not_reached ("regnum out of bound");
2645 /* Callback function for user_reg_add. */
2647 static struct value *
2648 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2650 const int *reg_p = (const int *) baton;
2652 return value_of_register (*reg_p, frame);
2656 /* Implement the "software_single_step" gdbarch method, needed to
2657 single step through atomic sequences on AArch64. */
/* Returns the addresses at which to place breakpoints so that a
   load-exclusive/store-exclusive sequence is stepped over as a whole
   (single-stepping inside it would break the exclusive monitor).  */
2659 static std::vector<CORE_ADDR>
2660 aarch64_software_single_step (struct regcache *regcache)
2662 struct gdbarch *gdbarch = regcache->arch ();
2663 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2664 const int insn_size = 4;
2665 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2666 CORE_ADDR pc = regcache_read_pc (regcache);
/* breaks[0]: past the closing store-exclusive; breaks[1]: the target
   of a conditional branch inside the sequence, if any.  */
2667 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2669 CORE_ADDR closing_insn = 0;
2670 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2671 byte_order_for_code);
2674 int bc_insn_count = 0; /* Conditional branch instruction count. */
2675 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2678 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2681 /* Look for a Load Exclusive instruction which begins the sequence. */
2682 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
/* Scan forward up to ATOMIC_SEQUENCE_LENGTH instructions looking for
   the closing store-exclusive.  */
2685 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2688 insn = read_memory_unsigned_integer (loc, insn_size,
2689 byte_order_for_code);
2691 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2693 /* Check if the instruction is a conditional branch. */
2694 if (inst.opcode->iclass == condbranch)
2696 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
/* At most one conditional branch is supported in the sequence.  */
2698 if (bc_insn_count >= 1)
2701 /* It is, so we'll try to set a breakpoint at the destination. */
2702 breaks[1] = loc + inst.operands[0].imm.value;
2708 /* Look for the Store Exclusive which closes the atomic sequence. */
2709 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2716 /* We didn't find a closing Store Exclusive instruction, fall back. */
2720 /* Insert breakpoint after the end of the atomic sequence. */
2721 breaks[0] = loc + insn_size;
2723 /* Check for duplicated breakpoints, and also check that the second
2724 breakpoint is not within the atomic sequence. */
2726 && (breaks[1] == breaks[0]
2727 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2728 last_breakpoint = 0;
2730 std::vector<CORE_ADDR> next_pcs;
2732 /* Insert the breakpoint at the end of the sequence, and one at the
2733 destination of the conditional branch, if it exists. */
2734 for (index = 0; index <= last_breakpoint; index++)
2735 next_pcs.push_back (breaks[index]);
/* Per-step state carried from displaced_step_copy_insn to
   displaced_step_fixup.  */
2740 struct aarch64_displaced_step_closure : public displaced_step_closure
2742 /* It is true when condition instruction, such as B.CON, TBZ, etc,
2743 is being displaced stepping. */
2746 /* PC adjustment offset after displaced stepping. */
/* Relative to the original instruction's address (FROM).  */
2747 int32_t pc_adjust = 0;
2750 /* Data when visiting instructions for displaced stepping. */
2752 struct aarch64_displaced_step_data
/* Must be first: the visitor callbacks receive a pointer to BASE and
   cast it back to this struct.  */
2754 struct aarch64_insn_data base;
2756 /* The address where the instruction will be executed at. */
2758 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2759 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2760 /* Number of instructions in INSN_BUF. */
2761 unsigned insn_count;
2762 /* Registers when doing displaced stepping. */
2763 struct regcache *regs;
/* The closure being filled in for the fixup phase.  */
2765 aarch64_displaced_step_closure *dsc;
2768 /* Implementation of aarch64_insn_visitor method "b". */
/* Relocate an unconditional branch (B/BL).  OFFSET is relative to the
   original address; IS_BL distinguishes branch-with-link.  */
2771 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2772 struct aarch64_insn_data *data)
2774 struct aarch64_displaced_step_data *dsd
2775 = (struct aarch64_displaced_step_data *) data;
/* Re-target the branch so that, executed from the scratch pad at
   NEW_ADDR, it lands on the original destination.  */
2776 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2778 if (can_encode_int32 (new_offset, 28))
2780 /* Emit B rather than BL, because executing BL on a new address
2781 will get the wrong address into LR. In order to avoid this,
2782 we emit B, and update LR if the instruction is BL. */
2783 emit_b (dsd->insn_buf, 0, new_offset);
/* Otherwise the rebased offset does not fit in the 28-bit immediate:
   execute a NOP in the scratch pad and let the fixup phase move the
   PC by OFFSET instead.  */
2789 emit_nop (dsd->insn_buf);
2791 dsd->dsc->pc_adjust = offset;
/* BL semantics: LR must hold the return address of the ORIGINAL
   instruction, not of the scratch-pad copy.  */
2797 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2798 data->insn_addr + 4);
2802 /* Implementation of aarch64_insn_visitor method "b_cond". */
2805 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2806 struct aarch64_insn_data *data)
2808 struct aarch64_displaced_step_data *dsd
2809 = (struct aarch64_displaced_step_data *) data;
2811 /* GDB has to fix up PC after displaced step this instruction
2812 differently according to the condition is true or false. Instead
2813 of checking COND against conditional flags, we can use
2814 the following instructions, and GDB can tell how to fix up PC
2815 according to the PC value.
2817 B.COND TAKEN ; If cond is true, then jump to TAKEN.
/* Emit B.COND with a +8 target: where the PC stops after the step
   (scratch + 8 vs. scratch + 4) tells the fixup which way the
   condition went.  */
2823 emit_bcond (dsd->insn_buf, cond, 8);
2825 dsd->dsc->pc_adjust = offset;
2826 dsd->insn_count = 1;
2829 /* Dynamically allocate a new register. If we know the register
2830 statically, we should make it a global as above instead of using this
2833 static struct aarch64_register
2834 aarch64_register (unsigned num, int is64)
2836 return (struct aarch64_register) { num, is64 };
2839 /* Implementation of aarch64_insn_visitor method "cb". */
/* Relocate a compare-and-branch (CBZ/CBNZ) on register RN; IS64
   selects the X vs. W view and IS_CBNZ the branch sense.  */
2842 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2843 const unsigned rn, int is64,
2844 struct aarch64_insn_data *data)
2846 struct aarch64_displaced_step_data *dsd
2847 = (struct aarch64_displaced_step_data *) data;
2849 /* The offset is out of range for a compare and branch
2850 instruction. We can use the following instructions instead:
2852 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
/* Same trick as b_cond: branch +8 within the scratch pad and let the
   final PC value tell the fixup whether the branch was taken.  */
2857 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2858 dsd->insn_count = 1;
2860 dsd->dsc->pc_adjust = offset;
2863 /* Implementation of aarch64_insn_visitor method "tb". */
/* Relocate a test-bit-and-branch (TBZ/TBNZ) on bit BIT of register
   RT; IS_TBNZ selects the branch sense.  */
2866 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2867 const unsigned rt, unsigned bit,
2868 struct aarch64_insn_data *data)
2870 struct aarch64_displaced_step_data *dsd
2871 = (struct aarch64_displaced_step_data *) data;
2873 /* The offset is out of range for a test bit and branch
2874 instruction. We can use the following instructions instead:
2876 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
/* Branch +8 within the scratch pad; the stopped PC tells the fixup
   whether the bit test passed.  */
2882 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2883 dsd->insn_count = 1;
2885 dsd->dsc->pc_adjust = offset;
2888 /* Implementation of aarch64_insn_visitor method "adr". */
/* ADR/ADRP are PC-relative, so executing them in the scratch pad
   would produce the wrong result; compute the value here and write it
   straight into the destination register RD instead.  */
2891 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2892 const int is_adrp, struct aarch64_insn_data *data)
2894 struct aarch64_displaced_step_data *dsd
2895 = (struct aarch64_displaced_step_data *) data;
2896 /* We know exactly the address the ADR{P,} instruction will compute.
2897 We can just write it to the destination register. */
2898 CORE_ADDR address = data->insn_addr + offset;
2902 /* Clear the lower 12 bits of the offset to get the 4K page. */
2903 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2907 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
/* A NOP is executed in place of the instruction; the fixup then just
   steps the PC past the original 4-byte instruction.  */
2910 dsd->dsc->pc_adjust = 4;
2911 emit_nop (dsd->insn_buf);
2912 dsd->insn_count = 1;
2915 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
/* LDR (literal) is PC-relative: pre-compute the literal's address,
   put it in RT, and emit an LDR from [RT, #0] so that executing the
   relocated copy loads from the correct (original) address.  */
2918 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2919 const unsigned rt, const int is64,
2920 struct aarch64_insn_data *data)
2922 struct aarch64_displaced_step_data *dsd
2923 = (struct aarch64_displaced_step_data *) data;
2924 CORE_ADDR address = data->insn_addr + offset;
2925 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2927 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
/* IS_SW selects LDRSW (sign-extending 32->64) vs. plain LDR.  */
2931 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2932 aarch64_register (rt, 1), zero);
2934 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2935 aarch64_register (rt, 1), zero);
2937 dsd->dsc->pc_adjust = 4;
2940 /* Implementation of aarch64_insn_visitor method "others". */
2943 aarch64_displaced_step_others (const uint32_t insn,
2944 struct aarch64_insn_data *data)
2946 struct aarch64_displaced_step_data *dsd
2947 = (struct aarch64_displaced_step_data *) data;
2949 aarch64_emit_insn (dsd->insn_buf, insn);
2950 dsd->insn_count = 1;
2952 if ((insn & 0xfffffc1f) == 0xd65f0000)
2955 dsd->dsc->pc_adjust = 0;
2958 dsd->dsc->pc_adjust = 4;
/* Visitor callbacks used by aarch64_relocate_instruction when copying
   an instruction for displaced stepping; order matches the
   aarch64_insn_visitor fields (b, b_cond, cb, tb, adr, ldr_literal,
   others).  */
2961 static const struct aarch64_insn_visitor visitor =
2963 aarch64_displaced_step_b,
2964 aarch64_displaced_step_b_cond,
2965 aarch64_displaced_step_cb,
2966 aarch64_displaced_step_tb,
2967 aarch64_displaced_step_adr,
2968 aarch64_displaced_step_ldr_literal,
2969 aarch64_displaced_step_others,
2972 /* Implement the "displaced_step_copy_insn" gdbarch method. */
/* Copy (a possibly rewritten form of) the instruction at FROM into
   the scratch pad at TO, returning a closure for the fixup phase, or
   NULL-equivalent failure paths where displaced stepping cannot be
   used (e.g. inside an atomic sequence).  */
2974 struct displaced_step_closure *
2975 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2976 CORE_ADDR from, CORE_ADDR to,
2977 struct regcache *regs)
2979 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2980 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2981 struct aarch64_displaced_step_data dsd;
2984 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2987 /* Look for a Load Exclusive instruction which begins the sequence. */
2988 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2990 /* We can't displaced step atomic sequences. */
2994 std::unique_ptr<aarch64_displaced_step_closure> dsc
2995 (new aarch64_displaced_step_closure);
2996 dsd.base.insn_addr = from;
2999 dsd.dsc = dsc.get ();
/* Let the per-instruction-class visitor rewrite the instruction into
   DSD.insn_buf as needed.  */
3001 aarch64_relocate_instruction (insn, &visitor,
3002 (struct aarch64_insn_data *) &dsd);
3003 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
3005 if (dsd.insn_count != 0)
3009 /* Instruction can be relocated to scratch pad. Copy
3010 relocated instruction(s) there. */
3011 for (i = 0; i < dsd.insn_count; i++)
3013 if (debug_displaced)
3015 debug_printf ("displaced: writing insn ");
3016 debug_printf ("%.8x", dsd.insn_buf[i]);
3017 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
3019 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3020 (ULONGEST) dsd.insn_buf[i]);
/* Ownership of the closure passes to the caller.  */
3028 return dsc.release ();
3031 /* Implement the "displaced_step_fixup" gdbarch method. */
/* After the relocated copy at TO has executed, rewrite the PC so the
   inferior continues as if the original instruction at FROM had run.  */
3034 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3035 struct displaced_step_closure *dsc_,
3036 CORE_ADDR from, CORE_ADDR to,
3037 struct regcache *regs)
3039 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
3045 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
/* For conditional branches the visitor emitted a +8 branch in the
   scratch pad; PC - TO distinguishes taken (8) from not taken (4).  */
3048 /* Condition is true. */
3050 else if (pc - to == 4)
3052 /* Condition is false. */
3056 gdb_assert_not_reached ("Unexpected PC value after displaced stepping")
3059 if (dsc->pc_adjust != 0)
3061 if (debug_displaced)
3063 debug_printf ("displaced: fixup: set PC to %s:%d\n",
3064 paddress (gdbarch, from), dsc->pc_adjust);
/* PC_ADJUST is relative to the ORIGINAL address.  */
3066 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3067 from + dsc->pc_adjust);
3071 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
/* Body not visible in this rendering; presumably reports whether
   hardware single-step is used over the scratch pad — confirm against
   upstream before relying on the return value.  */
3074 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
3075 struct displaced_step_closure *closure)
3080 /* Get the correct target description for the given VQ value.
3081 If VQ is zero then it is assumed SVE is not supported.
3082 (It is not possible to set VQ to zero on an SVE system). */
3085 aarch64_read_description (uint64_t vq, bool pauth_p)
3087 if (vq > AARCH64_MAX_SVE_VQ)
3088 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3089 AARCH64_MAX_SVE_VQ);
/* Descriptions are created lazily and memoized per (VQ, pauth)
   combination in tdesc_aarch64_list.  */
3091 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
3095 tdesc = aarch64_create_target_description (vq, pauth_p);
3096 tdesc_aarch64_list[vq][pauth_p] = tdesc;
3102 /* Return the VQ used when creating the target description TDESC. */
/* Returns 0 when TDESC has no registers or no SVE feature.  */
3105 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3107 const struct tdesc_feature *feature_sve;
3109 if (!tdesc_has_registers (tdesc))
3112 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3114 if (feature_sve == nullptr)
/* Derive VQ from the bit size of the first Z register: bits -> bytes
   (VL), then VL -> VQ.  */
3117 uint64_t vl = tdesc_register_bitsize (feature_sve,
3118 aarch64_sve_register_names[0]) / 8;
3119 return sve_vq_from_vl (vl);
3122 /* Add all the expected register sets into GDBARCH. */
3125 aarch64_add_reggroups (struct gdbarch *gdbarch)
3127 reggroup_add (gdbarch, general_reggroup);
3128 reggroup_add (gdbarch, float_reggroup);
3129 reggroup_add (gdbarch, system_reggroup);
3130 reggroup_add (gdbarch, vector_reggroup);
3131 reggroup_add (gdbarch, all_reggroup);
3132 reggroup_add (gdbarch, save_reggroup);
3133 reggroup_add (gdbarch, restore_reggroup);
3136 /* Implement the "cannot_store_register" gdbarch method. */
3139 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3141 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3143 if (!tdep->has_pauth ())
3146 /* Pointer authentication registers are read-only. */
3147 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3148 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3151 /* Initialize the current architecture based on INFO. If possible,
3152 re-use an architecture from ARCHES, which is a list of
3153 architectures already created during this debugging session.
3155 Called e.g. at program startup, when reading a core file, and when
3156 reading a binary file. */
3158 static struct gdbarch *
3159 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3161 const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3162 const struct tdesc_feature *feature_pauth;
3163 bool valid_p = true;
3164 int i, num_regs = 0, num_pseudo_regs = 0;
3165 int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
3167 /* Use the vector length passed via the target info. Here -1 is used for no
3168 SVE, and 0 is unset. If unset then use the vector length from the existing
3171 if (info.id == (int *) -1)
3173 else if (info.id != 0)
3174 vq = (uint64_t) info.id;
3176 vq = aarch64_get_tdesc_vq (info.target_desc);
3178 if (vq > AARCH64_MAX_SVE_VQ)
3179 internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3180 pulongest (vq), AARCH64_MAX_SVE_VQ);
3182 /* If there is already a candidate, use it. */
/* An existing gdbarch only matches if its SVE vector length agrees.  */
3183 for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3184 best_arch != nullptr;
3185 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3187 struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch);
3188 if (tdep && tdep->vq == vq)
3189 return best_arch->gdbarch;
3192 /* Ensure we always have a target descriptor, and that it is for the given VQ
3194 const struct target_desc *tdesc = info.target_desc;
3195 if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
3196 tdesc = aarch64_read_description (vq, false);
/* Locate the optional/mandatory feature sets in the description.  */
3199 feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core");
3200 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3201 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3202 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3204 if (feature_core == nullptr)
3207 struct tdesc_arch_data *tdesc_data = tdesc_data_alloc ();
3209 /* Validate the description provides the mandatory core R registers
3210 and allocate their numbers. */
3211 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3212 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3213 AARCH64_X0_REGNUM + i,
3214 aarch64_r_register_names[i]);
3216 num_regs = AARCH64_X0_REGNUM + i;
3218 /* Add the V registers. */
3219 if (feature_fpu != nullptr)
/* fpu and sve features are mutually exclusive in a description.  */
3221 if (feature_sve != nullptr)
3222 error (_("Program contains both fpu and SVE features."));
3224 /* Validate the description provides the mandatory V registers
3225 and allocate their numbers. */
3226 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3227 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3228 AARCH64_V0_REGNUM + i,
3229 aarch64_v_register_names[i]);
3231 num_regs = AARCH64_V0_REGNUM + i;
3234 /* Add the SVE registers. */
3235 if (feature_sve != nullptr)
3237 /* Validate the description provides the mandatory SVE registers
3238 and allocate their numbers. */
3239 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3240 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3241 AARCH64_SVE_Z0_REGNUM + i,
3242 aarch64_sve_register_names[i]);
3244 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3245 num_pseudo_regs += 32; /* add the Vn register pseudos. */
/* Either vector feature implies the 5 banks of 32 scalar pseudos.  */
3248 if (feature_fpu != nullptr || feature_sve != nullptr)
3250 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3251 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3252 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3253 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3254 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3257 /* Add the pauth registers. */
3258 if (feature_pauth != NULL)
3260 first_pauth_regnum = num_regs;
3261 pauth_ra_state_offset = num_pseudo_regs;
3262 /* Validate the descriptor provides the mandatory PAUTH registers and
3263 allocate their numbers. */
3264 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3265 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
3266 first_pauth_regnum + i,
3267 aarch64_pauth_register_names[i]);
3270 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
/* Bail out (releasing the tdesc data) if any mandatory register was
   missing from the description.  */
3275 tdesc_data_cleanup (tdesc_data);
3279 /* AArch64 code is always little-endian. */
3280 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3282 struct gdbarch_tdep *tdep = XCNEW (struct gdbarch_tdep);
3283 struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3285 /* This should be low enough for everything. */
3286 tdep->lowest_pc = 0x20;
3287 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3288 tdep->jb_elt_size = 8;
3290 tdep->pauth_reg_base = first_pauth_regnum;
3291 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3292 : pauth_ra_state_offset + num_regs;
3294 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3295 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3297 /* Advance PC across function entry code. */
3298 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3300 /* The stack grows downward. */
3301 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3303 /* Breakpoint manipulation. */
3304 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3305 aarch64_breakpoint::kind_from_pc);
3306 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3307 aarch64_breakpoint::bp_from_kind);
3308 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3309 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3311 /* Information about registers, etc. */
3312 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3313 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3314 set_gdbarch_num_regs (gdbarch, num_regs);
3316 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3317 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3318 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3319 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3320 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3321 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3322 aarch64_pseudo_register_reggroup_p);
3323 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
/* Fundamental C type sizes for the AArch64 (LP64) ABI.  */
3326 set_gdbarch_short_bit (gdbarch, 16);
3327 set_gdbarch_int_bit (gdbarch, 32);
3328 set_gdbarch_float_bit (gdbarch, 32);
3329 set_gdbarch_double_bit (gdbarch, 64);
3330 set_gdbarch_long_double_bit (gdbarch, 128);
3331 set_gdbarch_long_bit (gdbarch, 64);
3332 set_gdbarch_long_long_bit (gdbarch, 64);
3333 set_gdbarch_ptr_bit (gdbarch, 64);
3334 set_gdbarch_char_signed (gdbarch, 0);
3335 set_gdbarch_wchar_signed (gdbarch, 0);
3336 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3337 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3338 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3339 set_gdbarch_type_align (gdbarch, aarch64_type_align);
3341 /* Internal <-> external register number maps. */
3342 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3344 /* Returning results. */
3345 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3348 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3350 /* Virtual tables. */
3351 set_gdbarch_vbit_in_delta (gdbarch, 1);
3353 /* Register architecture. */
3354 aarch64_add_reggroups (gdbarch);
3356 /* Hook in the ABI-specific overrides, if they have been registered. */
3357 info.target_desc = tdesc;
3358 info.tdesc_data = tdesc_data;
3359 gdbarch_init_osabi (info, gdbarch);
3361 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3362 /* Register DWARF CFA vendor handler. */
3363 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3364 aarch64_execute_dwarf_cfa_vendor_op);
3366 /* Add some default predicates. */
/* Unwinder order matters: stub, then DWARF, then prologue analysis.  */
3367 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3368 dwarf2_append_unwinders (gdbarch);
3369 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3371 frame_base_set_default (gdbarch, &aarch64_normal_base);
3373 /* Now we have tuned the configuration, set a few final things,
3374 based on what the OS ABI has told us. */
3376 if (tdep->jb_pc >= 0)
3377 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3379 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3381 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3383 /* Add standard register aliases. */
3384 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3385 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3386 value_of_aarch64_user_reg,
3387 &aarch64_register_aliases[i].regnum);
3389 register_aarch64_ravenscar_ops (gdbarch);
/* Dump the aarch64-specific tdep state to FILE (for "maint print
   architecture").  */
3395 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3397 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3402 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3403 paddress (gdbarch, tdep->lowest_pc));
3409 static void aarch64_process_record_test (void);
/* Module initializer: register the gdbarch init routine, the debug
   setting, and (under GDB_SELF_TEST) the unit tests.  */
3414 _initialize_aarch64_tdep (void)
3416 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3419 /* Debug this file's internals. */
3420 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3421 Set AArch64 debugging."), _("\
3422 Show AArch64 debugging."), _("\
3423 When on, AArch64 specific debugging is enabled."),
3426 &setdebuglist, &showdebuglist);
3429 selftests::register_test ("aarch64-analyze-prologue",
3430 selftests::aarch64_analyze_prologue_test);
3431 selftests::register_test ("aarch64-process-record",
3432 selftests::aarch64_process_record_test);
3433 selftests::record_xml_tdesc ("aarch64.xml",
3434 aarch64_create_target_description (0, false));
/* AArch64 process record-replay related structures, defines etc.  */

/* Allocate REGS (an array of LENGTH uint32_t register numbers) and
   fill it from RECORD_BUF.  No-op when LENGTH is zero.  The "®S"
   mojibake in the previous revision (a mis-encoded "&REGS") is fixed
   here.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int reg_len = LENGTH; \
            if (reg_len) \
              { \
                REGS = XNEWVEC (uint32_t, reg_len); \
                memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
              } \
          } \
        while (0)

/* Allocate MEMS (an array of LENGTH struct aarch64_mem_r) and fill it
   from RECORD_BUF.  No-op when LENGTH is zero.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
        do  \
          { \
            unsigned int mem_len = LENGTH; \
            if (mem_len) \
              { \
                MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
                memcpy (&MEMS[0], &RECORD_BUF[0], \
                        sizeof (struct aarch64_mem_r) * LENGTH); \
              } \
          } \
        while (0)
3465 /* AArch64 record/replay structures and enumerations. */
/* One recorded memory access: a length/address pair.  */
3467 struct aarch64_mem_r
3469 uint64_t len; /* Record length. */
3470 uint64_t addr; /* Memory address. */
/* Status codes returned by the per-class record handlers below.  */
3473 enum aarch64_record_result
3475 AARCH64_RECORD_SUCCESS,
3476 AARCH64_RECORD_UNSUPPORTED,
3477 AARCH64_RECORD_UNKNOWN
/* Decoding context threaded through all the record handlers for a
   single instruction.  */
3480 typedef struct insn_decode_record_t
3482 struct gdbarch *gdbarch;
3483 struct regcache *regcache;
3484 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3485 uint32_t aarch64_insn; /* Insn to be recorded. */
3486 uint32_t mem_rec_count; /* Count of memory records. */
3487 uint32_t reg_rec_count; /* Count of register records. */
3488 uint32_t *aarch64_regs; /* Registers to be recorded. */
3489 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3490 } insn_decode_record;
3492 /* Record handler for data processing - register instructions. */
/* Records the destination register(s), and CPSR when the instruction
   form sets flags; bit 28 splits the two encoding sub-groups.  */
3495 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3497 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3498 uint32_t record_buf[4];
3500 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3501 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3502 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3504 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3508 /* Logical (shifted register). */
3509 if (insn_bits24_27 == 0x0a)
3510 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3512 else if (insn_bits24_27 == 0x0b)
3513 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3515 return AARCH64_RECORD_UNKNOWN;
3517 record_buf[0] = reg_rd;
3518 aarch64_insn_r->reg_rec_count = 1;
/* Flag-setting forms also clobber CPSR.  */
3520 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3524 if (insn_bits24_27 == 0x0b)
3526 /* Data-processing (3 source). */
3527 record_buf[0] = reg_rd;
3528 aarch64_insn_r->reg_rec_count = 1;
3530 else if (insn_bits24_27 == 0x0a)
3532 if (insn_bits21_23 == 0x00)
3534 /* Add/subtract (with carry). */
3535 record_buf[0] = reg_rd;
3536 aarch64_insn_r->reg_rec_count = 1;
3537 if (bit (aarch64_insn_r->aarch64_insn, 29))
3539 record_buf[1] = AARCH64_CPSR_REGNUM;
3540 aarch64_insn_r->reg_rec_count = 2;
3543 else if (insn_bits21_23 == 0x02)
3545 /* Conditional compare (register) and conditional compare
3546 (immediate) instructions. */
3547 record_buf[0] = AARCH64_CPSR_REGNUM;
3548 aarch64_insn_r->reg_rec_count = 1;
3550 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3552 /* Conditional select. */
3553 /* Data-processing (2 source). */
3554 /* Data-processing (1 source). */
3555 record_buf[0] = reg_rd;
3556 aarch64_insn_r->reg_rec_count = 1;
3559 return AARCH64_RECORD_UNKNOWN;
3563 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3565 return AARCH64_RECORD_SUCCESS;
3568 /* Record handler for data processing - immediate instructions. */
/* Records the destination register, and CPSR for flag-setting forms;
   dispatches on bits [27:24] with bit 23 disambiguating move-wide
   from logical (immediate).  */
3571 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3573 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3574 uint32_t record_buf[4];
3576 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3577 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3578 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3580 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3581 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3582 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3584 record_buf[0] = reg_rd;
3585 aarch64_insn_r->reg_rec_count = 1;
3587 else if (insn_bits24_27 == 0x01)
3589 /* Add/Subtract (immediate). */
3590 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3591 record_buf[0] = reg_rd;
3592 aarch64_insn_r->reg_rec_count = 1;
3594 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3596 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3598 /* Logical (immediate). */
3599 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3600 record_buf[0] = reg_rd;
3601 aarch64_insn_r->reg_rec_count = 1;
3603 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3606 return AARCH64_RECORD_UNKNOWN;
3608 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3610 return AARCH64_RECORD_SUCCESS;
3613 /* Record handler for branch, exception generation and system instructions. */
3616 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3618 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3619 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3620 uint32_t record_buf[4];
3622 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3623 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3624 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3626 if (insn_bits28_31 == 0x0d)
3628 /* Exception generation instructions. */
3629 if (insn_bits24_27 == 0x04)
3631 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3632 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3633 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3635 ULONGEST svc_number;
3637 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3639 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3643 return AARCH64_RECORD_UNSUPPORTED;
3645 /* System instructions. */
3646 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3648 uint32_t reg_rt, reg_crn;
3650 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3651 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3653 /* Record rt in case of sysl and mrs instructions. */
3654 if (bit (aarch64_insn_r->aarch64_insn, 21))
3656 record_buf[0] = reg_rt;
3657 aarch64_insn_r->reg_rec_count = 1;
3659 /* Record cpsr for hint and msr(immediate) instructions. */
3660 else if (reg_crn == 0x02 || reg_crn == 0x04)
3662 record_buf[0] = AARCH64_CPSR_REGNUM;
3663 aarch64_insn_r->reg_rec_count = 1;
3666 /* Unconditional branch (register). */
3667 else if((insn_bits24_27 & 0x0e) == 0x06)
3669 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3670 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3671 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3674 return AARCH64_RECORD_UNKNOWN;
3676 /* Unconditional branch (immediate). */
3677 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3679 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3680 if (bit (aarch64_insn_r->aarch64_insn, 31))
3681 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3684 /* Compare & branch (immediate), Test & branch (immediate) and
3685 Conditional branch (immediate). */
3686 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3688 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3690 return AARCH64_RECORD_SUCCESS;
3693 /* Record handler for advanced SIMD load and store instructions. */
/* NOTE(review): this extract is line-gapped — the function's opening
   brace, several `else`/`break` lines, and the switch-case bodies that
   assign rpt/selem are outside the visible lines.  Comments below
   describe only what the visible lines establish.  */
3696 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
/* Running byte offset from the base address for successive element
   accesses recorded below.  */
3699 uint64_t addr_offset = 0;
/* Register and memory records accumulated here, then flushed via
   REG_ALLOC/MEM_ALLOC at the end.  record_buf_mem holds (len, addr)
   pairs, hence mem_rec_count = mem_index / 2.  */
3700 uint32_t record_buf[24];
3701 uint64_t record_buf_mem[24];
3702 uint32_t reg_rn, reg_rt;
3703 uint32_t reg_index = 0, mem_index = 0;
3704 uint8_t opcode_bits, size_bits;
/* Decode the fields common to all ASIMD load/store forms:
   Rt (0..4), Rn (5..9), size (10..11), opcode (12..15).  */
3706 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3707 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3708 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3709 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
/* Base address comes from the current value of Rn.  */
3710 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3713 debug_printf ("Process record: Advanced SIMD load/store\n");
3715 /* Load/store single structure. */
3716 if (bit (aarch64_insn_r->aarch64_insn, 24))
/* replicate is set for the LD*R (load-and-replicate) forms.  */
3718 uint8_t sindex, scale, selem, esize, replicate = 0;
3719 scale = opcode_bits >> 2;
/* selem = number of structure elements, derived from opcode bit 1
   and insn bit 21, biased by one.  */
3720 selem = ((opcode_bits & 0x02) |
3721 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
/* Validity checks on the size field for each scale; invalid
   combinations are rejected as unknown encodings.  */
3725 if (size_bits & 0x01)
3726 return AARCH64_RECORD_UNKNOWN;
3729 if ((size_bits >> 1) & 0x01)
3730 return AARCH64_RECORD_UNKNOWN;
3731 if (size_bits & 0x01)
3733 if (!((opcode_bits >> 1) & 0x01))
3736 return AARCH64_RECORD_UNKNOWN;
/* Bit 22 is the L (load) bit; with an even opcode this is a
   load-and-replicate form.  */
3740 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3747 return AARCH64_RECORD_UNKNOWN;
/* Replicate form: each of the selem destination vectors is
   overwritten, wrapping the register number modulo 32.  */
3753 for (sindex = 0; sindex < selem; sindex++)
3755 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3756 reg_rt = (reg_rt + 1) % 32;
/* Non-replicate single-structure: loads record the target vector
   register, stores record the (len, addr) of the memory written.  */
3760 for (sindex = 0; sindex < selem; sindex++)
3762 if (bit (aarch64_insn_r->aarch64_insn, 22))
3763 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3766 record_buf_mem[mem_index++] = esize / 8;
3767 record_buf_mem[mem_index++] = address + addr_offset;
3769 addr_offset = addr_offset + (esize / 8);
3770 reg_rt = (reg_rt + 1) % 32;
3774 /* Load/store multiple structure. */
3777 uint8_t selem, esize, rpt, elements;
3778 uint8_t eindex, rindex;
/* Element size in bits; bit 30 (Q) selects 128- vs 64-bit vectors,
   which fixes the per-register element count.  */
3780 esize = 8 << size_bits;
3781 if (bit (aarch64_insn_r->aarch64_insn, 30))
3782 elements = 128 / esize;
3784 elements = 64 / esize;
/* The case bodies assigning rpt (register repeat count) and selem
   per LD/ST variant are elided from this extract.  */
3786 switch (opcode_bits)
3788 /*LD/ST4 (4 Registers). */
3793 /*LD/ST1 (4 Registers). */
3798 /*LD/ST3 (3 Registers). */
3803 /*LD/ST1 (3 Registers). */
3808 /*LD/ST1 (1 Register). */
3813 /*LD/ST2 (2 Registers). */
3818 /*LD/ST1 (2 Registers). */
3824 return AARCH64_RECORD_UNSUPPORTED;
/* Walk every (repeat, element, structure-element) access; loads
   record destination vectors, stores record memory spans.  */
3827 for (rindex = 0; rindex < rpt; rindex++)
3828 for (eindex = 0; eindex < elements; eindex++)
3830 uint8_t reg_tt, sindex;
3831 reg_tt = (reg_rt + rindex) % 32;
3832 for (sindex = 0; sindex < selem; sindex++)
3834 if (bit (aarch64_insn_r->aarch64_insn, 22))
3835 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3838 record_buf_mem[mem_index++] = esize / 8;
3839 record_buf_mem[mem_index++] = address + addr_offset;
3841 addr_offset = addr_offset + (esize / 8);
3842 reg_tt = (reg_tt + 1) % 32;
/* Bit 23 marks the post-indexed writeback forms: Rn itself is
   modified and must be recorded too.  */
3847 if (bit (aarch64_insn_r->aarch64_insn, 23))
3848 record_buf[reg_index++] = reg_rn;
3850 aarch64_insn_r->reg_rec_count = reg_index;
/* Memory records were pushed as (len, addr) pairs.  */
3851 aarch64_insn_r->mem_rec_count = mem_index / 2;
3852 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3854 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3856 return AARCH64_RECORD_SUCCESS;
3859 /* Record handler for load and store instructions. */
/* NOTE(review): line-gapped extract — braces, some `else` lines and a
   few statements are not visible.  Comments note only what the visible
   lines establish.  Dispatches on the major load/store encoding groups;
   loads record the target register(s), stores record the (len, addr)
   of the memory about to be overwritten.  */
3862 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3864 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3865 uint8_t insn_bit23, insn_bit21;
3866 uint8_t opc, size_bits, ld_flag, vector_flag;
3867 uint32_t reg_rn, reg_rt, reg_rt2;
3868 uint64_t datasize, offset;
3869 uint32_t record_buf[8];
3870 uint64_t record_buf_mem[8];
/* Common field decode.  ld_flag is bit 22 (the L bit);
   vector_flag is bit 26 (V bit, SIMD&FP register file).  */
3873 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3874 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3875 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3876 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3877 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3878 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3879 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3880 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3881 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3882 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3883 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3885 /* Load/store exclusive. */
3886 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3889 debug_printf ("Process record: load/store exclusive\n");
/* Loads: Rt, and Rt2 for the pair forms.  */
3893 record_buf[0] = reg_rt;
3894 aarch64_insn_r->reg_rec_count = 1;
3897 record_buf[1] = reg_rt2;
3898 aarch64_insn_r->reg_rec_count = 2;
/* Stores: pair forms cover twice the element size.  */
3904 datasize = (8 << size_bits) * 2;
3906 datasize = (8 << size_bits);
3907 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3909 record_buf_mem[0] = datasize / 8;
3910 record_buf_mem[1] = address;
3911 aarch64_insn_r->mem_rec_count = 1;
/* STXR-style stores also write the status register Rs (bits 16..20).  */
3914 /* Save register rs. */
3915 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3916 aarch64_insn_r->reg_rec_count = 1;
3920 /* Load register (literal) instructions decoding. */
3921 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3924 debug_printf ("Process record: load register (literal)\n");
/* PC-relative loads only write Rt — vector or general file per V bit.  */
3926 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3928 record_buf[0] = reg_rt;
3929 aarch64_insn_r->reg_rec_count = 1;
3931 /* All types of load/store pair instructions decoding. */
3932 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3935 debug_printf ("Process record: load/store pair\n");
/* Load pair: both destination registers are modified.  */
3941 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3942 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3946 record_buf[0] = reg_rt;
3947 record_buf[1] = reg_rt2;
3949 aarch64_insn_r->reg_rec_count = 2;
/* Store pair: compute the effective address from the sign-extended,
   scaled imm7 field (bits 15..21; bit 0x40 is the sign).  */
3954 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3956 size_bits = size_bits >> 1;
3957 datasize = 8 << (2 + size_bits);
3958 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3959 offset = offset << (2 + size_bits);
3960 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
/* Post-indexed forms use the unmodified base; all others apply the
   offset before the access.  */
3962 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3964 if (imm7_off & 0x40)
3965 address = address - offset;
3967 address = address + offset;
/* Two adjacent memory records, one per register of the pair.  */
3970 record_buf_mem[0] = datasize / 8;
3971 record_buf_mem[1] = address;
3972 record_buf_mem[2] = datasize / 8;
3973 record_buf_mem[3] = address + (datasize / 8);
3974 aarch64_insn_r->mem_rec_count = 2;
/* Writeback forms also modify the base register.  */
3976 if (bit (aarch64_insn_r->aarch64_insn, 23))
3977 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3979 /* Load/store register (unsigned immediate) instructions. */
3980 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3982 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
/* PRFM touches no register or memory state — nothing to record.  */
3992 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3994 /* PRFM (immediate) */
3995 return AARCH64_RECORD_SUCCESS;
3997 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3999 /* LDRSW (immediate) */
4013 debug_printf ("Process record: load/store (unsigned immediate):"
4014 " size %x V %d opc %x\n", size_bits, vector_flag,
/* Store path: unsigned imm12 (bits 10..21) scaled by the size.  */
4020 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4021 datasize = 8 << size_bits;
4022 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4024 offset = offset << size_bits;
4025 address = address + offset;
4027 record_buf_mem[0] = datasize >> 3;
4028 record_buf_mem[1] = address;
4029 aarch64_insn_r->mem_rec_count = 1;
/* Load path: record the destination register only.  */
4034 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4036 record_buf[0] = reg_rt;
4037 aarch64_insn_r->reg_rec_count = 1;
4040 /* Load/store register (register offset) instructions. */
4041 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4042 && insn_bits10_11 == 0x02 && insn_bit21)
4045 debug_printf ("Process record: load/store (register offset)\n");
4046 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4053 if (size_bits != 0x03)
4056 return AARCH64_RECORD_UNKNOWN;
/* Store path: offset comes from Rm (bits 16..20), optionally scaled
   when the S bit (insn bit 12) is set.  */
4060 ULONGEST reg_rm_val;
4062 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
/* NOTE(review): "®_rm_val" below is a mojibake of "&reg_rm_val"
   ("&reg" collapsed into the U+00AE sign) — fix the file encoding.  */
4063 bits (aarch64_insn_r->aarch64_insn, 16, 20), ®_rm_val);
4064 if (bit (aarch64_insn_r->aarch64_insn, 12))
4065 offset = reg_rm_val << size_bits;
4067 offset = reg_rm_val;
4068 datasize = 8 << size_bits;
4069 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4071 address = address + offset;
4072 record_buf_mem[0] = datasize >> 3;
4073 record_buf_mem[1] = address;
4074 aarch64_insn_r->mem_rec_count = 1;
4079 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4081 record_buf[0] = reg_rt;
4082 aarch64_insn_r->reg_rec_count = 1;
4085 /* Load/store register (immediate and unprivileged) instructions. */
4086 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4091 debug_printf ("Process record: load/store "
4092 "(immediate and unprivileged)\n");
4094 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4101 if (size_bits != 0x03)
4104 return AARCH64_RECORD_UNKNOWN;
/* Store path: sign-extend the imm9 field (bits 12..20;
   bit 0x0100 is the sign).  */
4109 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4110 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4111 datasize = 8 << size_bits;
4112 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
/* Post-indexed (bits 10..11 == 01) accesses the unmodified base.  */
4114 if (insn_bits10_11 != 0x01)
4116 if (imm9_off & 0x0100)
4117 address = address - offset;
4119 address = address + offset;
4121 record_buf_mem[0] = datasize >> 3;
4122 record_buf_mem[1] = address;
4123 aarch64_insn_r->mem_rec_count = 1;
4128 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4130 record_buf[0] = reg_rt;
4131 aarch64_insn_r->reg_rec_count = 1;
/* Pre/post-indexed forms write the base register back.  */
4133 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4134 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4136 /* Advanced SIMD load/store instructions. */
4138 return aarch64_record_asimd_load_store (aarch64_insn_r);
4140 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4142 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4144 return AARCH64_RECORD_SUCCESS;
4147 /* Record handler for data processing SIMD and floating point instructions. */
/* NOTE(review): line-gapped extract — braces and some `else` lines are
   not visible.  Exactly one destination register is recorded per
   recognized encoding (see the gdb_assert at the end); the destination
   file (general X, vector V, or CPSR) depends on the instruction class.  */
4150 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4152 uint8_t insn_bit21, opcode, rmode, reg_rd;
4153 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4154 uint8_t insn_bits11_14;
4155 uint32_t record_buf[2];
/* Field decode: opcode (16..18) and rmode (19..20) select the
   conversion/move forms; Rd is the destination in bits 0..4.  */
4157 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4158 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4159 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4160 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4161 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4162 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4163 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4164 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4165 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4168 debug_printf ("Process record: data processing SIMD/FP: ");
/* Scalar floating-point group.  */
4170 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4172 /* Floating point - fixed point conversion instructions. */
4176 debug_printf ("FP - fixed point conversion");
/* FP->integer conversions land in the general file; the converse
   forms write a vector register.  */
4178 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4179 record_buf[0] = reg_rd;
4181 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4183 /* Floating point - conditional compare instructions. */
4184 else if (insn_bits10_11 == 0x01)
4187 debug_printf ("FP - conditional compare");
/* Compares only write the condition flags.  */
4189 record_buf[0] = AARCH64_CPSR_REGNUM;
4191 /* Floating point - data processing (2-source) and
4192 conditional select instructions. */
4193 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4196 debug_printf ("FP - DP (2-source)");
4198 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4200 else if (insn_bits10_11 == 0x00)
4202 /* Floating point - immediate instructions. */
4203 if ((insn_bits12_15 & 0x01) == 0x01
4204 || (insn_bits12_15 & 0x07) == 0x04)
4207 debug_printf ("FP - immediate");
4208 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4210 /* Floating point - compare instructions. */
/* NOTE(review): the debug text below says "FP - immediate" but this
   branch handles FP compares — looks copy-pasted; consider fixing.  */
4211 else if ((insn_bits12_15 & 0x03) == 0x02)
4214 debug_printf ("FP - immediate");
4215 record_buf[0] = AARCH64_CPSR_REGNUM;
4217 /* Floating point - integer conversions instructions. */
4218 else if (insn_bits12_15 == 0x00)
4220 /* Convert float to integer instruction. */
4221 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4224 debug_printf ("float to int conversion");
4226 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4228 /* Convert integer to float instruction. */
4229 else if ((opcode >> 1) == 0x01 && !rmode)
4232 debug_printf ("int to float conversion");
4234 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4236 /* Move float to integer instruction. */
4237 else if ((opcode >> 1) == 0x03)
4240 debug_printf ("move float to int");
/* opcode bit 0 selects direction: to general (X) or vector (V).  */
4242 if (!(opcode & 0x01))
4243 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4245 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4248 return AARCH64_RECORD_UNKNOWN;
4251 return AARCH64_RECORD_UNKNOWN;
4254 return AARCH64_RECORD_UNKNOWN;
/* Advanced SIMD group.  */
4256 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4259 debug_printf ("SIMD copy");
4261 /* Advanced SIMD copy instructions. */
4262 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4263 && !bit (aarch64_insn_r->aarch64_insn, 15)
4264 && bit (aarch64_insn_r->aarch64_insn, 10))
/* SMOV/UMOV (bits 11..14 == 0101/0111) extract into the
   general register file; other copies write a vector.  */
4266 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4267 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4269 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4272 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4274 /* All remaining floating point or advanced SIMD instructions. */
4278 debug_printf ("all remain");
4280 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4284 debug_printf ("\n");
/* Every path that reaches here filled exactly record_buf[0].  */
4286 aarch64_insn_r->reg_rec_count++;
4287 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4288 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4290 return AARCH64_RECORD_SUCCESS;
4293 /* Decodes insns type and invokes its record handler. */
4296 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4298 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4300 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4301 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4302 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4303 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4305 /* Data processing - immediate instructions. */
4306 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4307 return aarch64_record_data_proc_imm (aarch64_insn_r);
4309 /* Branch, exception generation and system instructions. */
4310 if (ins_bit26 && !ins_bit27 && ins_bit28)
4311 return aarch64_record_branch_except_sys (aarch64_insn_r);
4313 /* Load and store instructions. */
4314 if (!ins_bit25 && ins_bit27)
4315 return aarch64_record_load_store (aarch64_insn_r);
4317 /* Data processing - register instructions. */
4318 if (ins_bit25 && !ins_bit26 && ins_bit27)
4319 return aarch64_record_data_proc_reg (aarch64_insn_r);
4321 /* Data processing - SIMD and floating point instructions. */
4322 if (ins_bit25 && ins_bit26 && ins_bit27)
4323 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4325 return AARCH64_RECORD_UNSUPPORTED;
4328 /* Cleans up local record registers and memory allocations. */
4331 deallocate_reg_mem (insn_decode_record *record)
4333 xfree (record->aarch64_regs);
4334 xfree (record->aarch64_mems);
4338 namespace selftests {
4341 aarch64_process_record_test (void)
4343 struct gdbarch_info info;
4346 gdbarch_info_init (&info);
4347 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4349 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4350 SELF_CHECK (gdbarch != NULL);
4352 insn_decode_record aarch64_record;
4354 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4355 aarch64_record.regcache = NULL;
4356 aarch64_record.this_addr = 0;
4357 aarch64_record.gdbarch = gdbarch;
4359 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4360 aarch64_record.aarch64_insn = 0xf9800020;
4361 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4362 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4363 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4364 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4366 deallocate_reg_mem (&aarch64_record);
4369 } // namespace selftests
4370 #endif /* GDB_SELF_TEST */
4372 /* Parse the current instruction and record the values of the registers and
4373 memory that will be changed in current instruction to record_arch_list
4374 return -1 if something is wrong. */
4377 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4378 CORE_ADDR insn_addr)
4380 uint32_t rec_no = 0;
4381 uint8_t insn_size = 4;
4383 gdb_byte buf[insn_size];
4384 insn_decode_record aarch64_record;
4386 memset (&buf[0], 0, insn_size);
4387 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4388 target_read_memory (insn_addr, &buf[0], insn_size);
4389 aarch64_record.aarch64_insn
4390 = (uint32_t) extract_unsigned_integer (&buf[0],
4392 gdbarch_byte_order (gdbarch));
4393 aarch64_record.regcache = regcache;
4394 aarch64_record.this_addr = insn_addr;
4395 aarch64_record.gdbarch = gdbarch;
4397 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4398 if (ret == AARCH64_RECORD_UNSUPPORTED)
4400 printf_unfiltered (_("Process record does not support instruction "
4401 "0x%0x at address %s.\n"),
4402 aarch64_record.aarch64_insn,
4403 paddress (gdbarch, insn_addr));
4409 /* Record registers. */
4410 record_full_arch_list_add_reg (aarch64_record.regcache,
4412 /* Always record register CPSR. */
4413 record_full_arch_list_add_reg (aarch64_record.regcache,
4414 AARCH64_CPSR_REGNUM);
4415 if (aarch64_record.aarch64_regs)
4416 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4417 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4418 aarch64_record.aarch64_regs[rec_no]))
4421 /* Record memories. */
4422 if (aarch64_record.aarch64_mems)
4423 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4424 if (record_full_arch_list_add_mem
4425 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4426 aarch64_record.aarch64_mems[rec_no].len))
4429 if (record_full_arch_list_add_end ())
4433 deallocate_reg_mem (&aarch64_record);