1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2019 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
31 #include "arch-utils.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
47 #include "common/selftest.h"
49 #include "aarch64-tdep.h"
50 #include "aarch64-ravenscar-thread.h"
53 #include "elf/aarch64.h"
55 #include "common/vec.h"
58 #include "record-full.h"
59 #include "arch/aarch64-insn.h"
61 #include "opcode/aarch64.h"
/* Bit-field extraction helpers used when decoding raw AArch64
   instruction words.  */

/* Mask covering the low (x + 1) bits.  NOTE(review): uses 1L, so on
   LLP64 hosts (32-bit long) widths >= 31 would overflow -- confirm
   callers only pass small field widths.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
/* Extract bit ST of OBJ.  */
#define bit(obj,st) (((obj) >> (st)) & 1)
/* Extract bits ST through FN (inclusive) of OBJ.  */
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4
/* All possible aarch64 target descriptors, indexed by the SVE vector
   quotient (VQ, 0 meaning no SVE) and by whether the pointer
   authentication feature is present.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];
/* The standard register names, and all the valid aliases for them.
   Each entry maps an alias string to the GDB raw register number it
   refers to.  */
  const char *const name;
} aarch64_register_aliases[] =
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  Each wN aliases the corresponding xN.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* Intra-procedure-call scratch registers (AAPCS64 names for x16/x17).  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
/* The SVE 'Z' (vector) and 'P' (predicate) registers.  */
static const char *const aarch64_sve_register_names[] =
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",

  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
/* Names of the pointer-authentication mask pseudo registers.  */
static const char *const aarch64_pauth_register_names[] =
  /* Authentication mask for data pointer.  */
  /* Authentication mask for code pointer.  */
/* AArch64 prologue cache structure.  Holds everything the prologue
   unwinder learned about one frame, so the analysis runs only once.  */
struct aarch64_prologue_cache

  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */

  /* Is the target available to read from?  */

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */

  /* The register used to hold the frame pointer for this frame.  */

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
/* "show" callback for the "set/show debug aarch64" setting: report the
   current value of the AArch64 debug flag.  */
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
/* Abstract instruction reader.  Lets the prologue analyzer fetch
   instructions either from the live target or, in self tests, from a
   canned array.  */

class abstract_instruction_reader
  /* Read in one instruction of LEN bytes at MEMADDR, interpreted with
     BYTE_ORDER.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
  /* Fetch one instruction from target memory.  */
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    return read_code_unsigned_integer (memaddr, len, byte_order);
/* If address signing is enabled, mask off the signature bits from ADDR, using
   the register values in THIS_FRAME.  */

aarch64_frame_unmask_address (struct gdbarch_tdep *tdep,
			      struct frame_info *this_frame,
  /* Only strip the signature when the target has pauth and the unwound
     RA_STATE register says the return address is currently mangled.  */
  if (tdep->has_pauth ()
      && frame_unwind_register_unsigned (this_frame,
					 tdep->pauth_ra_state_regnum))
      int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
      CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
      /* Clear the bits covered by the code mask to recover the
	 unsigned address.  */
      addr = addr & ~cmask;
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.

   Instructions are fetched through READER from [START, LIMIT).  If
   CACHE is non-NULL it is filled in with the frame register, frame
   size and saved-register offsets discovered.  Returns the address at
   which the scan stopped.  */

aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);

  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  /* Start with every register symbolically holding its own entry value.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  /* AArch64 instructions are always 4 bytes.  */
  for (; start < limit; start += 4)
      insn = reader.read (start, 4, byte_order_for_code);

      /* Stop if the opcode table cannot decode this instruction.  */
      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)

      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  /* ADD/SUB immediate: propagate the constant offset
	     symbolically (covers "sub sp, sp, #N" frame allocation).  */
	  if (inst.opcode->op == OP_ADD)
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
      else if (inst.opcode->iclass == pcreladdr
	       && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

	  /* ADRP: destination value is no longer tracked.  */
	  regs[inst.operands[0].reg.regno] = pv_unknown ();
      else if (inst.opcode->iclass == branch_imm)
	  /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == condbranch)
	  /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == branch_reg)
	  /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == compbranch)
	  /* Stop analysis on branch.  */
      else if (inst.opcode->op == OP_MOVZ)
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  /* MOVZ loads an arbitrary immediate; stop tracking the
	     destination.  */
	  regs[inst.operands[0].reg.regno] = pv_unknown ();
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  /* An un-shifted ORR from SP is a register move
	     (e.g. "mov x29, sp"); anything else ends the analysis.  */
	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
		debug_printf ("aarch64: prologue analysis gave up "
			      "addr=%s opcode=0x%x (orr x register)\n",
			      core_addr_to_string_nz (start), insn);
      else if (inst.opcode->op == OP_STUR)
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  /* STUR: record the store into the symbolic stack area.  */
	  stack.store (pv_add_constant (regs[rn],
					inst.operands[1].addr.offset.imm),
		       is64 ? 8 : 4, regs[rt]);
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))

	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	      /* Only bottom 64-bit of each V register (D register) need
		 to be preserved.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      /* Map the FP registers behind the X registers in REGS.  */
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;

	  stack.store (pv_add_constant (regs[rn], imm), 8,
	  stack.store (pv_add_constant (regs[rn], imm + 8), 8,

	  /* Pre-indexed writeback also adjusts the base register.  */
	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	      /* Only bottom 64-bit of each V register (D register) need
		 to be preserved.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt += AARCH64_X_REGISTER_COUNT;

	  stack.store (pv_add_constant (regs[rn], imm),
		       is64 ? 8 : 4, regs[rt]);
	  /* Writeback adjusts the base register.  */
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);
      else if (inst.opcode->iclass == testbranch)
	  /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == ic_system)
	  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
	  int ra_state_val = 0;

	  if (insn == 0xd503233f /* paciasp.  */
	      || insn == 0xd503237f /* pacibsp.  */)
	      /* Return addresses are mangled.  */
	  else if (insn == 0xd50323bf /* autiasp.  */
		   || insn == 0xd50323ff /* autibsp.  */)
	      /* Return addresses are not mangled.  */
		debug_printf ("aarch64: prologue analysis gave up addr=%s"
			      " opcode=0x%x (iclass)\n",
			      core_addr_to_string_nz (start), insn);

	  /* Record the RA_STATE value so the unwinder knows whether the
	     saved return address is currently signed.  */
	  if (tdep->has_pauth () && cache != nullptr)
	    trad_frame_set_value (cache->saved_regs,
				  tdep->pauth_ra_state_regnum,
	    debug_printf ("aarch64: prologue analysis gave up addr=%s"
			  core_addr_to_string_nz (start), insn);

  /* Decide which register serves as the frame base.  */
  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;

  /* Record the stack offset at which each core (X) register was saved.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].addr = offset;

  /* Likewise for the D registers, which live past gdbarch_num_regs in
     the pseudo-register numbering.  */
  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      int regnum = gdbarch_num_regs (gdbarch);

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
/* Convenience overload: analyze the prologue reading instructions from
   the real target.  */
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
578 namespace selftests {
/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)

  /* Return the canned instruction at MEMADDR; the address doubles as
     an index (MEMADDR / 4) into the array.  */
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];

  const uint32_t *m_insns;
/* Self test: run the prologue analyzer over hand-assembled instruction
   sequences and check the resulting frame register, frame size and
   saved-register offsets.  */
aarch64_analyze_prologue_test (void)
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Test the simple prologue in which frame pointer is used.  */
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    instruction_reader_test reader (insns);

    /* Analysis must stop at the bl (branch) after two instructions.  */
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	if (i == AARCH64_FP_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -272);
	else if (i == AARCH64_LR_REGNUM)
	  SELF_CHECK (cache.saved_regs[i].addr == -264);
	  SELF_CHECK (cache.saved_regs[i].addr == -1);

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
	int regnum = gdbarch_num_regs (gdbarch);

	SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str	x19, [sp, #-48]! */
      0xb9002fe0, /* str	w0, [sp, #44] */
      0xf90013e1, /* str	x1, [sp, #32]*/
      0xfd000fe0, /* str	d0, [sp, #24] */
      0xaa0203f3, /* mov	x19, x2 */
      0xf94013e0, /* ldr	x0, [sp, #32] */
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	  SELF_CHECK (cache.saved_regs[i].addr == -16);
	  SELF_CHECK (cache.saved_regs[i].addr == -48);
	  SELF_CHECK (cache.saved_regs[i].addr == -1);

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
	int regnum = gdbarch_num_regs (gdbarch);

	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr

	  SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr

  /* Test a prologue in which there is a return address signing instruction.  */
  if (tdep->has_pauth ())
      static const uint32_t insns[] = {
	0xd503233f, /* paciasp */
	0xa9bd7bfd, /* stp	x29, x30, [sp, #-48]! */
	0x910003fd, /* mov	x29, sp */
	0xf801c3f3, /* str	x19, [sp, #28] */
	0xb9401fa0, /* ldr	x19, [x29, #28] */
      instruction_reader_test reader (insns);

      CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,

      SELF_CHECK (end == 4 * 4);
      SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
      SELF_CHECK (cache.framesize == 48);

      for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
	    SELF_CHECK (cache.saved_regs[i].addr == -20);
	  else if (i == AARCH64_FP_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr == -48);
	  else if (i == AARCH64_LR_REGNUM)
	    SELF_CHECK (cache.saved_regs[i].addr == -40);
	    SELF_CHECK (cache.saved_regs[i].addr == -1);

      if (tdep->has_pauth ())
	  /* paciasp must have flagged RA_STATE as "mangled" (1).  */
	  SELF_CHECK (trad_frame_value_p (cache.saved_regs,
					  tdep->pauth_ra_state_regnum));
	  SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1);
737 } // namespace selftests
738 #endif /* GDB_SELF_TEST */
/* Implement the "skip_prologue" gdbarch method.  Return the address of
   the first instruction past the prologue of the function containing
   PC.  */

aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
	return std::max (pc, post_prologue_pc);

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
    limit_pc = pc + 128; /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

aarch64_scan_prologue (struct frame_info *this_frame,
		       struct aarch64_prologue_cache *cache)
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
      else if (sal.end < prologue_end)
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;

      /* Never scan past the frame's current PC.  */
      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);

      /* Fallback: assume a standard frame with x29/x30 saved at the
	 frame pointer.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
  CORE_ADDR unwound_fp;

  aarch64_scan_prologue (this_frame, cache);

  /* No usable frame was found; leave the cache mostly empty.  */
  if (cache->framereg == -1)

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

      aarch64_make_prologue_cache_1 (this_frame, cache);

  catch (const gdb_exception_error &ex)
      /* Unavailable registers/memory just mean available_p stays 0;
	 anything else is a real error.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

aarch64_prologue_this_id (struct frame_info *this_frame,
			  void **this_cache, struct frame_id *this_id)
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
    *this_id = frame_id_build (cache->prev_sp, cache->func);
/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
      struct gdbarch *gdbarch = get_frame_arch (this_frame);
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);

      /* Strip the pointer-authentication signature if RA_STATE says the
	 return address is mangled.  */
      if (tdep->has_pauth ()
	  && trad_frame_value_p (cache->saved_regs,
				 tdep->pauth_ra_state_regnum))
	lr = aarch64_frame_unmask_address (tdep, this_frame, lr);

      return frame_unwind_got_constant (this_frame, prev_regnum, lr);

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  default_frame_sniffer
/* Allocate and fill in *THIS_CACHE with information about the stub frame
   *THIS_FRAME (a stub has no prologue; SP and PC are read directly).
   Do not do this if *THIS_CACHE was already allocated.  Return a pointer
   to the current aarch64_prologue_cache in *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

      cache->prev_sp = get_frame_register_unsigned (this_frame,
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;

  catch (const gdb_exception_error &ex)
      /* Unavailable registers/memory leave available_p at 0; rethrow
	 real errors.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
/* Implement the "stop_reason" frame_unwind method for stub frames.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
/* Our frame ID for a stub frame is the current SP and LR.  */

aarch64_stub_this_id (struct frame_info *this_frame,
		      void **this_cache, struct frame_id *this_id)
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
/* Implement the "sniffer" frame_unwind method.  Claim frames in the PLT
   or whose code memory is unreadable.  */

aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_prologue_cache)
  CORE_ADDR addr_in_block;

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub winder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  aarch64_stub_unwind_sniffer
/* Return the frame base address of *THIS_FRAME, i.e. the caller's SP
   minus the frame size.  */

aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
/* AArch64 default frame base information.  The same base address is
   used for the frame base, locals and arguments.  */
struct frame_base aarch64_normal_base =
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
/* Return the value of the REGNUM register in the previous frame of
   THIS_FRAME, for use by the DWARF unwinder.  Only AARCH64_PC_REGNUM
   is expected here (set up by aarch64_dwarf2_frame_init_reg).  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
			      void **this_cache, int regnum)
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));

    case AARCH64_PC_REGNUM:
      /* The previous PC is the (possibly de-signed) link register.  */
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      lr = aarch64_frame_unmask_address (tdep, this_frame, lr);
      return frame_unwind_got_constant (this_frame, regnum, lr);

      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
/* One-byte DWARF expressions yielding the constants 0 and 1; used as
   canned location expressions for the RA_STATE pseudo register.  */
static const unsigned char op_lit0 = DW_OP_lit0;
static const unsigned char op_lit1 = DW_OP_lit1;
/* Implement the "init_reg" dwarf2_frame_ops method.  */

aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       struct frame_info *this_frame)
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

    case AARCH64_PC_REGNUM:
      /* PC is recovered from LR by aarch64_dwarf2_prev_register.  */
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;

  /* Init pauth registers.  */
  if (tdep->has_pauth ())
      if (regnum == tdep->pauth_ra_state_regnum)
	  /* Initialize RA_STATE to zero.  */
	  reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
	  reg->loc.exp.start = &op_lit0;
	  reg->loc.exp.len = 1;
      else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
	       || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
	  /* The mask registers do not change across frames.  */
	  reg->how = DWARF2_FRAME_REG_SAME_VALUE;
/* Implement the execute_dwarf_cfa_vendor_op method.  Handles the
   AArch64 vendor CFA op DW_CFA_AARCH64_negate_ra_state.  */

aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
				     struct dwarf2_frame_state *fs)
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct dwarf2_frame_state_reg *ra_state;

  if (tdep->has_pauth () && op == DW_CFA_AARCH64_negate_ra_state)
      /* Allocate RA_STATE column if it's not allocated yet.  */
      fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);

      /* Toggle the status of RA_STATE between 0 and 1.  */
      ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
      ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;

      if (ra_state->loc.exp.start == nullptr
	  || ra_state->loc.exp.start == &op_lit0)
	ra_state->loc.exp.start = &op_lit1;
	ra_state->loc.exp.start = &op_lit0;

      ra_state->loc.exp.len = 1;
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */

DEF_VEC_O (stack_item_t);
/* Return the alignment (in bytes) of the given type.  */

aarch64_type_align (struct type *t)
  t = check_typedef (t);
  switch (TYPE_CODE (t))
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));

    /* Scalar types: alignment equals size.  */
    case TYPE_CODE_ENUM:

    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:

    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
	  /* Use the natural alignment for vector types (the same for
	     scalar type), but the maximum alignment is 128-bit.  */
	  if (TYPE_LENGTH (t) > 16)

	  return TYPE_LENGTH (t);
	/* Ordinary arrays align like their element type.  */
	return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    /* Aggregates align to the strictest-aligned member.  */
    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
	for (n = 0; n < TYPE_NFIELDS (t); n++)
	    falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of register required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   one previously recorded (an HFA/HVA must be homogeneous).  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_FLT:
      /* A single floating-point value uses one register; reject
	 anything wider than a quad-precision float.  */
      if (TYPE_LENGTH (type) > 16)
	return -1;

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
	       || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
	return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
	/* A complex is two base elements of the underlying float type.  */
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
	if (TYPE_LENGTH (target_type) > 16)
	  return -1;

	if (*fundamental_type == nullptr)
	  *fundamental_type = target_type;
	else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
		 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
	  return -1;

	return 2;
      }

    case TYPE_CODE_ARRAY:
      {
	if (TYPE_VECTOR (type))
	  {
	    /* Short vectors must be exactly 64 or 128 bits.  */
	    if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
	      return -1;

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
		     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
	      return -1;

	    return 1;
	  }
	else
	  {
	    /* A true array contributes one slot per element.  */
	    struct type *target_type = TYPE_TARGET_TYPE (type);
	    int count = aapcs_is_vfp_call_or_return_candidate_1
			  (target_type, fundamental_type);

	    if (count == -1)
	      return count;

	    count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
	    return count;
	  }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
	int count = 0;

	for (int i = 0; i < TYPE_NFIELDS (type); i++)
	  {
	    /* Ignore any static fields.  */
	    if (field_is_static (&TYPE_FIELD (type, i)))
	      continue;

	    struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
			      (member, fundamental_type);
	    if (sub_count == -1)
	      return -1;
	    count += sub_count;
	  }

	/* Ensure there is no padding between the fields (allowing for empty
	   zero length structs)  */
	int ftype_length = (*fundamental_type == nullptr)
			   ? 0 : TYPE_LENGTH (*fundamental_type);
	if (count * ftype_length != TYPE_LENGTH (type))
	  return -1;

	return count;
      }

    default:
      break;
    }

  return -1;
}
1387 /* Return true if an argument, whose type is described by TYPE, can be passed or
1388 returned in simd/fp registers, providing enough parameter passing registers
1389 are available. This is as described in the AAPCS64.
1391 Upon successful return, *COUNT returns the number of needed registers,
1392 *FUNDAMENTAL_TYPE contains the type of those registers.
1394 Candidate as per the AAPCS64 5.4.2.C is either a:
1397 - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1398 all the members are floats and has at most 4 members.
1399 - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1400 all the members are short vectors and has at most 4 members.
1403 Note that HFAs and HVAs can include nested structures and arrays. */
1406 aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
1407 struct type **fundamental_type)
1409 if (type == nullptr)
1412 *fundamental_type = nullptr;
1414 int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
1417 if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
/* AArch64 function call information structure.  Tracks the AAPCS64
   allocation state while marshalling arguments for an inferior call.  */
struct aarch64_call_info
{
  /* the current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsbile for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
	   struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  /* Copy X_REGISTER_SIZE bytes at a time into successive X registers.  */
  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
						   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum),
			phex (regval, X_REGISTER_SIZE));
	}
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      len -= partial_len;
      buf += partial_len;
      regnum++;
    }
}
/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
	 bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
	{
	  debug_printf ("arg %d in %s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum));
	}
      return 1;
    }
  /* No V registers left; mark them exhausted.  */
  info->nsrn = 8;
  return 0;
}
/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
	       struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
		    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
1575 /* Marshall an argument into a sequence of one or more consecutive X
1576 registers or, if insufficient X registers are available then onto
1580 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1581 struct aarch64_call_info *info, struct type *type,
1584 int len = TYPE_LENGTH (type);
1585 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1587 /* PCS C.13 - Pass in registers if we have enough spare */
1588 if (info->ngrn + nregs <= 8)
1590 pass_in_x (gdbarch, regcache, info, type, arg);
1591 info->ngrn += nregs;
1596 pass_on_stack (info, type, arg);
/* Pass a value, which is of type arg_type, in a V register.  Assumes value is a
   aapcs_is_vfp_call_or_return_candidate and there are enough spare V
   registers.  A return value of false is an error state as the value will have
   been partially passed to the stack.  */

static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
			 struct aarch64_call_info *info, struct type *arg_type,
			 struct value *arg)
{
  switch (TYPE_CODE (arg_type))
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
			value_contents (arg));
      break;

    case TYPE_CODE_COMPLEX:
      {
	/* Real part then imaginary part, each in its own V register.  */
	const bfd_byte *buf = value_contents (arg);
	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

	if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
			buf))
	  return false;

	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
			  buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (arg_type))
	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
			  value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* HFA/HVA: recurse so each member lands in its own V register.  */
      for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
	{
	  /* Don't include static fields.  */
	  if (field_is_static (&TYPE_FIELD (arg_type, i)))
	    continue;

	  struct value *field = value_primitive_field (arg, 0, i, arg_type);
	  struct type *field_type = check_typedef (value_type (field));

	  if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
					field))
	    return false;
	}
      return true;

    default:
      return false;
    }
}
/* Implement the "push_dummy_call" gdbarch method.

   Marshal NARGS arguments for an inferior function call per the
   AAPCS64, set up LR and (when needed) the X8 struct-return pointer,
   flush stacked arguments, and return the final (16-byte aligned) SP.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
			 struct regcache *regcache, CORE_ADDR bp_addr,
			 int nargs,
			 struct value **args, CORE_ADDR sp,
			 function_call_return_method return_method,
			 CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage processes.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      if (aarch64_debug)
	{
	  debug_printf ("struct return in %s = 0x%s\n",
			gdbarch_register_name (gdbarch,
					       AARCH64_STRUCT_RETURN_REGNUM),
			paddress (gdbarch, struct_addr));
	}
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
				      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in v registers as per the AAPCS64, then do so if
	 if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
						 &fundamental_type))
	{
	  if (info.nsrn + elements <= 8)
	    {
	      /* We know that we have sufficient registers available therefore
		 this will never need to fallback to the stack.  */
	      if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
					    arg))
		gdb_assert_not_reached ("Failed to push args");
	    }
	  else
	    {
	      /* PCS C.3: once a candidate overflows the V registers,
		 all remaining V registers are marked unavailable.  */
	      info.nsrn = 8;
	      pass_on_stack (&info, arg_type, arg);
	    }
	  continue;
	}

      switch (TYPE_CODE (arg_type))
	{
	case TYPE_CODE_INT:
	case TYPE_CODE_BOOL:
	case TYPE_CODE_CHAR:
	case TYPE_CODE_RANGE:
	case TYPE_CODE_ENUM:
	  if (len < 4)
	    {
	      /* Promote to 32 bit integer.  */
	      if (TYPE_UNSIGNED (arg_type))
		arg_type = builtin_type (gdbarch)->builtin_uint32;
	      else
		arg_type = builtin_type (gdbarch)->builtin_int32;
	      arg = value_cast (arg_type, arg);
	    }
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	case TYPE_CODE_STRUCT:
	case TYPE_CODE_ARRAY:
	case TYPE_CODE_UNION:
	  if (len > 16)
	    {
	      /* PCS B.7 Aggregates larger than 16 bytes are passed by
		 invisible reference.  */

	      /* Allocate aligned storage.  */
	      sp = align_down (sp - len, 16);

	      /* Write the real data into the stack.  */
	      write_memory (sp, value_contents (arg), len);

	      /* Construct the indirection.  */
	      arg_type = lookup_pointer_type (arg_type);
	      arg = value_from_pointer (arg_type, sp);
	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	    }
	  else
	    /* PCS C.15 / C.18 multiple values pass.  */
	    pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;

	default:
	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
	  break;
	}
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  /* Pop the queued stack items in reverse so they end up in call order.  */
  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
	write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}
1818 /* Implement the "frame_align" gdbarch method. */
1821 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1823 /* Align the stack to sixteen bytes. */
1824 return sp & ~(CORE_ADDR) 15;
1827 /* Return the type for an AdvSISD Q register. */
1829 static struct type *
1830 aarch64_vnq_type (struct gdbarch *gdbarch)
1832 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1834 if (tdep->vnq_type == NULL)
1839 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1842 elem = builtin_type (gdbarch)->builtin_uint128;
1843 append_composite_type_field (t, "u", elem);
1845 elem = builtin_type (gdbarch)->builtin_int128;
1846 append_composite_type_field (t, "s", elem);
1851 return tdep->vnq_type;
1854 /* Return the type for an AdvSISD D register. */
1856 static struct type *
1857 aarch64_vnd_type (struct gdbarch *gdbarch)
1859 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1861 if (tdep->vnd_type == NULL)
1866 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1869 elem = builtin_type (gdbarch)->builtin_double;
1870 append_composite_type_field (t, "f", elem);
1872 elem = builtin_type (gdbarch)->builtin_uint64;
1873 append_composite_type_field (t, "u", elem);
1875 elem = builtin_type (gdbarch)->builtin_int64;
1876 append_composite_type_field (t, "s", elem);
1881 return tdep->vnd_type;
1884 /* Return the type for an AdvSISD S register. */
1886 static struct type *
1887 aarch64_vns_type (struct gdbarch *gdbarch)
1889 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1891 if (tdep->vns_type == NULL)
1896 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1899 elem = builtin_type (gdbarch)->builtin_float;
1900 append_composite_type_field (t, "f", elem);
1902 elem = builtin_type (gdbarch)->builtin_uint32;
1903 append_composite_type_field (t, "u", elem);
1905 elem = builtin_type (gdbarch)->builtin_int32;
1906 append_composite_type_field (t, "s", elem);
1911 return tdep->vns_type;
1914 /* Return the type for an AdvSISD H register. */
1916 static struct type *
1917 aarch64_vnh_type (struct gdbarch *gdbarch)
1919 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1921 if (tdep->vnh_type == NULL)
1926 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1929 elem = builtin_type (gdbarch)->builtin_uint16;
1930 append_composite_type_field (t, "u", elem);
1932 elem = builtin_type (gdbarch)->builtin_int16;
1933 append_composite_type_field (t, "s", elem);
1938 return tdep->vnh_type;
1941 /* Return the type for an AdvSISD B register. */
1943 static struct type *
1944 aarch64_vnb_type (struct gdbarch *gdbarch)
1946 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1948 if (tdep->vnb_type == NULL)
1953 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1956 elem = builtin_type (gdbarch)->builtin_uint8;
1957 append_composite_type_field (t, "u", elem);
1959 elem = builtin_type (gdbarch)->builtin_int8;
1960 append_composite_type_field (t, "s", elem);
1965 return tdep->vnb_type;
/* Return the type for an AdvSISD V register.  Built lazily and cached
   on the per-gdbarch tdep.  */

static struct type *
aarch64_vnv_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnv_type == NULL)
    {
      /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
	 slice from the non-pseudo vector registers.  However NEON V registers
	 are always vector registers, and need constructing as such.  */
      const struct builtin_type *bt = builtin_type (gdbarch);

      struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
					    TYPE_CODE_UNION);

      /* 2 x 64-bit lane views.  */
      struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
					      TYPE_CODE_UNION);
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_double, 2));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint64, 2));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int64, 2));
      append_composite_type_field (t, "d", sub);

      /* 4 x 32-bit lane views.  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "f",
				   init_vector_type (bt->builtin_float, 4));
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint32, 4));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int32, 4));
      append_composite_type_field (t, "s", sub);

      /* 8 x 16-bit lane views.  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint16, 8));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int16, 8));
      append_composite_type_field (t, "h", sub);

      /* 16 x 8-bit lane views.  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint8, 16));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int8, 16));
      append_composite_type_field (t, "b", sub);

      /* 1 x 128-bit lane views.  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
				 TYPE_CODE_UNION);
      append_composite_type_field (sub, "u",
				   init_vector_type (bt->builtin_uint128, 1));
      append_composite_type_field (sub, "s",
				   init_vector_type (bt->builtin_int128, 1));
      append_composite_type_field (t, "q", sub);

      tdep->vnv_type = t;
    }

  return tdep->vnv_type;
}
/* Implement the "dwarf2_reg_to_regnum" gdbarch method.

   Map a DWARF register number REG onto GDB's internal register
   numbering; return -1 for numbers GDB does not model.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  if (reg == AARCH64_DWARF_SVE_VG)
    return AARCH64_SVE_VG_REGNUM;

  if (reg == AARCH64_DWARF_SVE_FFR)
    return AARCH64_SVE_FFR_REGNUM;

  if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
    return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;

  if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
    return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;

  /* Pointer-authentication registers exist only on pauth targets.  */
  if (tdep->has_pauth ())
    {
      if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
	return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;

      if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
	return tdep->pauth_ra_state_regnum;
    }

  return -1;
}
/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  /* Clear the symbol list before delegating to the default
     disassembler.  NOTE(review): presumably this stops opcodes from
     consulting stale symbol data when disassembling — confirm against
     the opcodes disassemble_info contract.  */
  info->symbols = NULL;
  return default_print_insn (memaddr, info);
}
/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
			      gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      /* HFA/HVA: one member per V register, starting at V0.  */
      int len = TYPE_LENGTH (fundamental_type);

      for (int i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* Enough space for a full vector register.  */
	  gdb_byte buf[register_size (gdbarch, regno)];
	  gdb_assert (len <= sizeof (buf));

	  if (aarch64_debug)
	    {
	      debug_printf ("read HFA or HVA return value element %d from %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }
	  regs->cooked_read (regno, buf);

	  memcpy (valbuf, buf, len);
	  valbuf += len;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
	 straight-forward.  Otherwise we have to play around a bit
	 more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
	{
	  /* By using store_unsigned_integer we avoid having to do
	     anything special for small big-endian values.  */
	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
	  store_unsigned_integer (valbuf,
				  (len > X_REGISTER_SIZE
				   ? X_REGISTER_SIZE : len), byte_order, tmp);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  regs->cooked_read (regno++, buf);
	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      /* v0-v7 are used to return values and one register is allocated
	 for one member.  However, HFA or HVA has at most four members.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
	 invisible reference.  */

      return 1;
    }

  return 0;
}
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
			    const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
					     &fundamental_type))
    {
      /* HFA/HVA: one member per V register, starting at V0.  */
      int len = TYPE_LENGTH (fundamental_type);

      for (int i = 0; i < elements; i++)
	{
	  int regno = AARCH64_V0_REGNUM + i;
	  /* Enough space for a full vector register.  */
	  gdb_byte tmpbuf[register_size (gdbarch, regno)];
	  gdb_assert (len <= sizeof (tmpbuf));

	  if (aarch64_debug)
	    {
	      debug_printf ("write HFA or HVA return value element %d to %s\n",
			    i + 1,
			    gdbarch_register_name (gdbarch, regno));
	    }

	  memcpy (tmpbuf, valbuf,
		  len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
	  regs->cooked_write (regno, tmpbuf);
	  valbuf += len;
	}
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
	   || TYPE_CODE (type) == TYPE_CODE_CHAR
	   || TYPE_CODE (type) == TYPE_CODE_BOOL
	   || TYPE_CODE (type) == TYPE_CODE_PTR
	   || TYPE_IS_REFERENCE (type)
	   || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
	{
	  /* Values of one word or less are zero/sign-extended and
	     returned in r0.  */
	  bfd_byte tmpbuf[X_REGISTER_SIZE];
	  LONGEST val = unpack_long (type, valbuf);

	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
	  regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
	}
      else
	{
	  /* Integral values greater than one word are stored in
	     consecutive registers starting with r0.  This will always
	     be a multiple of the regiser size.  */
	  int len = TYPE_LENGTH (type);
	  int regno = AARCH64_X0_REGNUM;

	  while (len > 0)
	    {
	      regs->cooked_write (regno++, valbuf);
	      len -= X_REGISTER_SIZE;
	      valbuf += X_REGISTER_SIZE;
	    }
	}
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
	 been stored to word-aligned memory and then loaded into
	 registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
	{
	  memcpy (tmpbuf, valbuf,
		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
	  regs->cooked_write (regno++, tmpbuf);
	  len -= X_REGISTER_SIZE;
	  valbuf += X_REGISTER_SIZE;
	}
    }
}
/* Implement the "return_value" gdbarch method.

   Decide whether VALTYPE is returned in registers or in memory, and
   read (READBUF) or write (WRITEBUF) the register-held value as
   requested.  */

static enum return_value_convention
aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
		      struct type *valtype, struct regcache *regcache,
		      gdb_byte *readbuf, const gdb_byte *writebuf)
{
  if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
      || TYPE_CODE (valtype) == TYPE_CODE_UNION
      || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
    {
      if (aarch64_return_in_memory (gdbarch, valtype))
	{
	  if (aarch64_debug)
	    debug_printf ("return value in memory\n");
	  return RETURN_VALUE_STRUCT_CONVENTION;
	}
    }

  if (writebuf)
    aarch64_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    aarch64_extract_return_value (valtype, regcache, readbuf);

  if (aarch64_debug)
    debug_printf ("return value in registers\n");

  return RETURN_VALUE_REGISTER_CONVENTION;
}
/* Implement the "get_longjmp_target" gdbarch method.

   Read the saved PC out of the jmp_buf whose address is in X0; the
   slot offset comes from the tdep's jb_pc/jb_elt_size.  Return 1 on
   success and set *PC, 0 if the target memory read fails.  */

static int
aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  CORE_ADDR jb_addr;
  gdb_byte buf[X_REGISTER_SIZE];
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);

  if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
			  X_REGISTER_SIZE))
    return 0;

  *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
  return 1;
}
/* Implement the "gen_return_address" gdbarch method.

   For agent expressions, the return address lives in the LR register.  */

static void
aarch64_gen_return_address (struct gdbarch *gdbarch,
			    struct agent_expr *ax, struct axs_value *value,
			    CORE_ADDR scope)
{
  value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
  value->kind = axs_lvalue_register;
  value->u.reg = AARCH64_LR_REGNUM;
}
/* Return the pseudo register name corresponding to register regnum.  */

static const char *
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  static const char *const q_name[] =
    {
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",
    };

  static const char *const d_name[] =
    {
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",
    };

  static const char *const s_name[] =
    {
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",
    };

  static const char *const h_name[] =
    {
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",
    };

  static const char *const b_name[] =
    {
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",
    };

  /* Pseudo register numbers are offset by the count of raw registers.  */
  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[p_regnum - AARCH64_Q0_REGNUM];

  if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return d_name[p_regnum - AARCH64_D0_REGNUM];

  if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return s_name[p_regnum - AARCH64_S0_REGNUM];

  if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return h_name[p_regnum - AARCH64_H0_REGNUM];

  if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return b_name[p_regnum - AARCH64_B0_REGNUM];

  if (tdep->has_sve ())
    {
      static const char *const sve_v_name[] =
	{
	  "v0", "v1", "v2", "v3",
	  "v4", "v5", "v6", "v7",
	  "v8", "v9", "v10", "v11",
	  "v12", "v13", "v14", "v15",
	  "v16", "v17", "v18", "v19",
	  "v20", "v21", "v22", "v23",
	  "v24", "v25", "v26", "v27",
	  "v28", "v29", "v30", "v31",
	};

      if (p_regnum >= AARCH64_SVE_V0_REGNUM
	  && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
	return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
    }

  /* RA_STATE is used for unwinding only.  Do not assign it a name - this
     prevents it from being read by methods such as
     mi_cmd_trace_frame_collected.  */
  if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
    return "";

  internal_error (__FILE__, __LINE__,
		  _("aarch64_pseudo_register_name: bad register number %d"),
		  p_regnum);
}
/* Implement the "pseudo_register_type" tdesc_arch_data method.

   Map each pseudo register onto the union type describing its views.  */

static struct type *
aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Pseudo register numbers are offset by the count of raw registers.  */
  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_vnq_type (gdbarch);

  if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_vnd_type (gdbarch);

  if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_vns_type (gdbarch);

  if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_vnh_type (gdbarch);

  if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_vnb_type (gdbarch);

  if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
      && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
    return aarch64_vnv_type (gdbarch);

  /* The pauth RA_STATE pseudo register is a plain 64-bit value.  */
  if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
    return builtin_type (gdbarch)->builtin_uint64;

  internal_error (__FILE__, __LINE__,
		  _("aarch64_pseudo_register_type: bad register number %d"),
		  p_regnum);
}
/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method.

   Return non-zero when pseudo register REGNUM belongs to GROUP.  */

static int
aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
				    struct reggroup *group)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Pseudo register numbers are offset by the count of raw registers.  */
  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return (group == all_reggroup || group == vector_reggroup
	    || group == float_reggroup);
  else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return (group == all_reggroup || group == vector_reggroup
	    || group == float_reggroup);
  else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
	   && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
    return group == all_reggroup || group == vector_reggroup;
  /* RA_STATE is used for unwinding only.  Do not assign it to any groups.  */
  if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
    return 0;

  return group == all_reggroup;
}
2542 /* Helper for aarch64_pseudo_read_value. */
/* Read REGSIZE bytes of the raw V register at V0 + REGNUM_OFFSET into
   RESULT_VALUE.  The raw read fills a buffer sized for a full vector
   register; only the low REGSIZE bytes are copied out, which is how the
   narrower Q/D/S/H/B views are extracted.  */
2544 static struct value *
2545 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2546 readable_regcache *regcache, int regnum_offset,
2547 int regsize, struct value *result_value)
2549 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2551 /* Enough space for a full vector register. */
2552 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2553 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
/* If the raw register can't be read, mark the whole value unavailable
   rather than returning stale bytes.  */
2555 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2556 mark_value_bytes_unavailable (result_value, 0,
2557 TYPE_LENGTH (value_type (result_value)));
2559 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2561 return result_value;
2564 /* Implement the "pseudo_register_read_value" gdbarch method. */
/* Dispatch a pseudo register read: allocate the value, rebase REGNUM to
   a pseudo index, then delegate to aarch64_pseudo_read_value_1 with the
   width matching the requested view (Q/D/S/H/B, or SVE V).  */
2566 static struct value *
2567 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2570 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2571 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
/* Record provenance so the value prints as coming from a register.  */
2573 VALUE_LVAL (result_value) = lval_register;
2574 VALUE_REGNUM (result_value) = regnum;
2576 regnum -= gdbarch_num_regs (gdbarch);
2578 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2579 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2580 regnum - AARCH64_Q0_REGNUM,
2581 Q_REGISTER_SIZE, result_value);
2583 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2584 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2585 regnum - AARCH64_D0_REGNUM,
2586 D_REGISTER_SIZE, result_value);
2588 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2589 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2590 regnum - AARCH64_S0_REGNUM,
2591 S_REGISTER_SIZE, result_value);
2593 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2594 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2595 regnum - AARCH64_H0_REGNUM,
2596 H_REGISTER_SIZE, result_value);
2598 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2599 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2600 regnum - AARCH64_B0_REGNUM,
2601 B_REGISTER_SIZE, result_value);
2603 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2604 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2605 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2606 regnum - AARCH64_SVE_V0_REGNUM,
2607 V_REGISTER_SIZE, result_value);
2609 gdb_assert_not_reached ("regnum out of bound");
2612 /* Helper for aarch64_pseudo_write. */
/* Write REGSIZE bytes from BUF into the raw V register at
   V0 + REGNUM_OFFSET, zero-extending to the full register width so the
   write behaves like an architectural write to the narrow view.  */
2615 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2616 int regnum_offset, int regsize, const gdb_byte *buf)
2618 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2620 /* Enough space for a full vector register. */
2621 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2622 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2624 /* Ensure the register buffer is zero, we want gdb writes of the
2625 various 'scalar' pseudo registers to behavior like architectural
2626 writes, register width bytes are written the remainder are set to
2628 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2630 memcpy (reg_buf, buf, regsize);
2631 regcache->raw_write (v_regnum, reg_buf);
2634 /* Implement the "pseudo_register_write" gdbarch method. */
/* Dispatch a pseudo register write to aarch64_pseudo_write_1, mirroring
   the read path: rebase REGNUM to a pseudo index, then select the width
   of the requested view (Q/D/S/H/B, or SVE V).  */
2637 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2638 int regnum, const gdb_byte *buf)
2640 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2641 regnum -= gdbarch_num_regs (gdbarch);
2643 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2644 return aarch64_pseudo_write_1 (gdbarch, regcache,
2645 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2648 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2649 return aarch64_pseudo_write_1 (gdbarch, regcache,
2650 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2653 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2654 return aarch64_pseudo_write_1 (gdbarch, regcache,
2655 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2658 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2659 return aarch64_pseudo_write_1 (gdbarch, regcache,
2660 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2663 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2664 return aarch64_pseudo_write_1 (gdbarch, regcache,
2665 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2668 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2669 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2670 return aarch64_pseudo_write_1 (gdbarch, regcache,
2671 regnum - AARCH64_SVE_V0_REGNUM,
2672 V_REGISTER_SIZE, buf);
2674 gdb_assert_not_reached ("regnum out of bound");
2677 /* Callback function for user_reg_add. */
/* BATON is a pointer to the register number stashed in
   aarch64_register_aliases; simply read that register in FRAME.  */
2679 static struct value *
2680 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2682 const int *reg_p = (const int *) baton;
2684 return value_of_register (*reg_p, frame);
2688 /* Implement the "software_single_step" gdbarch method, needed to
2689 single step through atomic sequences on AArch64. */
/* Scan forward from PC: if the current insn is a Load Exclusive, walk up
   to ATOMIC_SEQUENCE_LENGTH insns looking for the closing Store
   Exclusive, collecting at most one conditional-branch target on the
   way.  Returns the breakpoint addresses needed to step over the whole
   sequence (empty vector = not an atomic sequence; caller falls back).
   NOTE(review): this listing is missing interior lines (loop advances of
   LOC, early returns); code kept byte-identical.  */
2691 static std::vector<CORE_ADDR>
2692 aarch64_software_single_step (struct regcache *regcache)
2694 struct gdbarch *gdbarch = regcache->arch ();
2695 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2696 const int insn_size = 4;
2697 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2698 CORE_ADDR pc = regcache_read_pc (regcache);
2699 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2701 CORE_ADDR closing_insn = 0;
2702 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2703 byte_order_for_code);
2706 int bc_insn_count = 0; /* Conditional branch instruction count. */
2707 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2710 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2713 /* Look for a Load Exclusive instruction which begins the sequence. */
2714 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2717 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2720 insn = read_memory_unsigned_integer (loc, insn_size,
2721 byte_order_for_code);
2723 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2725 /* Check if the instruction is a conditional branch. */
2726 if (inst.opcode->iclass == condbranch)
2728 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
/* Only a single conditional branch is supported inside the sequence.  */
2730 if (bc_insn_count >= 1)
2733 /* It is, so we'll try to set a breakpoint at the destination. */
2734 breaks[1] = loc + inst.operands[0].imm.value;
2740 /* Look for the Store Exclusive which closes the atomic sequence. */
2741 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2748 /* We didn't find a closing Store Exclusive instruction, fall back. */
2752 /* Insert breakpoint after the end of the atomic sequence. */
2753 breaks[0] = loc + insn_size;
2755 /* Check for duplicated breakpoints, and also check that the second
2756 breakpoint is not within the atomic sequence. */
2758 && (breaks[1] == breaks[0]
2759 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2760 last_breakpoint = 0;
2762 std::vector<CORE_ADDR> next_pcs;
2764 /* Insert the breakpoint at the end of the sequence, and one at the
2765 destination of the conditional branch, if it exists. */
2766 for (index = 0; index <= last_breakpoint; index++)
2767 next_pcs.push_back (breaks[index]);
/* Per-step state carried from copy_insn to fixup during displaced
   stepping.  */
2772 struct aarch64_displaced_step_closure : public displaced_step_closure
2774 /* It is true when condition instruction, such as B.CON, TBZ, etc,
2775 is being displaced stepping. */
2778 /* PC adjustment offset after displaced stepping. */
2779 int32_t pc_adjust = 0;
2782 /* Data when visiting instructions for displaced stepping. */
2784 struct aarch64_displaced_step_data
2786 struct aarch64_insn_data base;
2788 /* The address where the instruction will be executed at. */
2790 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2791 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2792 /* Number of instructions in INSN_BUF. */
2793 unsigned insn_count;
2794 /* Registers when doing displaced stepping. */
2795 struct regcache *regs;
/* Closure that will receive the pc_adjust/cond results.  */
2797 aarch64_displaced_step_closure *dsc;
2800 /* Implementation of aarch64_insn_visitor method "b". */
/* Relocate an unconditional branch B/BL: if the relocated offset still
   fits in 28 bits, emit a plain B (never BL, see comment below) and, for
   BL, update LR by hand; otherwise emit a NOP and let fixup apply the
   branch offset via pc_adjust.  */
2803 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2804 struct aarch64_insn_data *data)
2806 struct aarch64_displaced_step_data *dsd
2807 = (struct aarch64_displaced_step_data *) data;
/* Offset re-expressed relative to the scratch-pad address.  */
2808 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2810 if (can_encode_int32 (new_offset, 28))
2812 /* Emit B rather than BL, because executing BL on a new address
2813 will get the wrong address into LR. In order to avoid this,
2814 we emit B, and update LR if the instruction is BL. */
2815 emit_b (dsd->insn_buf, 0, new_offset);
2821 emit_nop (dsd->insn_buf);
2823 dsd->dsc->pc_adjust = offset;
/* For BL, emulate the link: LR = address of the insn after the BL.  */
2829 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2830 data->insn_addr + 4);
2834 /* Implementation of aarch64_insn_visitor method "b_cond". */
/* Relocate a conditional branch by emitting a short B.COND skeleton in
   the scratch pad; fixup later infers taken/not-taken from the PC value
   after the step and applies pc_adjust accordingly.  */
2837 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2838 struct aarch64_insn_data *data)
2840 struct aarch64_displaced_step_data *dsd
2841 = (struct aarch64_displaced_step_data *) data;
2843 /* GDB has to fix up PC after displaced step this instruction
2844 differently according to the condition is true or false. Instead
2845 of checking COND against conditional flags, we can use
2846 the following instructions, and GDB can tell how to fix up PC
2847 according to the PC value.
2849 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2855 emit_bcond (dsd->insn_buf, cond, 8);
2857 dsd->dsc->pc_adjust = offset;
2858 dsd->insn_count = 1;
2861 /* Dynamically allocate a new register. If we know the register
2862 statically, we should make it a global as above instead of using this
/* Build an aarch64_register value on the fly from a register number and
   a 64-bit flag.  */
2865 static struct aarch64_register
2866 aarch64_register (unsigned num, int is64)
2868 return (struct aarch64_register) { num, is64 };
2871 /* Implementation of aarch64_insn_visitor method "cb". */
/* Relocate CBZ/CBNZ: emit a short-range CBZ/CBNZ skeleton; fixup decides
   taken/not-taken from the resulting PC and applies pc_adjust.  */
2874 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2875 const unsigned rn, int is64,
2876 struct aarch64_insn_data *data)
2878 struct aarch64_displaced_step_data *dsd
2879 = (struct aarch64_displaced_step_data *) data;
2881 /* The offset is out of range for a compare and branch
2882 instruction. We can use the following instructions instead:
2884 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2889 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2890 dsd->insn_count = 1;
2892 dsd->dsc->pc_adjust = offset;
2895 /* Implementation of aarch64_insn_visitor method "tb". */
/* Relocate TBZ/TBNZ: same scheme as CBZ/CBNZ — emit a short-range
   skeleton and let fixup resolve the branch via pc_adjust.  */
2898 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2899 const unsigned rt, unsigned bit,
2900 struct aarch64_insn_data *data)
2902 struct aarch64_displaced_step_data *dsd
2903 = (struct aarch64_displaced_step_data *) data;
2905 /* The offset is out of range for a test bit and branch
2906 instruction We can use the following instructions instead:
2908 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2914 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2915 dsd->insn_count = 1;
2917 dsd->dsc->pc_adjust = offset;
2920 /* Implementation of aarch64_insn_visitor method "adr". */
/* ADR/ADRP compute a PC-relative address; since the original PC is
   known, write the final value straight into Rd, execute a NOP in the
   scratch pad, and advance PC by 4 in fixup.  */
2923 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2924 const int is_adrp, struct aarch64_insn_data *data)
2926 struct aarch64_displaced_step_data *dsd
2927 = (struct aarch64_displaced_step_data *) data;
2928 /* We know exactly the address the ADR{P,} instruction will compute.
2929 We can just write it to the destination register. */
2930 CORE_ADDR address = data->insn_addr + offset;
2934 /* Clear the lower 12 bits of the offset to get the 4K page. */
2935 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2939 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2942 dsd->dsc->pc_adjust = 4;
2943 emit_nop (dsd->insn_buf);
2944 dsd->insn_count = 1;
2947 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
/* PC-relative literal load: write the literal's absolute address into
   Rt, then emit an LDR/LDRSW that loads through Rt with a zero offset,
   so the relocated insn no longer depends on PC.  */
2950 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2951 const unsigned rt, const int is64,
2952 struct aarch64_insn_data *data)
2954 struct aarch64_displaced_step_data *dsd
2955 = (struct aarch64_displaced_step_data *) data;
2956 CORE_ADDR address = data->insn_addr + offset;
2957 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2959 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
/* LDRSW (sign-extending 32-bit load) vs. plain LDR of IS64 width.  */
2963 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2964 aarch64_register (rt, 1), zero);
2966 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2967 aarch64_register (rt, 1), zero);
2969 dsd->dsc->pc_adjust = 4;
2972 /* Implementation of aarch64_insn_visitor method "others". */
/* Non-PC-relative instructions are copied verbatim.  The 0xd65f0000
   mask match detects RET, which must not have PC advanced afterwards;
   everything else gets the normal +4 adjustment.  */
2975 aarch64_displaced_step_others (const uint32_t insn,
2976 struct aarch64_insn_data *data)
2978 struct aarch64_displaced_step_data *dsd
2979 = (struct aarch64_displaced_step_data *) data;
2981 aarch64_emit_insn (dsd->insn_buf, insn);
2982 dsd->insn_count = 1;
2984 if ((insn & 0xfffffc1f) == 0xd65f0000)
2987 dsd->dsc->pc_adjust = 0;
2990 dsd->dsc->pc_adjust = 4;
/* Visitor vtable wiring the per-instruction-class relocation handlers
   above into aarch64_relocate_instruction.  */
2993 static const struct aarch64_insn_visitor visitor =
2995 aarch64_displaced_step_b,
2996 aarch64_displaced_step_b_cond,
2997 aarch64_displaced_step_cb,
2998 aarch64_displaced_step_tb,
2999 aarch64_displaced_step_adr,
3000 aarch64_displaced_step_ldr_literal,
3001 aarch64_displaced_step_others,
3004 /* Implement the "displaced_step_copy_insn" gdbarch method. */
/* Copy one instruction from FROM to the scratch pad at TO, relocating it
   via the visitor table.  Returns NULL for atomic (load-exclusive)
   sequences, which cannot be displaced-stepped; caller then falls back
   to software single-step.  Ownership of the closure passes to the
   caller via release().  */
3006 struct displaced_step_closure *
3007 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
3008 CORE_ADDR from, CORE_ADDR to,
3009 struct regcache *regs)
3011 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3012 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
3013 struct aarch64_displaced_step_data dsd;
3016 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3019 /* Look for a Load Exclusive instruction which begins the sequence. */
3020 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
3022 /* We can't displaced step atomic sequences. */
/* unique_ptr keeps the closure exception-safe until release below.  */
3026 std::unique_ptr<aarch64_displaced_step_closure> dsc
3027 (new aarch64_displaced_step_closure);
3028 dsd.base.insn_addr = from;
3031 dsd.dsc = dsc.get ();
3033 aarch64_relocate_instruction (insn, &visitor,
3034 (struct aarch64_insn_data *) &dsd);
3035 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
3037 if (dsd.insn_count != 0)
3041 /* Instruction can be relocated to scratch pad. Copy
3042 relocated instruction(s) there. */
3043 for (i = 0; i < dsd.insn_count; i++)
3045 if (debug_displaced)
3047 debug_printf ("displaced: writing insn ");
3048 debug_printf ("%.8x", dsd.insn_buf[i]);
3049 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
3051 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3052 (ULONGEST) dsd.insn_buf[i]);
3060 return dsc.release ();
3063 /* Implement the "displaced_step_fixup" gdbarch method. */
/* After the relocated insn has executed, infer from the resulting PC
   whether a conditional branch was taken (PC landed on the TAKEN slot)
   or not (PC == TO + 4), then rewrite PC to FROM + pc_adjust.  */
3066 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3067 struct displaced_step_closure *dsc_,
3068 CORE_ADDR from, CORE_ADDR to,
3069 struct regcache *regs)
3071 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
3077 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3080 /* Condition is true. */
3082 else if (pc - to == 4)
3084 /* Condition is false. */
3088 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3091 if (dsc->pc_adjust != 0)
3093 if (debug_displaced)
3095 debug_printf ("displaced: fixup: set PC to %s:%d\n",
3096 paddress (gdbarch, from), dsc->pc_adjust);
3098 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3099 from + dsc->pc_adjust);
3103 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
/* NOTE(review): body not visible in this listing; presumably returns
   true unconditionally — confirm against upstream.  */
3106 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
3107 struct displaced_step_closure *closure)
3112 /* Get the correct target description for the given VQ value.
3113 If VQ is zero then it is assumed SVE is not supported.
3114 (It is not possible to set VQ to zero on an SVE system). */
/* Descriptions are created lazily and cached in tdesc_aarch64_list,
   keyed by (VQ, pauth) so each combination is built only once.  */
3117 aarch64_read_description (uint64_t vq, bool pauth_p)
3119 if (vq > AARCH64_MAX_SVE_VQ)
3120 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3121 AARCH64_MAX_SVE_VQ);
3123 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
/* Cache miss: build the description and remember it.  */
3127 tdesc = aarch64_create_target_description (vq, pauth_p);
3128 tdesc_aarch64_list[vq][pauth_p] = tdesc;
3134 /* Return the VQ used when creating the target description TDESC. */
/* Returns 0 when TDESC has no registers or no SVE feature; otherwise
   derives VQ from the bitsize of the first SVE Z register.  */
3137 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3139 const struct tdesc_feature *feature_sve;
3141 if (!tdesc_has_registers (tdesc))
3144 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3146 if (feature_sve == nullptr)
/* VL in bytes = register bitsize / 8; VQ = VL / 16.  */
3149 uint64_t vl = tdesc_register_bitsize (feature_sve,
3150 aarch64_sve_register_names[0]) / 8;
3151 return sve_vq_from_vl (vl);
3154 /* Add all the expected register sets into GDBARCH. */
/* Registers the standard groups only; no AArch64-specific groups are
   defined.  */
3157 aarch64_add_reggroups (struct gdbarch *gdbarch)
3159 reggroup_add (gdbarch, general_reggroup);
3160 reggroup_add (gdbarch, float_reggroup);
3161 reggroup_add (gdbarch, system_reggroup);
3162 reggroup_add (gdbarch, vector_reggroup);
3163 reggroup_add (gdbarch, all_reggroup);
3164 reggroup_add (gdbarch, save_reggroup);
3165 reggroup_add (gdbarch, restore_reggroup);
3168 /* Implement the "cannot_store_register" gdbarch method. */
/* Only the pauth mask registers are refused; everything else is
   writable.  Returns nonzero for unwritable registers.  */
3171 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3173 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3175 if (!tdep->has_pauth ())
3178 /* Pointer authentication registers are read-only. */
3179 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3180 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3183 /* Initialize the current architecture based on INFO. If possible,
3184 re-use an architecture from ARCHES, which is a list of
3185 architectures already created during this debugging session.
3187 Called e.g. at program startup, when reading a core file, and when
3188 reading a binary file. */
/* Overall flow: validate the target description (core regs mandatory;
   fpu/sve/pauth optional and mutually constrained), compute raw/pseudo
   register counts, reuse a cached gdbarch if one matches, otherwise
   allocate a new one and install all AArch64 gdbarch methods.
   NOTE(review): several lines (braces, valid_p checks, returns) are
   missing from this listing; code kept byte-identical.  */
3190 static struct gdbarch *
3191 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3193 struct gdbarch_tdep *tdep;
3194 struct gdbarch *gdbarch;
3195 struct gdbarch_list *best_arch;
3196 struct tdesc_arch_data *tdesc_data = NULL;
3197 const struct target_desc *tdesc = info.target_desc;
3200 const struct tdesc_feature *feature_core;
3201 const struct tdesc_feature *feature_fpu;
3202 const struct tdesc_feature *feature_sve;
3203 const struct tdesc_feature *feature_pauth;
3205 int num_pseudo_regs = 0;
3206 int first_pauth_regnum = -1;
3207 int pauth_ra_state_offset = -1;
3209 /* Ensure we always have a target description. */
3210 if (!tdesc_has_registers (tdesc))
3211 tdesc = aarch64_read_description (0, false);
3214 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3215 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3216 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3217 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
/* The core feature is mandatory; without it the description is
   unusable.  */
3219 if (feature_core == NULL)
3222 tdesc_data = tdesc_data_alloc ();
3224 /* Validate the description provides the mandatory core R registers
3225 and allocate their numbers. */
3226 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3227 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3228 AARCH64_X0_REGNUM + i,
3229 aarch64_r_register_names[i]);
3231 num_regs = AARCH64_X0_REGNUM + i;
3233 /* Add the V registers. */
3234 if (feature_fpu != NULL)
/* fpu and sve features are mutually exclusive in a description.  */
3236 if (feature_sve != NULL)
3237 error (_("Program contains both fpu and SVE features."));
3239 /* Validate the description provides the mandatory V registers
3240 and allocate their numbers. */
3241 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3242 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3243 AARCH64_V0_REGNUM + i,
3244 aarch64_v_register_names[i]);
3246 num_regs = AARCH64_V0_REGNUM + i;
3249 /* Add the SVE registers. */
3250 if (feature_sve != NULL)
3252 /* Validate the description provides the mandatory SVE registers
3253 and allocate their numbers. */
3254 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3255 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3256 AARCH64_SVE_Z0_REGNUM + i,
3257 aarch64_sve_register_names[i]);
3259 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3260 num_pseudo_regs += 32; /* add the Vn register pseudos. */
/* Either vector feature implies the full set of scalar view pseudos.  */
3263 if (feature_fpu != NULL || feature_sve != NULL)
3265 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3266 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3267 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3268 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3269 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3272 /* Add the pauth registers. */
3273 if (feature_pauth != NULL)
3275 first_pauth_regnum = num_regs;
3276 pauth_ra_state_offset = num_pseudo_regs;
3277 /* Validate the descriptor provides the mandatory PAUTH registers and
3278 allocate their numbers. */
3279 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3280 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
3281 first_pauth_regnum + i,
3282 aarch64_pauth_register_names[i]);
3285 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
/* Validation failed — free the tdesc data before bailing out.  */
3290 tdesc_data_cleanup (tdesc_data);
3294 /* AArch64 code is always little-endian. */
3295 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3297 /* If there is already a candidate, use it. */
3298 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3300 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3302 /* Found a match. */
3306 if (best_arch != NULL)
3308 if (tdesc_data != NULL)
3309 tdesc_data_cleanup (tdesc_data);
3310 return best_arch->gdbarch;
3313 tdep = XCNEW (struct gdbarch_tdep);
3314 gdbarch = gdbarch_alloc (&info, tdep);
3316 /* This should be low enough for everything. */
3317 tdep->lowest_pc = 0x20;
3318 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3319 tdep->jb_elt_size = 8;
3320 tdep->vq = aarch64_get_tdesc_vq (tdesc);
3321 tdep->pauth_reg_base = first_pauth_regnum;
/* RA_STATE's final regnum is its pseudo offset rebased past the raw
   registers; -1 when pauth is absent.  */
3322 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3323 : pauth_ra_state_offset + num_regs;
3326 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3327 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3329 /* Advance PC across function entry code. */
3330 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3332 /* The stack grows downward. */
3333 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3335 /* Breakpoint manipulation. */
3336 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3337 aarch64_breakpoint::kind_from_pc);
3338 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3339 aarch64_breakpoint::bp_from_kind);
3340 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3341 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3343 /* Information about registers, etc. */
3344 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3345 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3346 set_gdbarch_num_regs (gdbarch, num_regs);
3348 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3349 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3350 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3351 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3352 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3353 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3354 aarch64_pseudo_register_reggroup_p);
3355 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
/* ABI type sizes and formats (LP64, IEEE floats, quad long double).  */
3358 set_gdbarch_short_bit (gdbarch, 16);
3359 set_gdbarch_int_bit (gdbarch, 32);
3360 set_gdbarch_float_bit (gdbarch, 32);
3361 set_gdbarch_double_bit (gdbarch, 64);
3362 set_gdbarch_long_double_bit (gdbarch, 128);
3363 set_gdbarch_long_bit (gdbarch, 64);
3364 set_gdbarch_long_long_bit (gdbarch, 64);
3365 set_gdbarch_ptr_bit (gdbarch, 64);
3366 set_gdbarch_char_signed (gdbarch, 0);
3367 set_gdbarch_wchar_signed (gdbarch, 0);
3368 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3369 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3370 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3372 /* Internal <-> external register number maps. */
3373 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3375 /* Returning results. */
3376 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3379 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3381 /* Virtual tables. */
3382 set_gdbarch_vbit_in_delta (gdbarch, 1);
3384 /* Register architecture. */
3385 aarch64_add_reggroups (gdbarch);
3387 /* Hook in the ABI-specific overrides, if they have been registered. */
3388 info.target_desc = tdesc;
3389 info.tdesc_data = tdesc_data;
3390 gdbarch_init_osabi (info, gdbarch);
3392 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3393 /* Register DWARF CFA vendor handler. */
3394 set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3395 aarch64_execute_dwarf_cfa_vendor_op);
3397 /* Add some default predicates. */
3398 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3399 dwarf2_append_unwinders (gdbarch);
3400 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3402 frame_base_set_default (gdbarch, &aarch64_normal_base);
3404 /* Now we have tuned the configuration, set a few final things,
3405 based on what the OS ABI has told us. */
3407 if (tdep->jb_pc >= 0)
3408 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3410 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3412 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3414 /* Add standard register aliases. */
3415 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3416 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3417 value_of_aarch64_user_reg,
3418 &aarch64_register_aliases[i].regnum);
3420 register_aarch64_ravenscar_ops (gdbarch);
/* Dump AArch64-specific tdep state for "maint print architecture".  */
3426 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3428 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3433 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3434 paddress (gdbarch, tdep->lowest_pc));
/* Forward declaration for the selftest registered below.  */
3440 static void aarch64_process_record_test (void);
/* Module initializer: register the gdbarch, the "set/show debug aarch64"
   knob, and (when selftests are built in) the unit tests.  */
3445 _initialize_aarch64_tdep (void)
3447 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3450 /* Debug this file's internals. */
3451 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3452 Set AArch64 debugging."), _("\
3453 Show AArch64 debugging."), _("\
3454 When on, AArch64 specific debugging is enabled."),
3457 &setdebuglist, &showdebuglist);
3460 selftests::register_test ("aarch64-analyze-prologue",
3461 selftests::aarch64_analyze_prologue_test);
3462 selftests::register_test ("aarch64-process-record",
3463 selftests::aarch64_process_record_test);
3464 selftests::record_xml_tdesc ("aarch64.xml",
3465 aarch64_create_target_description (0, false));
3469 /* AArch64 process record-replay related structures, defines etc. */
/* Allocate and fill the register-record array (REGS) from RECORD_BUF.
   Multi-statement macro; evaluates LENGTH more than once.  */
3471 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3474 unsigned int reg_len = LENGTH; \
3477 REGS = XNEWVEC (uint32_t, reg_len); \
3478 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
/* Same as REG_ALLOC but for memory records (aarch64_mem_r entries).  */
3483 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3486 unsigned int mem_len = LENGTH; \
3489 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3490 memcpy(&MEMS->len, &RECORD_BUF[0], \
3491 sizeof(struct aarch64_mem_r) * LENGTH); \
3496 /* AArch64 record/replay structures and enumerations. */
/* One recorded memory access: length and target address.  */
3498 struct aarch64_mem_r
3500 uint64_t len; /* Record length. */
3501 uint64_t addr; /* Memory address. */
/* Result codes returned by the per-class record handlers.  */
3504 enum aarch64_record_result
3506 AARCH64_RECORD_SUCCESS,
3507 AARCH64_RECORD_UNSUPPORTED,
3508 AARCH64_RECORD_UNKNOWN
/* Decoding context passed through the record handlers: the instruction
   being recorded plus the register/memory records it produces.  */
3511 typedef struct insn_decode_record_t
3513 struct gdbarch *gdbarch;
3514 struct regcache *regcache;
3515 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3516 uint32_t aarch64_insn; /* Insn to be recorded. */
3517 uint32_t mem_rec_count; /* Count of memory records. */
3518 uint32_t reg_rec_count; /* Count of register records. */
3519 uint32_t *aarch64_regs; /* Registers to be recorded. */
3520 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3521 } insn_decode_record;
3523 /* Record handler for data processing - register instructions. */
/* Decode bits 24-27/21-23/28-29 to classify the insn (logical shifted
   register, add/sub, 3-source, conditional compare/select, 1/2-source)
   and record the destination register plus CPSR when flags are set.
   Returns an aarch64_record_result code.  */
3526 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3528 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3529 uint32_t record_buf[4];
3531 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3532 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3533 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
/* Bit 28 clear: logical (shifted register) / add-sub group.  */
3535 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3539 /* Logical (shifted register). */
3540 if (insn_bits24_27 == 0x0a)
3541 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3543 else if (insn_bits24_27 == 0x0b)
3544 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3546 return AARCH64_RECORD_UNKNOWN;
3548 record_buf[0] = reg_rd;
3549 aarch64_insn_r->reg_rec_count = 1;
/* Flag-setting variants also clobber CPSR.  */
3551 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3555 if (insn_bits24_27 == 0x0b)
3557 /* Data-processing (3 source). */
3558 record_buf[0] = reg_rd;
3559 aarch64_insn_r->reg_rec_count = 1;
3561 else if (insn_bits24_27 == 0x0a)
3563 if (insn_bits21_23 == 0x00)
3565 /* Add/subtract (with carry). */
3566 record_buf[0] = reg_rd;
3567 aarch64_insn_r->reg_rec_count = 1;
3568 if (bit (aarch64_insn_r->aarch64_insn, 29))
3570 record_buf[1] = AARCH64_CPSR_REGNUM;
3571 aarch64_insn_r->reg_rec_count = 2;
3574 else if (insn_bits21_23 == 0x02)
3576 /* Conditional compare (register) and conditional compare
3577 (immediate) instructions. */
3578 record_buf[0] = AARCH64_CPSR_REGNUM;
3579 aarch64_insn_r->reg_rec_count = 1;
3581 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3583 /* CConditional select. */
3584 /* Data-processing (2 source). */
3585 /* Data-processing (1 source). */
3586 record_buf[0] = reg_rd;
3587 aarch64_insn_r->reg_rec_count = 1;
3590 return AARCH64_RECORD_UNKNOWN;
3594 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3596 return AARCH64_RECORD_SUCCESS;
3599 /* Record handler for data processing - immediate instructions. */
/* Classify by bits 24-27 and bit 23 (PC-rel addressing, bitfield/
   extract, move wide, add/sub immediate, logical immediate) and record
   Rd plus CPSR for flag-setting forms.  */
3602 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3604 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3605 uint32_t record_buf[4];
3607 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3608 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3609 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3611 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3612 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3613 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3615 record_buf[0] = reg_rd;
3616 aarch64_insn_r->reg_rec_count = 1;
3618 else if (insn_bits24_27 == 0x01)
3620 /* Add/Subtract (immediate). */
3621 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3622 record_buf[0] = reg_rd;
3623 aarch64_insn_r->reg_rec_count = 1;
3625 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3627 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3629 /* Logical (immediate). */
3630 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3631 record_buf[0] = reg_rd;
3632 aarch64_insn_r->reg_rec_count = 1;
3634 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3637 return AARCH64_RECORD_UNKNOWN;
3639 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3641 return AARCH64_RECORD_SUCCESS;
3644 /* Record handler for branch, exception generation and system instructions. */
3647 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3649 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3650 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3651 uint32_t record_buf[4];
3653 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3654 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3655 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3657 if (insn_bits28_31 == 0x0d)
3659 /* Exception generation instructions. */
3660 if (insn_bits24_27 == 0x04)
3662 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3663 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3664 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3666 ULONGEST svc_number;
3668 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3670 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3674 return AARCH64_RECORD_UNSUPPORTED;
3676 /* System instructions. */
3677 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3679 uint32_t reg_rt, reg_crn;
3681 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3682 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3684 /* Record rt in case of sysl and mrs instructions. */
3685 if (bit (aarch64_insn_r->aarch64_insn, 21))
3687 record_buf[0] = reg_rt;
3688 aarch64_insn_r->reg_rec_count = 1;
3690 /* Record cpsr for hint and msr(immediate) instructions. */
3691 else if (reg_crn == 0x02 || reg_crn == 0x04)
3693 record_buf[0] = AARCH64_CPSR_REGNUM;
3694 aarch64_insn_r->reg_rec_count = 1;
3697 /* Unconditional branch (register). */
3698 else if((insn_bits24_27 & 0x0e) == 0x06)
3700 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3701 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3702 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3705 return AARCH64_RECORD_UNKNOWN;
3707 /* Unconditional branch (immediate). */
3708 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3710 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3711 if (bit (aarch64_insn_r->aarch64_insn, 31))
3712 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3715 /* Compare & branch (immediate), Test & branch (immediate) and
3716 Conditional branch (immediate). */
3717 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3719 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3721 return AARCH64_RECORD_SUCCESS;
3724 /* Record handler for advanced SIMD load and store instructions. */
3727 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3730 uint64_t addr_offset = 0;
/* Up to 24 register entries; memory entries are (len, addr) pairs.  */
3731 uint32_t record_buf[24];
3732 uint64_t record_buf_mem[24];
3733 uint32_t reg_rn, reg_rt;
3734 uint32_t reg_index = 0, mem_index = 0;
3735 uint8_t opcode_bits, size_bits;
/* Decode the common fields (Rt, Rn, size, opcode) and read the base
   address from Rn.  */
3737 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3738 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3739 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3740 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3741 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3744 debug_printf ("Process record: Advanced SIMD load/store\n");
3746 /* Load/store single structure. */
3747 if (bit (aarch64_insn_r->aarch64_insn, 24))
3749 uint8_t sindex, scale, selem, esize, replicate = 0;
/* SELEM = number of structure elements, from opcode<1> and bit 21.  */
3750 scale = opcode_bits >> 2;
3751 selem = ((opcode_bits & 0x02) |
3752 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
/* Validate the scale/size combination; reserved encodings bail out.  */
3756 if (size_bits & 0x01)
3757 return AARCH64_RECORD_UNKNOWN;
3760 if ((size_bits >> 1) & 0x01)
3761 return AARCH64_RECORD_UNKNOWN;
3762 if (size_bits & 0x01)
3764 if (!((opcode_bits >> 1) & 0x01))
3767 return AARCH64_RECORD_UNKNOWN;
/* LD*R (load and replicate) forms: bit 22 set, opcode<0> clear.  */
3771 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3778 return AARCH64_RECORD_UNKNOWN;
/* Replicating load: every selected vector register is overwritten.  */
3784 for (sindex = 0; sindex < selem; sindex++)
3786 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3787 reg_rt = (reg_rt + 1) % 32;
/* Non-replicating: loads record the vector registers (bit 22 set);
   stores record the touched memory (len, address) pairs.  */
3791 for (sindex = 0; sindex < selem; sindex++)
3793 if (bit (aarch64_insn_r->aarch64_insn, 22))
3794 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3797 record_buf_mem[mem_index++] = esize / 8;
3798 record_buf_mem[mem_index++] = address + addr_offset;
3800 addr_offset = addr_offset + (esize / 8);
3801 reg_rt = (reg_rt + 1) % 32;
3805 /* Load/store multiple structure. */
3808 uint8_t selem, esize, rpt, elements;
3809 uint8_t eindex, rindex;
/* Element size from the size field; bit 30 selects 128-bit (Q) vs
   64-bit (D) register form.  */
3811 esize = 8 << size_bits;
3812 if (bit (aarch64_insn_r->aarch64_insn, 30))
3813 elements = 128 / esize;
3815 elements = 64 / esize;
/* Each case sets RPT (register repeat count) and SELEM (elements per
   structure) for the LD/STn variants below.  */
3817 switch (opcode_bits)
3819 /*LD/ST4 (4 Registers). */
3824 /*LD/ST1 (4 Registers). */
3829 /*LD/ST3 (3 Registers). */
3834 /*LD/ST1 (3 Registers). */
3839 /*LD/ST1 (1 Register). */
3844 /*LD/ST2 (2 Registers). */
3849 /*LD/ST1 (2 Registers). */
3855 return AARCH64_RECORD_UNSUPPORTED;
/* Walk every element of every repeated register: loads record vector
   registers, stores record (len, address) memory pairs.  */
3858 for (rindex = 0; rindex < rpt; rindex++)
3859 for (eindex = 0; eindex < elements; eindex++)
3861 uint8_t reg_tt, sindex;
3862 reg_tt = (reg_rt + rindex) % 32;
3863 for (sindex = 0; sindex < selem; sindex++)
3865 if (bit (aarch64_insn_r->aarch64_insn, 22))
3866 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3869 record_buf_mem[mem_index++] = esize / 8;
3870 record_buf_mem[mem_index++] = address + addr_offset;
3872 addr_offset = addr_offset + (esize / 8);
3873 reg_tt = (reg_tt + 1) % 32;
/* Post-indexed addressing (bit 23): the base register is written back.  */
3878 if (bit (aarch64_insn_r->aarch64_insn, 23))
3879 record_buf[reg_index++] = reg_rn;
/* Each memory record consumed two slots (len, addr), hence the /2.  */
3881 aarch64_insn_r->reg_rec_count = reg_index;
3882 aarch64_insn_r->mem_rec_count = mem_index / 2;
3883 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3885 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3887 return AARCH64_RECORD_SUCCESS;
3890 /* Record handler for load and store instructions. */
3893 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3895 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3896 uint8_t insn_bit23, insn_bit21;
3897 uint8_t opc, size_bits, ld_flag, vector_flag;
3898 uint32_t reg_rn, reg_rt, reg_rt2;
3899 uint64_t datasize, offset;
/* Register records and (len, addr) memory record pairs.  */
3900 uint32_t record_buf[8];
3901 uint64_t record_buf_mem[8];
/* Decode the fields shared by all load/store sub-groups.  Note
   reg_rt2 (bits 10-14) overlaps insn_bits10_11; which one is
   meaningful depends on the sub-group.  */
3904 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3905 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3906 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3907 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3908 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3909 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3910 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3911 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3912 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3913 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3914 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3916 /* Load/store exclusive. */
3917 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3920 debug_printf ("Process record: load/store exclusive\n");
/* Loads record the target register(s).  */
3924 record_buf[0] = reg_rt;
3925 aarch64_insn_r->reg_rec_count = 1;
3928 record_buf[1] = reg_rt2;
3929 aarch64_insn_r->reg_rec_count = 2;
/* Stores record the memory touched; pair forms cover twice the size.  */
3935 datasize = (8 << size_bits) * 2;
3937 datasize = (8 << size_bits);
3938 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3940 record_buf_mem[0] = datasize / 8;
3941 record_buf_mem[1] = address;
3942 aarch64_insn_r->mem_rec_count = 1;
3945 /* Save register rs. */
/* Rs (bits 16-20) receives the store-exclusive status result.  */
3946 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3947 aarch64_insn_r->reg_rec_count = 1;
3951 /* Load register (literal) instructions decoding. */
3952 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3955 debug_printf ("Process record: load register (literal)\n");
/* PC-relative load: only the destination (vector or general) changes.  */
3957 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3959 record_buf[0] = reg_rt;
3960 aarch64_insn_r->reg_rec_count = 1;
3962 /* All types of load/store pair instructions decoding. */
3963 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3966 debug_printf ("Process record: load/store pair\n");
3972 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3973 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3977 record_buf[0] = reg_rt;
3978 record_buf[1] = reg_rt2;
3980 aarch64_insn_r->reg_rec_count = 2;
/* imm7 is a signed, scaled offset: sign-extend (bit 6 is the sign)
   and shift by the access size.  */
3985 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3987 size_bits = size_bits >> 1;
3988 datasize = 8 << (2 + size_bits);
3989 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3990 offset = offset << (2 + size_bits);
3991 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
/* Post-indexed forms use the unmodified base address; all others
   apply the offset before the access.  */
3993 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3995 if (imm7_off & 0x40)
3996 address = address - offset;
3998 address = address + offset;
/* Two memory records: one per register of the pair.  */
4001 record_buf_mem[0] = datasize / 8;
4002 record_buf_mem[1] = address;
4003 record_buf_mem[2] = datasize / 8;
4004 record_buf_mem[3] = address + (datasize / 8);
4005 aarch64_insn_r->mem_rec_count = 2;
/* Writeback forms also modify the base register.  */
4007 if (bit (aarch64_insn_r->aarch64_insn, 23))
4008 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4010 /* Load/store register (unsigned immediate) instructions. */
4011 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
4013 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4023 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
4025 /* PRFM (immediate) */
/* Prefetch changes no architectural state: nothing to record.  */
4026 return AARCH64_RECORD_SUCCESS;
4028 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
4030 /* LDRSW (immediate) */
4044 debug_printf ("Process record: load/store (unsigned immediate):"
4045 " size %x V %d opc %x\n", size_bits, vector_flag,
/* Stores: record the target memory.  The 12-bit immediate is
   unsigned and scaled by the access size.  */
4051 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4052 datasize = 8 << size_bits;
4053 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4055 offset = offset << size_bits;
4056 address = address + offset;
4058 record_buf_mem[0] = datasize >> 3;
4059 record_buf_mem[1] = address;
4060 aarch64_insn_r->mem_rec_count = 1;
/* Loads: record the destination (vector or general) register.  */
4065 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4067 record_buf[0] = reg_rt;
4068 aarch64_insn_r->reg_rec_count = 1;
4071 /* Load/store register (register offset) instructions. */
4072 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4073 && insn_bits10_11 == 0x02 && insn_bit21)
4076 debug_printf ("Process record: load/store (register offset)\n");
4077 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
/* opc == 0b1x with size != 0b11 are reserved/unsupported here.  */
4084 if (size_bits != 0x03)
4087 return AARCH64_RECORD_UNKNOWN;
4091 ULONGEST reg_rm_val;
/* Offset comes from Rm (bits 16-20); bit 12 (S) scales it by the
   access size.  */
4093 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4094 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4095 if (bit (aarch64_insn_r->aarch64_insn, 12))
4096 offset = reg_rm_val << size_bits;
4098 offset = reg_rm_val;
4099 datasize = 8 << size_bits;
4100 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4102 address = address + offset;
4103 record_buf_mem[0] = datasize >> 3;
4104 record_buf_mem[1] = address;
4105 aarch64_insn_r->mem_rec_count = 1;
4110 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4112 record_buf[0] = reg_rt;
4113 aarch64_insn_r->reg_rec_count = 1;
4116 /* Load/store register (immediate and unprivileged) instructions. */
4117 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4122 debug_printf ("Process record: load/store "
4123 "(immediate and unprivileged)\n");
4125 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4132 if (size_bits != 0x03)
4135 return AARCH64_RECORD_UNKNOWN;
/* imm9 is a signed byte offset (bit 8 is the sign).  */
4140 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4141 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4142 datasize = 8 << size_bits;
4143 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
/* Post-indexed (10_11 == 0b01) accesses at the unmodified base.  */
4145 if (insn_bits10_11 != 0x01)
4147 if (imm9_off & 0x0100)
4148 address = address - offset;
4150 address = address + offset;
4152 record_buf_mem[0] = datasize >> 3;
4153 record_buf_mem[1] = address;
4154 aarch64_insn_r->mem_rec_count = 1;
4159 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4161 record_buf[0] = reg_rt;
4162 aarch64_insn_r->reg_rec_count = 1;
/* Pre/post-indexed writeback also updates the base register.  */
4164 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4165 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4167 /* Advanced SIMD load/store instructions. */
4169 return aarch64_record_asimd_load_store (aarch64_insn_r);
4171 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4173 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4175 return AARCH64_RECORD_SUCCESS;
4178 /* Record handler for data processing SIMD and floating point instructions. */
4181 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4183 uint8_t insn_bit21, opcode, rmode, reg_rd;
4184 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4185 uint8_t insn_bits11_14;
4186 uint32_t record_buf[2];
4188 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4189 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4190 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4191 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4192 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4193 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4194 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4195 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4196 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4199 debug_printf ("Process record: data processing SIMD/FP: ");
4201 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4203 /* Floating point - fixed point conversion instructions. */
4207 debug_printf ("FP - fixed point conversion");
4209 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4210 record_buf[0] = reg_rd;
4212 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4214 /* Floating point - conditional compare instructions. */
4215 else if (insn_bits10_11 == 0x01)
4218 debug_printf ("FP - conditional compare");
4220 record_buf[0] = AARCH64_CPSR_REGNUM;
4222 /* Floating point - data processing (2-source) and
4223 conditional select instructions. */
4224 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4227 debug_printf ("FP - DP (2-source)");
4229 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4231 else if (insn_bits10_11 == 0x00)
4233 /* Floating point - immediate instructions. */
4234 if ((insn_bits12_15 & 0x01) == 0x01
4235 || (insn_bits12_15 & 0x07) == 0x04)
4238 debug_printf ("FP - immediate");
4239 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4241 /* Floating point - compare instructions. */
4242 else if ((insn_bits12_15 & 0x03) == 0x02)
4245 debug_printf ("FP - immediate");
4246 record_buf[0] = AARCH64_CPSR_REGNUM;
4248 /* Floating point - integer conversions instructions. */
4249 else if (insn_bits12_15 == 0x00)
4251 /* Convert float to integer instruction. */
4252 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4255 debug_printf ("float to int conversion");
4257 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4259 /* Convert integer to float instruction. */
4260 else if ((opcode >> 1) == 0x01 && !rmode)
4263 debug_printf ("int to float conversion");
4265 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4267 /* Move float to integer instruction. */
4268 else if ((opcode >> 1) == 0x03)
4271 debug_printf ("move float to int");
4273 if (!(opcode & 0x01))
4274 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4276 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4279 return AARCH64_RECORD_UNKNOWN;
4282 return AARCH64_RECORD_UNKNOWN;
4285 return AARCH64_RECORD_UNKNOWN;
4287 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4290 debug_printf ("SIMD copy");
4292 /* Advanced SIMD copy instructions. */
4293 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4294 && !bit (aarch64_insn_r->aarch64_insn, 15)
4295 && bit (aarch64_insn_r->aarch64_insn, 10))
4297 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4298 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4300 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4303 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4305 /* All remaining floating point or advanced SIMD instructions. */
4309 debug_printf ("all remain");
4311 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4315 debug_printf ("\n");
4317 aarch64_insn_r->reg_rec_count++;
4318 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4319 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4321 return AARCH64_RECORD_SUCCESS;
4324 /* Decodes insns type and invokes its record handler. */
4327 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4329 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4331 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4332 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4333 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4334 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4336 /* Data processing - immediate instructions. */
4337 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4338 return aarch64_record_data_proc_imm (aarch64_insn_r);
4340 /* Branch, exception generation and system instructions. */
4341 if (ins_bit26 && !ins_bit27 && ins_bit28)
4342 return aarch64_record_branch_except_sys (aarch64_insn_r);
4344 /* Load and store instructions. */
4345 if (!ins_bit25 && ins_bit27)
4346 return aarch64_record_load_store (aarch64_insn_r);
4348 /* Data processing - register instructions. */
4349 if (ins_bit25 && !ins_bit26 && ins_bit27)
4350 return aarch64_record_data_proc_reg (aarch64_insn_r);
4352 /* Data processing - SIMD and floating point instructions. */
4353 if (ins_bit25 && ins_bit26 && ins_bit27)
4354 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4356 return AARCH64_RECORD_UNSUPPORTED;
4359 /* Cleans up local record registers and memory allocations. */
4362 deallocate_reg_mem (insn_decode_record *record)
4364 xfree (record->aarch64_regs);
4365 xfree (record->aarch64_mems);
4369 namespace selftests {
/* Unit test for the process-record instruction decoder: builds a bare
   aarch64 gdbarch (no regcache needed) and checks that PRFM decodes
   successfully while recording no registers or memory.  */
4372 aarch64_process_record_test (void)
4374 struct gdbarch_info info;
4377 gdbarch_info_init (&info);
4378 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4380 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4381 SELF_CHECK (gdbarch != NULL);
4383 insn_decode_record aarch64_record;
/* regcache is NULL: PRFM decoding must not touch register contents.  */
4385 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4386 aarch64_record.regcache = NULL;
4387 aarch64_record.this_addr = 0;
4388 aarch64_record.gdbarch = gdbarch;
4390 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4391 aarch64_record.aarch64_insn = 0xf9800020;
4392 ret = aarch64_record_decode_insn_handler (&aarch64_record);
/* A prefetch changes no architectural state, so both record counts
   must stay zero.  */
4393 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4394 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4395 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4397 deallocate_reg_mem (&aarch64_record);
4400 } // namespace selftests
4401 #endif /* GDB_SELF_TEST */
4403 /* Parse the current instruction and record the values of the registers and
4404 memory that will be changed in current instruction to record_arch_list
4405 return -1 if something is wrong. */
4408 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4409 CORE_ADDR insn_addr)
4411 uint32_t rec_no = 0;
4412 uint8_t insn_size = 4;
4414 gdb_byte buf[insn_size];
4415 insn_decode_record aarch64_record;
4417 memset (&buf[0], 0, insn_size);
4418 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4419 target_read_memory (insn_addr, &buf[0], insn_size);
4420 aarch64_record.aarch64_insn
4421 = (uint32_t) extract_unsigned_integer (&buf[0],
4423 gdbarch_byte_order (gdbarch));
4424 aarch64_record.regcache = regcache;
4425 aarch64_record.this_addr = insn_addr;
4426 aarch64_record.gdbarch = gdbarch;
4428 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4429 if (ret == AARCH64_RECORD_UNSUPPORTED)
4431 printf_unfiltered (_("Process record does not support instruction "
4432 "0x%0x at address %s.\n"),
4433 aarch64_record.aarch64_insn,
4434 paddress (gdbarch, insn_addr));
4440 /* Record registers. */
4441 record_full_arch_list_add_reg (aarch64_record.regcache,
4443 /* Always record register CPSR. */
4444 record_full_arch_list_add_reg (aarch64_record.regcache,
4445 AARCH64_CPSR_REGNUM);
4446 if (aarch64_record.aarch64_regs)
4447 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4448 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4449 aarch64_record.aarch64_regs[rec_no]))
4452 /* Record memories. */
4453 if (aarch64_record.aarch64_mems)
4454 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4455 if (record_full_arch_list_add_mem
4456 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4457 aarch64_record.aarch64_mems[rec_no].len))
4460 if (record_full_arch_list_add_end ())
4464 deallocate_reg_mem (&aarch64_record);