1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2019 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
31 #include "arch-utils.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
47 #include "gdbsupport/selftest.h"
49 #include "aarch64-tdep.h"
50 #include "aarch64-ravenscar-thread.h"
53 #include "elf/aarch64.h"
55 #include "gdbsupport/vec.h"
58 #include "record-full.h"
59 #include "arch/aarch64-insn.h"
62 #include "opcode/aarch64.h"
/* Bit-twiddling helpers used when decoding 32-bit A64 instructions.

   submask(x)        -- mask covering bits [0, x] inclusive.
   bit(obj, st)      -- extract the single bit ST of OBJ.
   bits(obj, st, fn) -- extract the bit field [ST, FN] inclusive of OBJ.

   Use an unsigned 64-bit constant for the shift: the previous '1L'
   form shifted a (possibly 32-bit) signed long, which is undefined
   behavior for x >= 31 on ILP32/LLP64 hosts (CERT INT34-C).  */
#define submask(x) ((1ULL << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
69 /* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
/* NOTE(review): the sentence above continues on a line elided from this
   extract; presumably "... at most HA_MAX_NUM_FLDS members", matching the
   constant below -- TODO confirm against the full source.  */
71 #define HA_MAX_NUM_FLDS 4
73 /* All possible aarch64 target descriptors. */
/* First index: SVE vector quotient, 0 .. AARCH64_MAX_SVE_VQ.
   Second index: whether pointer authentication is present, per the
   inline "pauth" subscript comment below.  */
74 struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];
76 /* The standard register names, and all the valid aliases for them. */
/* NOTE(review): the struct declaration (tag and the regnum member) sits on
   lines elided from this extract; only the 'name' member is visible.  */
79 const char *const name;
81 } aarch64_register_aliases[] =
83 /* 64-bit register names. */
/* ABI-role aliases for the frame pointer, link register and stack
   pointer.  */
84 {"fp", AARCH64_FP_REGNUM},
85 {"lr", AARCH64_LR_REGNUM},
86 {"sp", AARCH64_SP_REGNUM},
88 /* 32-bit register names. */
/* wN is the low 32-bit view of xN, so each maps to the same raw
   register number as the corresponding X register.  */
89 {"w0", AARCH64_X0_REGNUM + 0},
90 {"w1", AARCH64_X0_REGNUM + 1},
91 {"w2", AARCH64_X0_REGNUM + 2},
92 {"w3", AARCH64_X0_REGNUM + 3},
93 {"w4", AARCH64_X0_REGNUM + 4},
94 {"w5", AARCH64_X0_REGNUM + 5},
95 {"w6", AARCH64_X0_REGNUM + 6},
96 {"w7", AARCH64_X0_REGNUM + 7},
97 {"w8", AARCH64_X0_REGNUM + 8},
98 {"w9", AARCH64_X0_REGNUM + 9},
99 {"w10", AARCH64_X0_REGNUM + 10},
100 {"w11", AARCH64_X0_REGNUM + 11},
101 {"w12", AARCH64_X0_REGNUM + 12},
102 {"w13", AARCH64_X0_REGNUM + 13},
103 {"w14", AARCH64_X0_REGNUM + 14},
104 {"w15", AARCH64_X0_REGNUM + 15},
105 {"w16", AARCH64_X0_REGNUM + 16},
106 {"w17", AARCH64_X0_REGNUM + 17},
107 {"w18", AARCH64_X0_REGNUM + 18},
108 {"w19", AARCH64_X0_REGNUM + 19},
109 {"w20", AARCH64_X0_REGNUM + 20},
110 {"w21", AARCH64_X0_REGNUM + 21},
111 {"w22", AARCH64_X0_REGNUM + 22},
112 {"w23", AARCH64_X0_REGNUM + 23},
113 {"w24", AARCH64_X0_REGNUM + 24},
114 {"w25", AARCH64_X0_REGNUM + 25},
115 {"w26", AARCH64_X0_REGNUM + 26},
116 {"w27", AARCH64_X0_REGNUM + 27},
117 {"w28", AARCH64_X0_REGNUM + 28},
118 {"w29", AARCH64_X0_REGNUM + 29},
119 {"w30", AARCH64_X0_REGNUM + 30},
/* ip0/ip1 are additional aliases for x16/x17 (note the same regnum
   offsets as w16/w17 above).  NOTE(review): a comment line introducing
   them appears to be elided from this extract.  */
122 {"ip0", AARCH64_X0_REGNUM + 16},
123 {"ip1", AARCH64_X0_REGNUM + 17}
126 /* The required core 'R' registers. */
127 static const char *const aarch64_r_register_names[] =
129 /* These registers must appear in consecutive RAW register number
130 order and they must begin with AARCH64_X0_REGNUM! */
131 "x0", "x1", "x2", "x3",
132 "x4", "x5", "x6", "x7",
133 "x8", "x9", "x10", "x11",
134 "x12", "x13", "x14", "x15",
135 "x16", "x17", "x18", "x19",
136 "x20", "x21", "x22", "x23",
137 "x24", "x25", "x26", "x27",
138 "x28", "x29", "x30", "sp",
/* NOTE(review): the array continues on elided lines (original lines
   139-141); any trailing entries such as pc/status registers are not
   visible here -- confirm against the full source.  */
142 /* The FP/SIMD 'V' registers. */
143 static const char *const aarch64_v_register_names[] =
145 /* These registers must appear in consecutive RAW register number
146 order and they must begin with AARCH64_V0_REGNUM! */
147 "v0", "v1", "v2", "v3",
148 "v4", "v5", "v6", "v7",
149 "v8", "v9", "v10", "v11",
150 "v12", "v13", "v14", "v15",
151 "v16", "v17", "v18", "v19",
152 "v20", "v21", "v22", "v23",
153 "v24", "v25", "v26", "v27",
154 "v28", "v29", "v30", "v31",
/* NOTE(review): the array's tail (original lines 155-157, likely the
   FP status/control entries and closing brace) is elided from this
   extract.  */
159 /* The SVE 'Z' and 'P' registers. */
160 static const char *const aarch64_sve_register_names[] =
162 /* These registers must appear in consecutive RAW register number
163 order and they must begin with AARCH64_SVE_Z0_REGNUM! */
164 "z0", "z1", "z2", "z3",
165 "z4", "z5", "z6", "z7",
166 "z8", "z9", "z10", "z11",
167 "z12", "z13", "z14", "z15",
168 "z16", "z17", "z18", "z19",
169 "z20", "z21", "z22", "z23",
170 "z24", "z25", "z26", "z27",
171 "z28", "z29", "z30", "z31",
/* The SVE predicate registers follow the vector registers.  */
173 "p0", "p1", "p2", "p3",
174 "p4", "p5", "p6", "p7",
175 "p8", "p9", "p10", "p11",
176 "p12", "p13", "p14", "p15",
/* NOTE(review): trailing entries (original lines 177-178, presumably
   ffr/vg) and the closing brace are elided from this extract.  */
/* Names for the pointer-authentication pseudo state registers.  */
180 static const char *const aarch64_pauth_register_names[] =
182 /* Authentication mask for data pointer. */
/* NOTE(review): the actual name-string entries for both masks sit on
   lines elided from this extract; only the descriptive comments are
   visible.  */
184 /* Authentication mask for code pointer. */
188 /* AArch64 prologue cache structure. */
189 struct aarch64_prologue_cache
/* NOTE(review): the member declarations themselves (func, prev_pc,
   prev_sp, available_p, framesize, framereg) sit on lines elided from
   this extract; only their descriptive comments and SAVED_REGS remain
   visible.  Member names below are inferred from their use elsewhere
   in this file (e.g. cache->framereg, cache->prev_sp).  */
191 /* The program counter at the start of the function. It is used to
192 identify this frame as a prologue frame. */
195 /* The program counter at the time this frame was created; i.e. where
196 this function was called from. It is used to identify this frame as a
200 /* The stack pointer at the time this frame was created; i.e. the
201 caller's stack pointer when this function was called. It is used
202 to identify this frame. */
205 /* Is the target available to read from? */
208 /* The frame base for this frame is just prev_sp - frame size.
209 FRAMESIZE is the distance from the frame pointer to the
210 initial stack pointer. */
213 /* The register used to hold the frame pointer for this frame. */
216 /* Saved register offsets. */
217 struct trad_frame_saved_reg *saved_regs;
/* Callback that prints the current state of the "debug aarch64"
   setting; VALUE is the textual setting value supplied by the command
   machinery.  */
221 show_aarch64_debug (struct ui_file *file, int from_tty,
222 struct cmd_list_element *c, const char *value)
224 fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
229 /* Abstract instruction reader. */
/* Abstracts how instructions are fetched so prologue analysis can run
   against either the real target or canned instruction sequences (see
   the selftests reader later in this file).  */
231 class abstract_instruction_reader
234 /* Read in one instruction. */
/* Read LEN bytes at MEMADDR using BYTE_ORDER and return the value.  */
235 virtual ULONGEST read (CORE_ADDR memaddr, int len,
236 enum bfd_endian byte_order) = 0;
239 /* Instruction reader from real target. */
241 class instruction_reader : public abstract_instruction_reader
/* Fetch the instruction from target memory (code cache).  */
244 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
247 return read_code_unsigned_integer (memaddr, len, byte_order);
253 /* If address signing is enabled, mask off the signature bits from the link
254 register, which is passed by value in ADDR, using the register values in
/* NOTE(review): the comment and signature continue on elided lines;
   the function evidently returns the (possibly unmasked) ADDR -- the
   return statement is on an elided line.  */
258 aarch64_frame_unmask_lr (struct gdbarch_tdep *tdep,
259 struct frame_info *this_frame, CORE_ADDR addr)
/* Only unmask when the target has pauth and the unwound RA_STATE
   pseudo register says return addresses are currently mangled.  */
261 if (tdep->has_pauth ()
262 && frame_unwind_register_unsigned (this_frame,
263 tdep->pauth_ra_state_regnum))
265 int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
/* Clear every bit of ADDR covered by the code-pointer mask.  */
266 CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
267 addr = addr & ~cmask;
269 /* Record in the frame that the link register required unmasking. */
270 set_frame_previous_pc_masked (this_frame);
276 /* Implement the "get_pc_address_flags" gdbarch method. */
279 aarch64_get_pc_address_flags (frame_info *frame, CORE_ADDR pc)
/* When the frame's PC needed PAC unmasking, report it; the actual
   return values live on lines elided from this extract -- presumably a
   flag string for masked PCs and an empty string otherwise (TODO
   confirm).  */
281 if (pc != 0 && get_frame_pc_masked (frame))
287 /* Analyze a prologue, looking for a recognizable stack frame
288 and frame pointer. Scan until we encounter a store that could
289 clobber the stack frame unexpectedly, or an unknown instruction. */
/* Symbolically executes prologue instructions in [START, LIMIT) using
   the prologue-value machinery, filling CACHE (frame register, frame
   size, saved register offsets) when CACHE is non-NULL.  Returns the
   address where analysis stopped (see the selftests below, which
   compare the return value against instruction counts).
   NOTE(review): many lines are elided from this extract (braces,
   declarations of i/insn/inst, break statements, the final return).  */
292 aarch64_analyze_prologue (struct gdbarch *gdbarch,
293 CORE_ADDR start, CORE_ADDR limit,
294 struct aarch64_prologue_cache *cache,
295 abstract_instruction_reader& reader)
297 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
299 /* Track X registers and D registers in prologue. */
300 pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
/* Start each register as "itself plus 0" so later arithmetic can be
   expressed relative to entry values.  */
302 for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
303 regs[i] = pv_register (i, 0);
304 pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
/* A64 instructions are fixed 4-byte; walk one at a time.  */
306 for (; start < limit; start += 4)
311 insn = reader.read (start, 4, byte_order_for_code);
/* Stop at anything the opcode library cannot decode.  */
313 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
/* ADD/SUB immediate: track SP/FP adjustments symbolically.  */
316 if (inst.opcode->iclass == addsub_imm
317 && (inst.opcode->op == OP_ADD
318 || strcmp ("sub", inst.opcode->name) == 0))
320 unsigned rd = inst.operands[0].reg.regno;
321 unsigned rn = inst.operands[1].reg.regno;
323 gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
324 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
325 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
326 gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
328 if (inst.opcode->op == OP_ADD)
330 regs[rd] = pv_add_constant (regs[rn],
331 inst.operands[2].imm.value);
335 regs[rd] = pv_add_constant (regs[rn],
336 -inst.operands[2].imm.value);
/* ADRP produces a PC-relative value we cannot track.  */
339 else if (inst.opcode->iclass == pcreladdr
340 && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
342 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
343 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
345 regs[inst.operands[0].reg.regno] = pv_unknown ();
347 else if (inst.opcode->iclass == branch_imm)
349 /* Stop analysis on branch. */
352 else if (inst.opcode->iclass == condbranch)
354 /* Stop analysis on branch. */
357 else if (inst.opcode->iclass == branch_reg)
359 /* Stop analysis on branch. */
362 else if (inst.opcode->iclass == compbranch)
364 /* Stop analysis on branch. */
/* MOVZ loads an immediate; value itself is irrelevant to frame layout.  */
367 else if (inst.opcode->op == OP_MOVZ)
369 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
370 regs[inst.operands[0].reg.regno] = pv_unknown ();
/* ORR register: 'mov xD, xN' is encoded as orr with zero shift.  */
372 else if (inst.opcode->iclass == log_shift
373 && strcmp (inst.opcode->name, "orr") == 0)
375 unsigned rd = inst.operands[0].reg.regno;
376 unsigned rn = inst.operands[1].reg.regno;
377 unsigned rm = inst.operands[2].reg.regno;
379 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
380 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
381 gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
383 if (inst.operands[2].shifter.amount == 0
384 && rn == AARCH64_SP_REGNUM)
/* Any other orr form is not understood: give up (debug trace below).  */
390 debug_printf ("aarch64: prologue analysis gave up "
391 "addr=%s opcode=0x%x (orr x register)\n",
392 core_addr_to_string_nz (start), insn);
/* STUR: unscaled-offset store; record it into the stack model.  */
397 else if (inst.opcode->op == OP_STUR)
399 unsigned rt = inst.operands[0].reg.regno;
400 unsigned rn = inst.operands[1].addr.base_regno;
401 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
403 gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
404 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
405 gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
406 gdb_assert (!inst.operands[1].addr.offset.is_reg);
409 (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
/* STP: store pair, optionally pre-indexed with writeback.  */
412 else if ((inst.opcode->iclass == ldstpair_off
413 || (inst.opcode->iclass == ldstpair_indexed
414 && inst.operands[2].addr.preind))
415 && strcmp ("stp", inst.opcode->name) == 0)
417 /* STP with addressing mode Pre-indexed and Base register. */
420 unsigned rn = inst.operands[2].addr.base_regno;
421 int32_t imm = inst.operands[2].addr.offset.imm;
422 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
424 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
425 || inst.operands[0].type == AARCH64_OPND_Ft);
426 gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
427 || inst.operands[1].type == AARCH64_OPND_Ft2);
428 gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
429 gdb_assert (!inst.operands[2].addr.offset.is_reg);
431 /* If recording this store would invalidate the store area
432 (perhaps because rn is not known) then we should abandon
433 further prologue analysis. */
434 if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
437 if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
440 rt1 = inst.operands[0].reg.regno;
441 rt2 = inst.operands[1].reg.regno;
/* FP/SIMD registers are tracked after the X registers in REGS.  */
442 if (inst.operands[0].type == AARCH64_OPND_Ft)
444 rt1 += AARCH64_X_REGISTER_COUNT;
445 rt2 += AARCH64_X_REGISTER_COUNT;
448 stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
449 stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);
451 if (inst.operands[2].addr.writeback)
452 regs[rn] = pv_add_constant (regs[rn], imm);
/* STR immediate (signed or unsigned offset) with SP as base.  */
455 else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate. */
456 || (inst.opcode->iclass == ldst_pos /* Unsigned immediate. */
457 && (inst.opcode->op == OP_STR_POS
458 || inst.opcode->op == OP_STRF_POS)))
459 && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
460 && strcmp ("str", inst.opcode->name) == 0)
462 /* STR (immediate) */
463 unsigned int rt = inst.operands[0].reg.regno;
464 int32_t imm = inst.operands[1].addr.offset.imm;
465 unsigned int rn = inst.operands[1].addr.base_regno;
466 int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
467 gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
468 || inst.operands[0].type == AARCH64_OPND_Ft);
470 if (inst.operands[0].type == AARCH64_OPND_Ft)
471 rt += AARCH64_X_REGISTER_COUNT;
473 stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
474 if (inst.operands[1].addr.writeback)
475 regs[rn] = pv_add_constant (regs[rn], imm);
477 else if (inst.opcode->iclass == testbranch)
479 /* Stop analysis on branch. */
/* System instructions: recognize the PAC sign/strip pseudo-NOPs.  */
482 else if (inst.opcode->iclass == ic_system)
484 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
485 int ra_state_val = 0;
487 if (insn == 0xd503233f /* paciasp. */
488 || insn == 0xd503237f /* pacibsp. */)
490 /* Return addresses are mangled. */
493 else if (insn == 0xd50323bf /* autiasp. */
494 || insn == 0xd50323ff /* autibsp. */)
496 /* Return addresses are not mangled. */
502 debug_printf ("aarch64: prologue analysis gave up addr=%s"
503 " opcode=0x%x (iclass)\n",
504 core_addr_to_string_nz (start), insn);
/* Record the RA_STATE value implied by the PAC instruction.  */
508 if (tdep->has_pauth () && cache != nullptr)
509 trad_frame_set_value (cache->saved_regs,
510 tdep->pauth_ra_state_regnum,
517 debug_printf ("aarch64: prologue analysis gave up addr=%s"
519 core_addr_to_string_nz (start), insn);
/* Post-scan: decide which register anchors the frame.  Prefer FP when
   it still holds an SP-relative value, else fall back to SP itself.  */
528 if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
530 /* Frame pointer is fp. Frame size is constant. */
531 cache->framereg = AARCH64_FP_REGNUM;
532 cache->framesize = -regs[AARCH64_FP_REGNUM].k;
534 else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
536 /* Try the stack pointer. */
537 cache->framesize = -regs[AARCH64_SP_REGNUM].k;
538 cache->framereg = AARCH64_SP_REGNUM;
542 /* We're just out of luck. We don't know where the frame is. */
543 cache->framereg = -1;
544 cache->framesize = 0;
/* Record where each X register was saved on the modeled stack.  */
547 for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
551 if (stack.find_reg (gdbarch, i, &offset))
552 cache->saved_regs[i].addr = offset;
/* Likewise for the D registers, which live past the X registers in
   REGS and at pseudo-register offsets in SAVED_REGS.  */
555 for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
557 int regnum = gdbarch_num_regs (gdbarch);
560 if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
562 cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
/* Convenience overload: analyze the prologue reading instructions from
   the real target via the default instruction_reader.  */
569 aarch64_analyze_prologue (struct gdbarch *gdbarch,
570 CORE_ADDR start, CORE_ADDR limit,
571 struct aarch64_prologue_cache *cache)
573 instruction_reader reader;
575 return aarch64_analyze_prologue (gdbarch, start, limit, cache,
581 namespace selftests {
583 /* Instruction reader from manually cooked instruction sequences. */
585 class instruction_reader_test : public abstract_instruction_reader
/* Wrap a fixed array of 32-bit instruction words; SIZE is deduced from
   the array reference at the call site.  */
588 template<size_t SIZE>
589 explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
590 : m_insns (insns), m_insns_size (SIZE)
/* Serve reads by indexing the canned array; only aligned whole
   instructions within bounds are legal.  */
593 ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
596 SELF_CHECK (len == 4);
597 SELF_CHECK (memaddr % 4 == 0);
598 SELF_CHECK (memaddr / 4 < m_insns_size);
600 return m_insns[memaddr / 4];
604 const uint32_t *m_insns;
/* Unit test for aarch64_analyze_prologue: runs the analyzer over
   hand-assembled prologues and checks the computed frame register,
   frame size and saved-register offsets.  */
609 aarch64_analyze_prologue_test (void)
611 struct gdbarch_info info;
613 gdbarch_info_init (&info);
614 info.bfd_arch_info = bfd_scan_arch ("aarch64");
616 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
617 SELF_CHECK (gdbarch != NULL);
619 struct aarch64_prologue_cache cache;
620 cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
622 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
624 /* Test the simple prologue in which frame pointer is used. */
626 static const uint32_t insns[] = {
627 0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
628 0x910003fd, /* mov x29, sp */
629 0x97ffffe6, /* bl 0x400580 */
631 instruction_reader_test reader (insns);
/* Analysis must stop at the bl, i.e. after 2 instructions.  */
633 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
634 SELF_CHECK (end == 4 * 2);
636 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
637 SELF_CHECK (cache.framesize == 272);
639 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
641 if (i == AARCH64_FP_REGNUM)
642 SELF_CHECK (cache.saved_regs[i].addr == -272)
/* NOTE(review): the line above ends without its semicolon here because
   intervening source lines are elided from this extract.  */
643 else if (i == AARCH64_LR_REGNUM)
644 SELF_CHECK (cache.saved_regs[i].addr == -264);
646 SELF_CHECK (cache.saved_regs[i].addr == -1);
/* No D registers are saved by this prologue.  */
649 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
651 int regnum = gdbarch_num_regs (gdbarch);
653 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
658 /* Test a prologue in which STR is used and frame pointer is not
661 static const uint32_t insns[] = {
662 0xf81d0ff3, /* str x19, [sp, #-48]! */
663 0xb9002fe0, /* str w0, [sp, #44] */
664 0xf90013e1, /* str x1, [sp, #32]*/
665 0xfd000fe0, /* str d0, [sp, #24] */
666 0xaa0203f3, /* mov x19, x2 */
667 0xf94013e0, /* ldr x0, [sp, #32] */
669 instruction_reader_test reader (insns);
671 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
672 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
/* Analysis stops at the ldr, after 5 instructions.  */
674 SELF_CHECK (end == 4 * 5);
676 SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
677 SELF_CHECK (cache.framesize == 48);
679 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
682 SELF_CHECK (cache.saved_regs[i].addr == -16)
683 /* NOTE(review): conditions selecting registers here (original lines
684 680-685) are elided from this extract.  */
684 SELF_CHECK (cache.saved_regs[i].addr == -48)
686 SELF_CHECK (cache.saved_regs[i].addr == -1);
689 for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
691 int regnum = gdbarch_num_regs (gdbarch);
694 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
697 SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
702 /* Test a prologue in which there is a return address signing instruction. */
703 if (tdep->has_pauth ())
705 static const uint32_t insns[] = {
706 0xd503233f, /* paciasp */
707 0xa9bd7bfd, /* stp x29, x30, [sp, #-48]! */
708 0x910003fd, /* mov x29, sp */
709 0xf801c3f3, /* str x19, [sp, #28] */
710 0xb9401fa0, /* ldr x19, [x29, #28] */
712 instruction_reader_test reader (insns);
714 trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
715 CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
/* paciasp + 3 frame-setup insns are consumed; the ldr stops analysis.  */
718 SELF_CHECK (end == 4 * 4);
719 SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
720 SELF_CHECK (cache.framesize == 48);
722 for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
725 SELF_CHECK (cache.saved_regs[i].addr == -20)
726 else if (i == AARCH64_FP_REGNUM)
727 SELF_CHECK (cache.saved_regs[i].addr == -48);
728 else if (i == AARCH64_LR_REGNUM)
729 SELF_CHECK (cache.saved_regs[i].addr == -40);
731 SELF_CHECK (cache.saved_regs[i].addr == -1);
/* The paciasp must have recorded RA_STATE == 1 (mangled).  */
734 if (tdep->has_pauth ())
736 SELF_CHECK (trad_frame_value_p (cache.saved_regs,
737 tdep->pauth_ra_state_regnum));
738 SELF_CHECK (cache.saved_regs[tdep->pauth_ra_state_regnum].addr == 1);
742 } // namespace selftests
743 #endif /* GDB_SELF_TEST */
745 /* Implement the "skip_prologue" gdbarch method. */
/* Returns the address just past the function prologue at PC, so that
   breakpoints on a function land after its frame setup.  */
748 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
750 CORE_ADDR func_addr, limit_pc;
752 /* See if we can determine the end of the prologue via the symbol
753 table. If so, then return either PC, or the PC after the
754 prologue, whichever is greater. */
755 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
757 CORE_ADDR post_prologue_pc
758 = skip_prologue_using_sal (gdbarch, func_addr);
760 if (post_prologue_pc != 0)
761 return std::max (pc, post_prologue_pc);
764 /* Can't determine prologue from the symbol table, need to examine
767 /* Find an upper limit on the function prologue using the debug
768 information. If the debug information could not be used to
769 provide that bound, then use an arbitrary large number as the
771 limit_pc = skip_prologue_using_sal (gdbarch, pc);
/* Arbitrary 128-byte cap when line info gives no bound.  */
773 limit_pc = pc + 128; /* Magic. */
775 /* Try disassembling prologue. */
776 return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
779 /* Scan the function prologue for THIS_FRAME and populate the prologue
783 aarch64_scan_prologue (struct frame_info *this_frame,
784 struct aarch64_prologue_cache *cache)
786 CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
787 CORE_ADDR prologue_start;
788 CORE_ADDR prologue_end;
789 CORE_ADDR prev_pc = get_frame_pc (this_frame);
790 struct gdbarch *gdbarch = get_frame_arch (this_frame);
792 cache->prev_pc = prev_pc;
794 /* Assume we do not find a frame. */
795 cache->framereg = -1;
796 cache->framesize = 0;
/* If we have symbols, bound the prologue using line-table info and run
   the instruction-level analyzer over it.  */
798 if (find_pc_partial_function (block_addr, NULL, &prologue_start,
801 struct symtab_and_line sal = find_pc_line (prologue_start, 0);
805 /* No line info so use the current PC. */
806 prologue_end = prev_pc;
808 else if (sal.end < prologue_end)
810 /* The next line begins after the function end. */
811 prologue_end = sal.end;
/* Never scan past the point we have actually reached.  */
814 prologue_end = std::min (prologue_end, prev_pc);
815 aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
/* No symbol info: fall back to assuming a standard frame record at
   the current FP -- x29 at offset 0, x30 (LR) at offset 8.
   NOTE(review): the surrounding else-branch lines are elided from
   this extract.  */
821 frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
825 cache->framereg = AARCH64_FP_REGNUM;
826 cache->framesize = 16;
827 cache->saved_regs[29].addr = 0;
828 cache->saved_regs[30].addr = 8;
832 /* Fill in *CACHE with information about the prologue of *THIS_FRAME. This
833 function may throw an exception if the inferior's registers or memory is
837 aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
838 struct aarch64_prologue_cache *cache)
840 CORE_ADDR unwound_fp;
843 aarch64_scan_prologue (this_frame, cache);
/* A framereg of -1 means the scan failed to find a frame; bail.  */
845 if (cache->framereg == -1)
848 unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg)
/* The caller's SP is the frame register plus the frame size.  */
852 cache->prev_sp = unwound_fp + cache->framesize;
854 /* Calculate actual addresses of saved registers using offsets
855 determined by aarch64_analyze_prologue. */
856 for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
857 if (trad_frame_addr_p (cache->saved_regs, reg))
858 cache->saved_regs[reg].addr += cache->prev_sp;
860 cache->func = get_frame_func (this_frame);
/* Mark the cache usable only after everything above succeeded.  */
862 cache->available_p = 1;
865 /* Allocate and fill in *THIS_CACHE with information about the prologue of
866 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
867 Return a pointer to the current aarch64_prologue_cache in
870 static struct aarch64_prologue_cache *
871 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
873 struct aarch64_prologue_cache *cache;
/* Memoized: reuse the cache if a previous call built it.  */
875 if (*this_cache != NULL)
876 return (struct aarch64_prologue_cache *) *this_cache;
878 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
879 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
/* Populate the cache, tolerating unavailable target state: only
   NOT_AVAILABLE_ERROR is swallowed (leaving available_p unset),
   anything else propagates.  */
884 aarch64_make_prologue_cache_1 (this_frame, cache);
886 catch (const gdb_exception_error &ex)
888 if (ex.error != NOT_AVAILABLE_ERROR)
895 /* Implement the "stop_reason" frame_unwind method. */
897 static enum unwind_stop_reason
898 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
901 struct aarch64_prologue_cache *cache
902 = aarch64_make_prologue_cache (this_frame, this_cache);
904 if (!cache->available_p)
905 return UNWIND_UNAVAILABLE;
907 /* Halt the backtrace at "_start". */
908 if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
909 return UNWIND_OUTERMOST;
911 /* We've hit a wall, stop. */
912 if (cache->prev_sp == 0)
913 return UNWIND_OUTERMOST;
915 return UNWIND_NO_REASON;
918 /* Our frame ID for a normal frame is the current function's starting
919 PC and the caller's SP when we were called. */
922 aarch64_prologue_this_id (struct frame_info *this_frame,
923 void **this_cache, struct frame_id *this_id)
925 struct aarch64_prologue_cache *cache
926 = aarch64_make_prologue_cache (this_frame, this_cache);
/* Without a readable stack we can only identify the frame by its
   function entry point.  */
928 if (!cache->available_p)
929 *this_id = frame_id_build_unavailable_stack (cache->func);
931 *this_id = frame_id_build (cache->prev_sp, cache->func);
934 /* Implement the "prev_register" frame_unwind method. */
936 static struct value *
937 aarch64_prologue_prev_register (struct frame_info *this_frame,
938 void **this_cache, int prev_regnum)
940 struct aarch64_prologue_cache *cache
941 = aarch64_make_prologue_cache (this_frame, this_cache);
943 /* If we are asked to unwind the PC, then we need to return the LR
944 instead. The prologue may save PC, but it will point into this
945 frame's prologue, not the next frame's resume location. */
946 if (prev_regnum == AARCH64_PC_REGNUM)
949 struct gdbarch *gdbarch = get_frame_arch (this_frame);
950 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
952 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
/* Strip the PAC signature from LR when return addresses are known
   to be mangled in this frame.  */
954 if (tdep->has_pauth ()
955 && trad_frame_value_p (cache->saved_regs,
956 tdep->pauth_ra_state_regnum))
957 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
959 return frame_unwind_got_constant (this_frame, prev_regnum, lr);
962 /* SP is generally not saved to the stack, but this frame is
963 identified by the next frame's stack pointer at the time of the
964 call. The value was already reconstructed into PREV_SP. */
977 if (prev_regnum == AARCH64_SP_REGNUM)
978 return frame_unwind_got_constant (this_frame, prev_regnum,
/* Everything else comes from the saved-register table.  */
981 return trad_frame_get_prev_register (this_frame, cache->saved_regs,
985 /* AArch64 prologue unwinder. */
/* NOTE(review): the frame-type field and NULL/default slots of this
   initializer sit on lines elided from this extract.  */
986 struct frame_unwind aarch64_prologue_unwind =
989 aarch64_prologue_frame_unwind_stop_reason,
990 aarch64_prologue_this_id,
991 aarch64_prologue_prev_register,
993 default_frame_sniffer
996 /* Allocate and fill in *THIS_CACHE with information about the prologue of
997 *THIS_FRAME. Do not do this is if *THIS_CACHE was already allocated.
998 Return a pointer to the current aarch64_prologue_cache in
1001 static struct aarch64_prologue_cache *
1002 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
1004 struct aarch64_prologue_cache *cache;
1006 if (*this_cache != NULL)
1007 return (struct aarch64_prologue_cache *) *this_cache;
1009 cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1010 cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1011 *this_cache = cache;
/* Stubs have no prologue to analyze: the frame is identified directly
   by the current SP and PC.  Unavailable state is tolerated.  */
1015 cache->prev_sp = get_frame_register_unsigned (this_frame,
1017 cache->prev_pc = get_frame_pc (this_frame);
1018 cache->available_p = 1;
1020 catch (const gdb_exception_error &ex)
1022 if (ex.error != NOT_AVAILABLE_ERROR)
1029 /* Implement the "stop_reason" frame_unwind method. */
1031 static enum unwind_stop_reason
1032 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
1035 struct aarch64_prologue_cache *cache
1036 = aarch64_make_stub_cache (this_frame, this_cache);
1038 if (!cache->available_p)
1039 return UNWIND_UNAVAILABLE;
1041 return UNWIND_NO_REASON;
1044 /* Our frame ID for a stub frame is the current SP and LR. */
1047 aarch64_stub_this_id (struct frame_info *this_frame,
1048 void **this_cache, struct frame_id *this_id)
1050 struct aarch64_prologue_cache *cache
1051 = aarch64_make_stub_cache (this_frame, this_cache);
1053 if (cache->available_p)
1054 *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1056 *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
1059 /* Implement the "sniffer" frame_unwind method. */
/* Claim the frame if it is in a PLT stub, or if its code is simply
   unreadable (so the prologue unwinder would fail anyway).  */
1062 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1063 struct frame_info *this_frame,
1064 void **this_prologue_cache)
1066 CORE_ADDR addr_in_block;
1069 addr_in_block = get_frame_address_in_block (this_frame);
1070 if (in_plt_section (addr_in_block)
1071 /* We also use the stub winder if the target memory is unreadable
1072 to avoid having the prologue unwinder trying to read it. */
1073 || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1079 /* AArch64 stub unwinder. */
/* Reuses the prologue unwinder's prev_register method; only the
   stop-reason, this-id and sniffer differ.  */
1080 struct frame_unwind aarch64_stub_unwind =
1083 aarch64_stub_frame_unwind_stop_reason,
1084 aarch64_stub_this_id,
1085 aarch64_prologue_prev_register,
1087 aarch64_stub_unwind_sniffer
1090 /* Return the frame base address of *THIS_FRAME. */
1093 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1095 struct aarch64_prologue_cache *cache
1096 = aarch64_make_prologue_cache (this_frame, this_cache);
/* The frame base is the caller's SP minus this frame's size.  */
1098 return cache->prev_sp - cache->framesize;
1101 /* AArch64 default frame base information. */
/* The same address serves as the frame base, locals base and args
   base.  */
1102 struct frame_base aarch64_normal_base =
1104 &aarch64_prologue_unwind,
1105 aarch64_normal_frame_base,
1106 aarch64_normal_frame_base,
1107 aarch64_normal_frame_base
1110 /* Return the value of the REGNUM register in the previous frame of
1113 static struct value *
1114 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1115 void **this_cache, int regnum)
1117 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
/* Only the PC is handled here (DWARF CFI delegates it to this
   function via DWARF2_FRAME_REG_FN); unwind it from LR, stripping any
   PAC signature.  */
1122 case AARCH64_PC_REGNUM:
1123 lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1124 lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
1125 return frame_unwind_got_constant (this_frame, regnum, lr);
1128 internal_error (__FILE__, __LINE__,
1129 _("Unexpected register %d"), regnum);
/* One-byte DWARF expressions used as canned RA_STATE values (0 = not
   mangled, 1 = mangled); their addresses are also compared to toggle
   the state in aarch64_execute_dwarf_cfa_vendor_op below.  */
1133 static const unsigned char op_lit0 = DW_OP_lit0;
1134 static const unsigned char op_lit1 = DW_OP_lit1;
1136 /* Implement the "init_reg" dwarf2_frame_ops method. */
1139 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1140 struct dwarf2_frame_state_reg *reg,
1141 struct frame_info *this_frame)
1143 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
/* PC is computed by aarch64_dwarf2_prev_register (handles PAC);
   SP is simply the CFA.  */
1147 case AARCH64_PC_REGNUM:
1148 reg->how = DWARF2_FRAME_REG_FN;
1149 reg->loc.fn = aarch64_dwarf2_prev_register;
1152 case AARCH64_SP_REGNUM:
1153 reg->how = DWARF2_FRAME_REG_CFA;
1157 /* Init pauth registers. */
1158 if (tdep->has_pauth ())
1160 if (regnum == tdep->pauth_ra_state_regnum)
1162 /* Initialize RA_STATE to zero. */
1163 reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
1164 reg->loc.exp.start = &op_lit0;
1165 reg->loc.exp.len = 1;
/* The PAC mask registers are invariant across frames.  */
1168 else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
1169 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
1171 reg->how = DWARF2_FRAME_REG_SAME_VALUE;
1177 /* Implement the execute_dwarf_cfa_vendor_op method. */
1180 aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
1181 struct dwarf2_frame_state *fs)
1183 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1184 struct dwarf2_frame_state_reg *ra_state;
1186 if (op == DW_CFA_AARCH64_negate_ra_state)
1188 /* On systems without pauth, treat as a nop. */
1189 if (!tdep->has_pauth ())
1192 /* Allocate RA_STATE column if it's not allocated yet. */
1193 fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);
1195 /* Toggle the status of RA_STATE between 0 and 1. */
1196 ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
1197 ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
/* The toggle is implemented by swapping which canned one-byte DWARF
   expression (op_lit0/op_lit1) the column points at; an unset column
   (nullptr) counts as 0.  */
1199 if (ra_state->loc.exp.start == nullptr
1200 || ra_state->loc.exp.start == &op_lit0)
1201 ra_state->loc.exp.start = &op_lit1;
1203 ra_state->loc.exp.start = &op_lit0;
1205 ra_state->loc.exp.len = 1;
1213 /* When arguments must be pushed onto the stack, they go on in reverse
1214 order. The code below implements a FILO (stack) to do this. */
/* NOTE(review): the struct declaration line and its tag are elided
   from this extract; only two members are visible.  */
1218 /* Value to pass on stack. It can be NULL if this item is for stack
1220 const gdb_byte *data;
1222 /* Size in bytes of value to pass on stack. */
1226 /* Implement the gdbarch type alignment method, overrides the generic
1227 alignment algorithm for anything that is aarch64 specific. */
1230 aarch64_type_align (gdbarch *gdbarch, struct type *t)
1232 t = check_typedef (t);
1233 if (TYPE_CODE (t) == TYPE_CODE_ARRAY && TYPE_VECTOR (t))
1235 /* Use the natural alignment for vector types (the same for
1236 scalar type), but the maximum alignment is 128-bit. */
1237 if (TYPE_LENGTH (t) > 16)
/* NOTE(review): the 16-byte-cap return for oversized vectors sits on
   an elided line; small vectors align to their own length.  */
1240 return TYPE_LENGTH (t);
/* Non-vector types: defer to the generic algorithm (returning 0 tells
   gdb to use it -- the actual return is on an elided line, TODO
   confirm).  */
1243 /* Allow the common code to calculate the alignment. */
/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of register required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   fundamental type already seen (an HFA/HVA must be homogeneous).  */
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
                                         struct type **fundamental_type)
  if (type == nullptr)

  switch (TYPE_CODE (type))
    /* Scalar floating point: a base element provided it fits in 16
       bytes (i.e. up to quad precision).  */
    if (TYPE_LENGTH (type) > 16)

    if (*fundamental_type == nullptr)
      *fundamental_type = type;
    else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
             || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))

    case TYPE_CODE_COMPLEX:
      /* A complex counts as two base elements of its component type.  */
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      if (TYPE_LENGTH (target_type) > 16)

      if (*fundamental_type == nullptr)
        *fundamental_type = target_type;
      else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
               || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (type))
          /* Short vectors must be exactly 64 or 128 bits wide.  */
          if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)

          if (*fundamental_type == nullptr)
            *fundamental_type = type;
          else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
                   || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))

      /* Ordinary array: recurse on the element type and multiply by the
         number of elements.  */
      struct type *target_type = TYPE_TARGET_TYPE (type);
      int count = aapcs_is_vfp_call_or_return_candidate_1
                    (target_type, fundamental_type);

      count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* Sum the element counts of all non-static members; all members
         must share the same fundamental type.  */
      for (int i = 0; i < TYPE_NFIELDS (type); i++)
          /* Ignore any static fields.  */
          if (field_is_static (&TYPE_FIELD (type, i)))

          struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

          int sub_count = aapcs_is_vfp_call_or_return_candidate_1
                            (member, fundamental_type);
          if (sub_count == -1)

      /* Ensure there is no padding between the fields (allowing for empty
         zero length structs)  */
      int ftype_length = (*fundamental_type == nullptr)
                         ? 0 : TYPE_LENGTH (*fundamental_type);
      if (count * ftype_length != TYPE_LENGTH (type))
/* Return true if an argument, whose type is described by TYPE, can be passed or
   returned in simd/fp registers, providing enough parameter passing registers
   are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.

   Note that HFAs and HVAs can include nested structures and arrays.  */
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
                                       struct type **fundamental_type)
  if (type == nullptr)

  /* No base element type seen yet; the worker fills it in.  */
  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,

  /* Accept only when between 1 and HA_MAX_NUM_FLDS (4) registers are
     needed, per the AAPCS64 HFA/HVA member limit.  */
  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
/* AArch64 function call information structure.  Tracks the allocation
   state of argument registers and stack while marshalling a dummy call.  */
struct aarch64_call_info
  /* The current argument number (used for debug output only).  */
  unsigned argnum = 0;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */

  /* Stack item vector.  Items are pushed in reverse order (FILO).  */
  std::vector<stack_item_t> si;
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

      /* Copy at most one doubleword (8 bytes) per X register.  */
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

        debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                      gdbarch_register_name (gdbarch, regnum),
                      phex (regval, X_REGISTER_SIZE));

      regcache_cooked_write_unsigned (regcache, regnum, regval);
/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
    int regnum = AARCH64_V0_REGNUM + info->nsrn;

    /* Enough space for a full vector register.  */
    gdb_byte reg[register_size (gdbarch, regnum)];
    gdb_assert (len <= sizeof (reg));

    /* Zero the whole register so the unused high bytes are defined.  */
    memset (reg, 0, sizeof (reg));

    /* PCS C.1, the argument is allocated to the least significant
       bits of V register.  */
    memcpy (reg, buf, len);
    regcache->cooked_write (regnum, reg);

      debug_printf ("arg %d in %s\n", info->argnum,
                    gdbarch_register_name (gdbarch, regnum));
/* Marshall an argument onto the stack, recording it in INFO->si so it
   can be written out later (stack items are emitted in reverse).  */
pass_on_stack (struct aarch64_call_info *info, struct type *type,
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);

  align = type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */

    debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,

  info->si.push_back (item);

  /* If the item left NSAA misaligned for the next argument, queue an
     explicit padding item (data == NULL).  */
  if (info->nsaa & (align - 1))
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      info->si.push_back (item);
/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack.  */
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
  int len = TYPE_LENGTH (type);
  /* Number of doublewords (X registers) the value occupies.  */
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare */
  if (info->ngrn + nregs <= 8)
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;

      /* Not enough X registers left; the whole value goes to the stack.  */
      pass_on_stack (info, type, arg);
/* Pass a value, which is of type arg_type, in a V register.  Assumes value is a
   aapcs_is_vfp_call_or_return_candidate and there are enough spare V
   registers.  A return value of false is an error state as the value will have
   been partially passed to the stack.  */
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
                         struct aarch64_call_info *info, struct type *arg_type,
  switch (TYPE_CODE (arg_type))
    /* Scalar float: one V register.  */
    return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                      value_contents (arg));

    case TYPE_CODE_COMPLEX:
      /* Complex: real then imaginary part, one V register each.  */
      const bfd_byte *buf = value_contents (arg);
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

      if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),

      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                        buf + TYPE_LENGTH (target_type));

    case TYPE_CODE_ARRAY:
      /* Short vectors go whole into a single V register.  */
      if (TYPE_VECTOR (arg_type))
        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                          value_contents (arg));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* HFA/HVA: recurse over each non-static member.  */
      for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
          /* Don't include static fields.  */
          if (field_is_static (&TYPE_FIELD (arg_type, i)))

          struct value *field = value_primitive_field (arg, 0, i, arg_type);
          struct type *field_type = check_typedef (value_type (field));

          if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
/* Implement the "push_dummy_call" gdbarch method.  Marshalls ARGS into
   registers and stack per the AAPCS64 and returns the adjusted SP.  */
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         struct value **args, CORE_ADDR sp,
                         function_call_return_method return_method,
                         CORE_ADDR struct_addr)
  struct aarch64_call_info info;

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage processes.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
        debug_printf ("struct return in %s = 0x%s\n",
                      gdbarch_register_name (gdbarch,
                                             AARCH64_STRUCT_RETURN_REGNUM),
                      paddress (gdbarch, struct_addr));

      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,

  for (argnum = 0; argnum < nargs; argnum++)
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in v registers as per the AAPCS64, then do
         so if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
          if (info.nsrn + elements <= 8)
              /* We know that we have sufficient registers available therefore
                 this will never need to fallback to the stack.  */
              if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
                gdb_assert_not_reached ("Failed to push args");

              /* PCS C.3: once an HFA/HVA overflows, it goes on the stack.  */
              pass_on_stack (&info, arg_type, arg);

      switch (TYPE_CODE (arg_type))
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
            /* Promote to 32 bit integer.  */
            if (TYPE_UNSIGNED (arg_type))
              arg_type = builtin_type (gdbarch)->builtin_uint32;
              arg_type = builtin_type (gdbarch)->builtin_int32;

            arg = value_cast (arg_type, arg);

          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
            /* PCS B.7 Aggregates larger than 16 bytes are passed by
               invisible reference.  */

            /* Allocate aligned storage.  */
            sp = align_down (sp - len, 16);

            /* Write the real data into the stack.  */
            write_memory (sp, value_contents (arg), len);

            /* Construct the indirection.  */
            arg_type = lookup_pointer_type (arg_type);
            arg = value_from_pointer (arg_type, sp);
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);

            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);

          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);

  /* Make sure stack retains 16 byte alignment.  */
  sp -= 16 - (info.nsaa & 15);

  /* Write queued stack items out, last-pushed first (reverse order).  */
  while (!info.si.empty ())
      const stack_item_t &si = info.si.back ();

      /* NULL data marks alignment padding; nothing to copy.  */
      if (si.data != NULL)
        write_memory (sp, si.data, si.len);
      info.si.pop_back ();

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1784 /* Implement the "frame_align" gdbarch method. */
1787 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1789 /* Align the stack to sixteen bytes. */
1790 return sp & ~(CORE_ADDR) 15;
/* Return the type for an AdvSISD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Build the union-of-views type once and cache it on the tdep.  */
  if (tdep->vnq_type == NULL)
      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",

      /* 128-bit unsigned and signed integer views.  */
      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

  return tdep->vnq_type;
/* Return the type for an AdvSISD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Build the union-of-views type once and cache it on the tdep.  */
  if (tdep->vnd_type == NULL)
      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",

      /* 64-bit float, unsigned and signed integer views.  */
      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

  return tdep->vnd_type;
/* Return the type for an AdvSISD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Build the union-of-views type once and cache it on the tdep.  */
  if (tdep->vns_type == NULL)
      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",

      /* 32-bit float, unsigned and signed integer views.  */
      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

  return tdep->vns_type;
/* Return the type for an AdvSISD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Build the union-of-views type once and cache it on the tdep.  */
  if (tdep->vnh_type == NULL)
      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",

      /* 16-bit half float, unsigned and signed integer views.  */
      elem = builtin_type (gdbarch)->builtin_half;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

  return tdep->vnh_type;
/* Return the type for an AdvSISD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Build the union-of-views type once and cache it on the tdep.  */
  if (tdep->vnb_type == NULL)
      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",

      /* 8-bit unsigned and signed integer views.  */
      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

  return tdep->vnb_type;
/* Return the type for an AdvSISD V register.  */

static struct type *
aarch64_vnv_type (struct gdbarch *gdbarch)
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnv_type == NULL)
      /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
         slice from the non-pseudo vector registers.  However NEON V registers
         are always vector registers, and need constructing as such.  */
      const struct builtin_type *bt = builtin_type (gdbarch);

      struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",

      /* "d": two 64-bit lanes, viewable as double/uint64/int64.  */
      struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
      append_composite_type_field (sub, "f",
                                   init_vector_type (bt->builtin_double, 2));
      append_composite_type_field (sub, "u",
                                   init_vector_type (bt->builtin_uint64, 2));
      append_composite_type_field (sub, "s",
                                   init_vector_type (bt->builtin_int64, 2));
      append_composite_type_field (t, "d", sub);

      /* "s": four 32-bit lanes.  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
      append_composite_type_field (sub, "f",
                                   init_vector_type (bt->builtin_float, 4));
      append_composite_type_field (sub, "u",
                                   init_vector_type (bt->builtin_uint32, 4));
      append_composite_type_field (sub, "s",
                                   init_vector_type (bt->builtin_int32, 4));
      append_composite_type_field (t, "s", sub);

      /* "h": eight 16-bit lanes.  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
      append_composite_type_field (sub, "f",
                                   init_vector_type (bt->builtin_half, 8));
      append_composite_type_field (sub, "u",
                                   init_vector_type (bt->builtin_uint16, 8));
      append_composite_type_field (sub, "s",
                                   init_vector_type (bt->builtin_int16, 8));
      append_composite_type_field (t, "h", sub);

      /* "b": sixteen 8-bit lanes.  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
      append_composite_type_field (sub, "u",
                                   init_vector_type (bt->builtin_uint8, 16));
      append_composite_type_field (sub, "s",
                                   init_vector_type (bt->builtin_int8, 16));
      append_composite_type_field (t, "b", sub);

      /* "q": one 128-bit lane.  */
      sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
      append_composite_type_field (sub, "u",
                                   init_vector_type (bt->builtin_uint128, 1));
      append_composite_type_field (sub, "s",
                                   init_vector_type (bt->builtin_int128, 1));
      append_composite_type_field (t, "q", sub);

  return tdep->vnv_type;
/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  Maps DWARF
   register numbers onto GDB's internal register numbering.  */
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* X0..X30 general purpose registers.  */
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  /* V0..V31 SIMD/FP registers.  */
  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  /* SVE registers: vector granule, first-fault, predicate and Z regs.  */
  if (reg == AARCH64_DWARF_SVE_VG)
    return AARCH64_SVE_VG_REGNUM;

  if (reg == AARCH64_DWARF_SVE_FFR)
    return AARCH64_SVE_FFR_REGNUM;

  if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
    return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;

  if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
    return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;

  /* Pointer authentication registers, only when the target has pauth.  */
  if (tdep->has_pauth ())
      if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
        return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;

      if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
        return tdep->pauth_ra_state_regnum;
2046 /* Implement the "print_insn" gdbarch method. */
2049 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2051 info->symbols = NULL;
2052 return default_print_insn (memaddr, info);
/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Standard breakpoint-kind/sw-breakpoint helpers built from the BRK
   instruction above.  */
typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */
aarch64_extract_return_value (struct type *type, struct regcache *regs,
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct type *fundamental_type;

  /* HFA/HVA return values: one element per V register (V0 upward).  */
  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
      int len = TYPE_LENGTH (fundamental_type);

      for (int i = 0; i < elements; i++)
          int regno = AARCH64_V0_REGNUM + i;
          /* Enough space for a full vector register.  */
          gdb_byte buf[register_size (gdbarch, regno)];
          gdb_assert (len <= sizeof (buf));

            debug_printf ("read HFA or HVA return value element %d from %s\n",
                          gdbarch_register_name (gdbarch, regno));

          regs->cooked_read (regno, buf);

          memcpy (valbuf, buf, len);
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_IS_REFERENCE (type)
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
      /* If the type is a plain integer, then the access is
         straight-forward.  Otherwise we have to play around a bit
         more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;

          /* By using store_unsigned_integer we avoid having to do
             anything special for small big-endian values.  */
          regcache_cooked_read_unsigned (regs, regno++, &tmp);
          store_unsigned_integer (valbuf,
                                  (len > X_REGISTER_SIZE
                                   ? X_REGISTER_SIZE : len), byte_order, tmp);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

          regs->cooked_read (regno++, buf);
          memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
  type = check_typedef (type);
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
      /* v0-v7 are used to return values and one register is allocated
         for one member.  However, HFA or HVA has at most four members.  */

  if (TYPE_LENGTH (type) > 16)
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
         invisible reference.  */
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */
aarch64_store_return_value (struct type *type, struct regcache *regs,
                            const gdb_byte *valbuf)
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct type *fundamental_type;

  /* HFA/HVA return values: one element per V register (V0 upward).  */
  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
      int len = TYPE_LENGTH (fundamental_type);

      for (int i = 0; i < elements; i++)
          int regno = AARCH64_V0_REGNUM + i;
          /* Enough space for a full vector register.  */
          gdb_byte tmpbuf[register_size (gdbarch, regno)];
          gdb_assert (len <= sizeof (tmpbuf));

            debug_printf ("write HFA or HVA return value element %d to %s\n",
                          gdbarch_register_name (gdbarch, regno));

          memcpy (tmpbuf, valbuf,
                  len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
          regs->cooked_write (regno, tmpbuf);
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_IS_REFERENCE (type)
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
          /* Values of one word or less are zero/sign-extended and
             returned in X0.  */
          bfd_byte tmpbuf[X_REGISTER_SIZE];
          LONGEST val = unpack_long (type, valbuf);

          store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
          regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
          /* Integral values greater than one word are stored in
             consecutive registers starting with r0.  This will always
             be a multiple of the register size.  */
          int len = TYPE_LENGTH (type);
          int regno = AARCH64_X0_REGNUM;

              regs->cooked_write (regno++, valbuf);
              len -= X_REGISTER_SIZE;
              valbuf += X_REGISTER_SIZE;
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

          memcpy (tmpbuf, valbuf,
                  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          regs->cooked_write (regno++, tmpbuf);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
/* Implement the "return_value" gdbarch method.  Decides the return
   convention and optionally reads/writes the value via READBUF/WRITEBUF.  */

static enum return_value_convention
aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
                      struct type *valtype, struct regcache *regcache,
                      gdb_byte *readbuf, const gdb_byte *writebuf)
  /* Aggregates may need to be returned in memory per the AAPCS64.  */
  if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
      || TYPE_CODE (valtype) == TYPE_CODE_UNION
      || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
      if (aarch64_return_in_memory (gdbarch, valtype))
            debug_printf ("return value in memory\n");
          return RETURN_VALUE_STRUCT_CONVENTION;

    aarch64_store_return_value (valtype, regcache, writebuf);

    aarch64_extract_return_value (valtype, regcache, readbuf);

    debug_printf ("return value in registers\n");

  return RETURN_VALUE_REGISTER_CONVENTION;
/* Implement the "get_longjmp_target" gdbarch method.

   Reads the longjmp destination PC out of the jmp_buf whose address is
   in X0, using the tdep's jmp_buf layout (jb_pc / jb_elt_size).  */
aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
  gdb_byte buf[X_REGISTER_SIZE];
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* The jmp_buf pointer is the first (X0) argument to longjmp.  */
  jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);

  if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,

  *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2318 /* Implement the "gen_return_address" gdbarch method. */
2321 aarch64_gen_return_address (struct gdbarch *gdbarch,
2322 struct agent_expr *ax, struct axs_value *value,
2325 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2326 value->kind = axs_lvalue_register;
2327 value->u.reg = AARCH64_LR_REGNUM;
/* Return the pseudo register name corresponding to register regnum.  */
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Name tables for the Q/D/S/H/B views of the 32 vector registers.  */
  static const char *const q_name[] =
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",

  static const char *const d_name[] =
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",

  static const char *const s_name[] =
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",

  static const char *const h_name[] =
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",

  static const char *const b_name[] =
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",

  /* Convert to a zero-based pseudo register index.  */
  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[p_regnum - AARCH64_Q0_REGNUM];

  if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    return d_name[p_regnum - AARCH64_D0_REGNUM];

  if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return s_name[p_regnum - AARCH64_S0_REGNUM];

  if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return h_name[p_regnum - AARCH64_H0_REGNUM];

  if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return b_name[p_regnum - AARCH64_B0_REGNUM];

  /* With SVE, the V registers are themselves pseudo registers (views of
     the Z registers) and get their own name table.  */
  if (tdep->has_sve ())
      static const char *const sve_v_name[] =
          "v0", "v1", "v2", "v3",
          "v4", "v5", "v6", "v7",
          "v8", "v9", "v10", "v11",
          "v12", "v13", "v14", "v15",
          "v16", "v17", "v18", "v19",
          "v20", "v21", "v22", "v23",
          "v24", "v25", "v26", "v27",
          "v28", "v29", "v30", "v31",

      if (p_regnum >= AARCH64_SVE_V0_REGNUM
          && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
        return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];

  /* RA_STATE is used for unwinding only.  Do not assign it a name - this
     prevents it from being read by methods such as
     mi_cmd_trace_frame_collected.  */
  if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)

  internal_error (__FILE__, __LINE__,
                  _("aarch64_pseudo_register_name: bad register number %d"),
2447 static struct type *
2448 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2450 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2452 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2454 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2455 return aarch64_vnq_type (gdbarch);
2457 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2458 return aarch64_vnd_type (gdbarch);
2460 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2461 return aarch64_vns_type (gdbarch);
2463 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2464 return aarch64_vnh_type (gdbarch);
2466 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2467 return aarch64_vnb_type (gdbarch);
2469 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2470 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2471 return aarch64_vnv_type (gdbarch);
2473 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2474 return builtin_type (gdbarch)->builtin_uint64;
2476 internal_error (__FILE__, __LINE__,
2477 _("aarch64_pseudo_register_type: bad register number %d"),
/* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method.  */
aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
                                    struct reggroup *group)
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Convert to a zero-based pseudo register index.  */
  int p_regnum = regnum - gdbarch_num_regs (gdbarch);

  if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
    /* D and S views are also floating-point registers.  */
    return (group == all_reggroup || group == vector_reggroup
            || group == float_reggroup);
  else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
    return (group == all_reggroup || group == vector_reggroup
            || group == float_reggroup);
  else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
    return group == all_reggroup || group == vector_reggroup;
  else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
           && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
    return group == all_reggroup || group == vector_reggroup;
  /* RA_STATE is used for unwinding only.  Do not assign it to any groups.  */
  if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)

  return group == all_reggroup;
2513 /* Helper for aarch64_pseudo_read_value. */
2515 static struct value *
2516 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2517 readable_regcache *regcache, int regnum_offset,
2518 int regsize, struct value *result_value)
2520 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2522 /* Enough space for a full vector register. */
2523 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2524 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2526 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2527 mark_value_bytes_unavailable (result_value, 0,
2528 TYPE_LENGTH (value_type (result_value)));
2530 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2532 return result_value;
/* Implement the "pseudo_register_read_value" gdbarch method.

   Allocate a value of the pseudo register's type and fill it from the
   underlying V (or SVE Z) raw register, taking only the bytes that the
   requested view (Q/D/S/H/B/V) covers.  */

static struct value *
aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
			   int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  struct value *result_value = allocate_value (register_type (gdbarch, regnum));

  VALUE_LVAL (result_value) = lval_register;
  VALUE_REGNUM (result_value) = regnum;

  /* Rebase REGNUM so pseudo register numbering starts at zero.  */
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_Q0_REGNUM,
					Q_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_D0_REGNUM,
					D_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_S0_REGNUM,
					S_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_H0_REGNUM,
					H_REGISTER_SIZE, result_value);

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_B0_REGNUM,
					B_REGISTER_SIZE, result_value);

  /* With SVE, the V pseudos read the low 16 bytes of the Z registers.  */
  if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
      && regnum < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_read_value_1 (gdbarch, regcache,
					regnum - AARCH64_SVE_V0_REGNUM,
					V_REGISTER_SIZE, result_value);

  gdb_assert_not_reached ("regnum out of bound");
}
/* Helper for aarch64_pseudo_write.

   Write the REGSIZE bytes in BUF to the low bytes of the underlying V
   register at offset REGNUM_OFFSET from V0, zeroing the rest of the
   raw register.  */

static void
aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
			int regnum_offset, int regsize, const gdb_byte *buf)
{
  unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;

  /* Enough space for a full vector register.  */
  gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
  gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);

  /* Ensure the register buffer is zero, we want gdb writes of the
     various 'scalar' pseudo registers to behave like architectural
     writes: register width bytes are written, the remainder are set to
     zero.  */
  memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));

  memcpy (reg_buf, buf, regsize);
  regcache->raw_write (v_regnum, reg_buf);
}
/* Implement the "pseudo_register_write" gdbarch method.

   Route a write to a Q/D/S/H/B (or SVE V) pseudo register to the low
   bytes of the underlying raw V register.  */

static void
aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
		      int regnum, const gdb_byte *buf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* Rebase REGNUM so pseudo register numbering starts at zero.  */
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
				   buf);

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
				   buf);

  /* With SVE, the V pseudos write the low 16 bytes of the Z registers.  */
  if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
      && regnum < AARCH64_SVE_V0_REGNUM + 32)
    return aarch64_pseudo_write_1 (gdbarch, regcache,
				   regnum - AARCH64_SVE_V0_REGNUM,
				   V_REGISTER_SIZE, buf);

  gdb_assert_not_reached ("regnum out of bound");
}
2648 /* Callback function for user_reg_add. */
2650 static struct value *
2651 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2653 const int *reg_p = (const int *) baton;
2655 return value_of_register (*reg_p, frame);
/* Implement the "software_single_step" gdbarch method, needed to
   single step through atomic sequences on AArch64.

   Starting at PC, scan up to 16 instructions looking for a Load
   Exclusive / Store Exclusive pair.  Return the PCs where breakpoints
   should be placed (past the sequence, plus the target of at most one
   conditional branch inside it), or an empty vector to fall back to
   normal stepping.  */

static std::vector<CORE_ADDR>
aarch64_software_single_step (struct regcache *regcache)
{
  struct gdbarch *gdbarch = regcache->arch ();
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  const int insn_size = 4;
  const int atomic_sequence_length = 16; /* Instruction sequence length.  */
  CORE_ADDR pc = regcache_read_pc (regcache);
  CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
  CORE_ADDR loc = pc;
  CORE_ADDR closing_insn = 0;
  uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
						byte_order_for_code);
  int index;
  int insn_count;
  int bc_insn_count = 0; /* Conditional branch instruction count.  */
  int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return {};

  /* Look for a Load Exclusive instruction which begins the sequence.
     Bit 22 set distinguishes a load from a store exclusive.  */
  if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
    return {};

  for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
    {
      loc += insn_size;
      insn = read_memory_unsigned_integer (loc, insn_size,
					   byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
	return {};
      /* Check if the instruction is a conditional branch.  */
      if (inst.opcode->iclass == condbranch)
	{
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);

	  /* More than one conditional branch: give up.  */
	  if (bc_insn_count >= 1)
	    return {};

	  /* It is, so we'll try to set a breakpoint at the destination.  */
	  breaks[1] = loc + inst.operands[0].imm.value;

	  bc_insn_count++;
	  last_breakpoint++;
	}

      /* Look for the Store Exclusive which closes the atomic sequence.  */
      if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
	{
	  closing_insn = loc;
	  break;
	}
    }

  /* We didn't find a closing Store Exclusive instruction, fall back.  */
  if (!closing_insn)
    return {};

  /* Insert breakpoint after the end of the atomic sequence.  */
  breaks[0] = loc + insn_size;

  /* Check for duplicated breakpoints, and also check that the second
     breakpoint is not within the atomic sequence.  */
  if (last_breakpoint
      && (breaks[1] == breaks[0]
	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
    last_breakpoint = 0;

  std::vector<CORE_ADDR> next_pcs;

  /* Insert the breakpoint at the end of the sequence, and one at the
     destination of the conditional branch, if it exists.  */
  for (index = 0; index <= last_breakpoint; index++)
    next_pcs.push_back (breaks[index]);

  return next_pcs;
}
/* Per-instruction state carried across a displaced-stepping operation.  */

struct aarch64_displaced_step_closure : public displaced_step_closure
{
  /* It is true when condition instruction, such as B.CON, TBZ, etc,
     is being displaced stepping.  */
  bool cond = false;

  /* PC adjustment offset after displaced stepping.  */
  int32_t pc_adjust = 0;
};
/* Data when visiting instructions for displaced stepping.  */

struct aarch64_displaced_step_data
{
  /* Common visitor data; must be first so the struct can be cast to
     aarch64_insn_data by the visitor callbacks.  */
  struct aarch64_insn_data base;

  /* The address where the instruction will be executed at.  */
  CORE_ADDR new_addr;
  /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
  uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
  /* Number of instructions in INSN_BUF.  */
  unsigned insn_count;
  /* Registers when doing displaced stepping.  */
  struct regcache *regs;

  /* Closure recording the fixups required after the step.  */
  aarch64_displaced_step_closure *dsc;
};
/* Implementation of aarch64_insn_visitor method "b".

   Relocate a B/BL at the original address so it can run from the
   scratch pad: emit B with a re-computed offset when it still fits in
   28 bits, otherwise emit a NOP and let the fixup phase adjust the PC.
   For BL, write the correct return address into LR directly.  */

static void
aarch64_displaced_step_b (const int is_bl, const int32_t offset,
			  struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  /* Branch offset as seen from the scratch-pad copy.  */
  int64_t new_offset = data->insn_addr - dsd->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    {
      /* Emit B rather than BL, because executing BL on a new address
	 will get the wrong address into LR.  In order to avoid this,
	 we emit B, and update LR if the instruction is BL.  */
      emit_b (dsd->insn_buf, 0, new_offset);
      dsd->insn_count++;
    }
  else
    {
      /* Offset no longer encodable: execute a NOP and have the fixup
	 phase apply the branch as a PC adjustment.  */
      emit_nop (dsd->insn_buf);
      dsd->insn_count++;
      dsd->dsc->pc_adjust = offset;
    }

  if (is_bl)
    {
      /* Update LR with the return address of the original BL.  */
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
				      data->insn_addr + 4);
    }
}
/* Implementation of aarch64_insn_visitor method "b_cond".

   Replace a conditional branch with a B.COND that skips forward 8
   bytes; the fixup phase infers whether the condition held from the
   resulting PC and applies OFFSET accordingly.  */

static void
aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
			       struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* GDB has to fix up PC after displaced step this instruction
     differently according to the condition is true or false.  Instead
     of checking COND against conditional flags, we can use
     the following instructions, and GDB can tell how to fix up PC
     according to the PC value.

     B.COND TAKEN    ; If cond is true, then jump to TAKEN.
     INSN1     ;
     TAKEN:
     INSN2
  */

  emit_bcond (dsd->insn_buf, cond, 8);
  /* Tell the fixup phase this was a conditional instruction.  */
  dsd->dsc->cond = 1;
  dsd->dsc->pc_adjust = offset;
  dsd->insn_count = 1;
}
2832 /* Dynamically allocate a new register. If we know the register
2833 statically, we should make it a global as above instead of using this
2836 static struct aarch64_register
2837 aarch64_register (unsigned num, int is64)
2839 return (struct aarch64_register) { num, is64 };
/* Implementation of aarch64_insn_visitor method "cb".

   Relocate CBZ/CBNZ: emit the compare-and-branch with a fixed forward
   skip of 8 bytes and let the fixup phase translate the taken/not-taken
   outcome into the original OFFSET.  */

static void
aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
			   const unsigned rn, int is64,
			   struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a compare and branch
     instruction.  We can use the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 INSN1     ;
	 TAKEN:
	 INSN2
  */
  emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
  dsd->insn_count = 1;
  /* Mark as conditional so the fixup phase inspects the landing PC.  */
  dsd->dsc->cond = 1;
  dsd->dsc->pc_adjust = offset;
}
/* Implementation of aarch64_insn_visitor method "tb".

   Relocate TBZ/TBNZ the same way as CBZ/CBNZ: branch forward 8 bytes
   on the tested bit and resolve the real target in the fixup phase.  */

static void
aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
			   const unsigned rt, unsigned bit,
			   struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  /* The offset is out of range for a test bit and branch
     instruction.  We can use the following instructions instead:

     TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
     INSN1         ;
     TAKEN:
     INSN2
  */
  emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
  dsd->insn_count = 1;
  /* Mark as conditional so the fixup phase inspects the landing PC.  */
  dsd->dsc->cond = 1;
  dsd->dsc->pc_adjust = offset;
}
/* Implementation of aarch64_insn_visitor method "adr".

   ADR/ADRP results are PC-relative, so compute the value the original
   instruction would have produced and store it directly, executing a
   NOP in the scratch pad instead.  */

static void
aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
			    const int is_adrp, struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
				      address & ~0xfff);
    }
  else
    regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
				    address);

  dsd->dsc->pc_adjust = 4;
  emit_nop (dsd->insn_buf);
  dsd->insn_count = 1;
}
/* Implementation of aarch64_insn_visitor method "ldr_literal".

   A PC-relative literal load cannot run from the scratch pad directly.
   Write the literal's absolute address into RT, then emit a zero-offset
   LDR (or LDRSW) through RT to perform the actual load.  */

static void
aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
				    const unsigned rt, const int is64,
				    struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;
  /* Absolute address of the literal the original insn referenced.  */
  CORE_ADDR address = data->insn_addr + offset;
  struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };

  regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
				  address);

  if (is_sw)
    dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
				  aarch64_register (rt, 1), zero);
  else
    dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
				aarch64_register (rt, 1), zero);

  dsd->dsc->pc_adjust = 4;
}
/* Implementation of aarch64_insn_visitor method "others".

   Any instruction not handled by a specific visitor is copied verbatim.
   RET already sets the PC itself, so it needs no post-step adjustment;
   everything else advances the PC by one instruction.  */

static void
aarch64_displaced_step_others (const uint32_t insn,
			       struct aarch64_insn_data *data)
{
  struct aarch64_displaced_step_data *dsd
    = (struct aarch64_displaced_step_data *) data;

  aarch64_emit_insn (dsd->insn_buf, insn);
  dsd->insn_count = 1;

  /* Mask matches RET (D65F03C0 with any Rn in bits [5:9]).  */
  if ((insn & 0xfffffc1f) == 0xd65f0000)
    {
      /* RET: the PC is fully determined by the instruction.  */
      dsd->dsc->pc_adjust = 0;
    }
  else
    dsd->dsc->pc_adjust = 4;
}
/* Visitor table used by aarch64_relocate_instruction; one callback per
   instruction category that needs special displaced-step handling.  */

static const struct aarch64_insn_visitor visitor =
{
  aarch64_displaced_step_b,
  aarch64_displaced_step_b_cond,
  aarch64_displaced_step_cb,
  aarch64_displaced_step_tb,
  aarch64_displaced_step_adr,
  aarch64_displaced_step_ldr_literal,
  aarch64_displaced_step_others,
};
/* Implement the "displaced_step_copy_insn" gdbarch method.

   Decode the instruction at FROM, relocate it via the visitor table,
   and write the relocated instruction(s) into the scratch pad at TO.
   Return a closure describing the fixups needed afterwards, or NULL if
   the instruction cannot be displaced-stepped.  */

struct displaced_step_closure *
aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				  CORE_ADDR from, CORE_ADDR to,
				  struct regcache *regs)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
  struct aarch64_displaced_step_data dsd;
  aarch64_inst inst;

  if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
    return NULL;

  /* Look for a Load Exclusive instruction which begins the sequence.  */
  if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
    {
      /* We can't displaced step atomic sequences.  */
      return NULL;
    }

  std::unique_ptr<aarch64_displaced_step_closure> dsc
    (new aarch64_displaced_step_closure);
  dsd.base.insn_addr = from;
  dsd.new_addr = to;
  dsd.regs = regs;
  dsd.dsc = dsc.get ();
  dsd.insn_count = 0;
  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &dsd);
  gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);

  if (dsd.insn_count != 0)
    {
      int i;

      /* Instruction can be relocated to scratch pad.  Copy
	 relocated instruction(s) there.  */
      for (i = 0; i < dsd.insn_count; i++)
	{
	  if (debug_displaced)
	    {
	      debug_printf ("displaced: writing insn ");
	      debug_printf ("%.8x", dsd.insn_buf[i]);
	      debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
	    }
	  write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
					 (ULONGEST) dsd.insn_buf[i]);
	}
    }
  else
    {
      /* Relocation produced nothing: give up on displaced stepping.  */
      dsc = NULL;
    }

  return dsc.release ();
}
/* Implement the "displaced_step_fixup" gdbarch method.

   After executing the relocated instruction at TO, decide from the
   resulting PC whether a conditional branch was taken (the relocated
   copy skips 8 bytes when taken, 4 when not), then point the PC back
   into the original code stream at FROM plus the recorded offset.  */

void
aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
			      struct displaced_step_closure *dsc_,
			      CORE_ADDR from, CORE_ADDR to,
			      struct regcache *regs)
{
  aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;

  if (dsc->cond)
    {
      ULONGEST pc;

      regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
      if (pc - to == 8)
	{
	  /* Condition is true.  */
	}
      else if (pc - to == 4)
	{
	  /* Condition is false.  Step over the original instruction.  */
	  dsc->pc_adjust = 4;
	}
      else
	gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
    }

  if (dsc->pc_adjust != 0)
    {
      if (debug_displaced)
	{
	  debug_printf ("displaced: fixup: set PC to %s:%d\n",
			paddress (gdbarch, from), dsc->pc_adjust);
	}
      regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
				      from + dsc->pc_adjust);
    }
}
/* Implement the "displaced_step_hw_singlestep" gdbarch method.

   Displaced stepping on AArch64 always uses hardware single step.  */

int
aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
				      struct displaced_step_closure *closure)
{
  return 1;
}
/* Get the correct target description for the given VQ value.
   If VQ is zero then it is assumed SVE is not supported.
   (It is not possible to set VQ to zero on an SVE system).

   Descriptions are created lazily and cached in tdesc_aarch64_list,
   keyed by VQ and whether pointer authentication is present.  */

const target_desc *
aarch64_read_description (uint64_t vq, bool pauth_p)
{
  if (vq > AARCH64_MAX_SVE_VQ)
    error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
	   AARCH64_MAX_SVE_VQ);

  struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];

  if (tdesc == NULL)
    {
      /* Not cached yet: build it and remember it for next time.  */
      tdesc = aarch64_create_target_description (vq, pauth_p);
      tdesc_aarch64_list[vq][pauth_p] = tdesc;
    }

  return tdesc;
}
/* Return the VQ used when creating the target description TDESC.
   Returns 0 when TDESC has no registers or no SVE feature.  */

static uint64_t
aarch64_get_tdesc_vq (const struct target_desc *tdesc)
{
  const struct tdesc_feature *feature_sve;

  if (!tdesc_has_registers (tdesc))
    return 0;

  feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");

  if (feature_sve == nullptr)
    return 0;

  /* Derive the vector length (bytes) from the bit width of Z0.  */
  uint64_t vl = tdesc_register_bitsize (feature_sve,
					aarch64_sve_register_names[0]) / 8;
  return sve_vq_from_vl (vl);
}
3125 /* Add all the expected register sets into GDBARCH. */
3128 aarch64_add_reggroups (struct gdbarch *gdbarch)
3130 reggroup_add (gdbarch, general_reggroup);
3131 reggroup_add (gdbarch, float_reggroup);
3132 reggroup_add (gdbarch, system_reggroup);
3133 reggroup_add (gdbarch, vector_reggroup);
3134 reggroup_add (gdbarch, all_reggroup);
3135 reggroup_add (gdbarch, save_reggroup);
3136 reggroup_add (gdbarch, restore_reggroup);
/* Implement the "cannot_store_register" gdbarch method.

   Return non-zero if REGNUM must not be written.  Only the pointer
   authentication mask registers are read-only.  */

static int
aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (!tdep->has_pauth ())
    return 0;

  /* Pointer authentication registers are read-only.  */
  return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
	  || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
}
/* Initialize the current architecture based on INFO.  If possible,
   re-use an architecture from ARCHES, which is a list of
   architectures already created during this debugging session.

   Called e.g. at program startup, when reading a core file, and when
   reading a binary file.  */

static struct gdbarch *
aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
  const struct tdesc_feature *feature_pauth;
  bool valid_p = true;
  int i, num_regs = 0, num_pseudo_regs = 0;
  int first_pauth_regnum = -1, pauth_ra_state_offset = -1;

  /* Use the vector length passed via the target info.  Here -1 is used for no
     SVE, and 0 is unset.  If unset then use the vector length from the existing
     tdesc.  */
  uint64_t vq = 0;
  if (info.id == (int *) -1)
    vq = 0;
  else if (info.id != 0)
    vq = (uint64_t) info.id;
  else
    vq = aarch64_get_tdesc_vq (info.target_desc);

  if (vq > AARCH64_MAX_SVE_VQ)
    internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
		    pulongest (vq), AARCH64_MAX_SVE_VQ);

  /* If there is already a candidate, use it.  */
  for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
       best_arch != nullptr;
       best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
    {
      struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch);
      if (tdep && tdep->vq == vq)
	return best_arch->gdbarch;
    }

  /* Ensure we always have a target descriptor, and that it is for the given VQ
     value.  */
  const struct target_desc *tdesc = info.target_desc;
  if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
    tdesc = aarch64_read_description (vq, false);
  gdb_assert (tdesc);

  feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core");
  feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
  feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
  feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");

  /* The core feature is mandatory.  */
  if (feature_core == nullptr)
    return nullptr;

  struct tdesc_arch_data *tdesc_data = tdesc_data_alloc ();

  /* Validate the description provides the mandatory core R registers
     and allocate their numbers.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
    valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
					AARCH64_X0_REGNUM + i,
					aarch64_r_register_names[i]);

  num_regs = AARCH64_X0_REGNUM + i;

  /* Add the V registers.  */
  if (feature_fpu != nullptr)
    {
      if (feature_sve != nullptr)
	error (_("Program contains both fpu and SVE features."));

      /* Validate the description provides the mandatory V registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
					    AARCH64_V0_REGNUM + i,
					    aarch64_v_register_names[i]);

      num_regs = AARCH64_V0_REGNUM + i;
    }

  /* Add the SVE registers.  */
  if (feature_sve != nullptr)
    {
      /* Validate the description provides the mandatory SVE registers
	 and allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
					    AARCH64_SVE_Z0_REGNUM + i,
					    aarch64_sve_register_names[i]);

      num_regs = AARCH64_SVE_Z0_REGNUM + i;
      num_pseudo_regs += 32;	/* add the Vn register pseudos.  */
    }

  if (feature_fpu != nullptr || feature_sve != nullptr)
    {
      num_pseudo_regs += 32;	/* add the Qn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Dn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Sn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Hn scalar register pseudos */
      num_pseudo_regs += 32;	/* add the Bn scalar register pseudos */
    }

  /* Add the pauth registers.  */
  if (feature_pauth != NULL)
    {
      first_pauth_regnum = num_regs;
      pauth_ra_state_offset = num_pseudo_regs;
      /* Validate the descriptor provides the mandatory PAUTH registers and
	 allocate their numbers.  */
      for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
	valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
					    first_pauth_regnum + i,
					    aarch64_pauth_register_names[i]);

      num_regs += i;
      num_pseudo_regs += 1;	/* Count RA_STATE pseudo register.  */
    }

  if (!valid_p)
    {
      tdesc_data_cleanup (tdesc_data);
      return nullptr;
    }

  /* AArch64 code is always little-endian.  */
  info.byte_order_for_code = BFD_ENDIAN_LITTLE;

  struct gdbarch_tdep *tdep = XCNEW (struct gdbarch_tdep);
  struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);

  /* This should be low enough for everything.  */
  tdep->lowest_pc = 0x20;
  tdep->jb_pc = -1;		/* Longjump support not enabled by default.  */
  tdep->jb_elt_size = 8;
  tdep->vq = vq;
  tdep->pauth_reg_base = first_pauth_regnum;
  tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
				: pauth_ra_state_offset + num_regs;

  set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, aarch64_frame_align);

  /* Advance PC across function entry code.  */
  set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);

  /* The stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Breakpoint manipulation.  */
  set_gdbarch_breakpoint_kind_from_pc (gdbarch,
				       aarch64_breakpoint::kind_from_pc);
  set_gdbarch_sw_breakpoint_from_kind (gdbarch,
				       aarch64_breakpoint::bp_from_kind);
  set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
  set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);

  /* Information about registers, etc.  */
  set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
  set_gdbarch_num_regs (gdbarch, num_regs);

  set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
  set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
  set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
  set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
  set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
  set_tdesc_pseudo_register_reggroup_p (gdbarch,
					aarch64_pseudo_register_reggroup_p);
  set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);

  /* ABI */
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 128);
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_wchar_signed (gdbarch, 0);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
  set_gdbarch_type_align (gdbarch, aarch64_type_align);

  /* Internal <-> external register number maps.  */
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);

  /* Returning results.  */
  set_gdbarch_return_value (gdbarch, aarch64_return_value);

  /* Disassembly.  */
  set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);

  /* Virtual tables.  */
  set_gdbarch_vbit_in_delta (gdbarch, 1);

  /* Register architecture.  */
  aarch64_add_reggroups (gdbarch);

  /* Hook in the ABI-specific overrides, if they have been registered.  */
  info.target_desc = tdesc;
  info.tdesc_data = tdesc_data;
  gdbarch_init_osabi (info, gdbarch);

  dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
  /* Register DWARF CFA vendor handler.  */
  set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
					   aarch64_execute_dwarf_cfa_vendor_op);

  /* Add some default predicates.  */
  frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
  dwarf2_append_unwinders (gdbarch);
  frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);

  frame_base_set_default (gdbarch, &aarch64_normal_base);

  /* Now we have tuned the configuration, set a few final things,
     based on what the OS ABI has told us.  */

  if (tdep->jb_pc >= 0)
    set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);

  set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);

  set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);

  tdesc_use_registers (gdbarch, tdesc, tdesc_data);

  /* Add standard register aliases.  */
  for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
    user_reg_add (gdbarch, aarch64_register_aliases[i].name,
		  value_of_aarch64_user_reg,
		  &aarch64_register_aliases[i].regnum);

  register_aarch64_ravenscar_ops (gdbarch);

  return gdbarch;
}
/* Dump the target-dependent state to FILE for "maint print arch".  */

static void
aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep == NULL)
    return;

  fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
		      paddress (gdbarch, tdep->lowest_pc));
}
3414 static void aarch64_process_record_test (void);
/* Module initializer: register the AArch64 architecture, the debug
   knob, and (when built with self tests) the unit tests.  */

void
_initialize_aarch64_tdep (void)
{
  gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
		    aarch64_dump_tdep);

  /* Debug this file's internals.  */
  add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
Set AArch64 debugging."), _("\
Show AArch64 debugging."), _("\
When on, AArch64 specific debugging is enabled."),
			    NULL,
			    show_aarch64_debug,
			    &setdebuglist, &showdebuglist);

#if GDB_SELF_TEST
  selftests::register_test ("aarch64-analyze-prologue",
			    selftests::aarch64_analyze_prologue_test);
  selftests::register_test ("aarch64-process-record",
			    selftests::aarch64_process_record_test);
#endif
}
3441 /* AArch64 process record-replay related structures, defines etc. */
/* Allocate and fill REGS (a uint32_t array of LENGTH entries) from
   RECORD_BUF.  No-op when LENGTH is zero.  Caller owns the memory.
   Note: the source previously contained a mis-encoded "&" (U+00AE
   "registered" sign) in the memcpy source operand; repaired here.  */
#define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
	do \
	  { \
	    unsigned int reg_len = LENGTH; \
	    if (reg_len) \
	      { \
		REGS = XNEWVEC (uint32_t, reg_len); \
		memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
	      } \
	  } \
	while (0)
/* Allocate and fill MEMS (an aarch64_mem_r array of LENGTH entries)
   from RECORD_BUF.  No-op when LENGTH is zero.  Caller owns the
   memory.  */
#define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
	do \
	  { \
	    unsigned int mem_len = LENGTH; \
	    if (mem_len) \
	      { \
		MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
		memcpy (&MEMS->len, &RECORD_BUF[0], \
			sizeof (struct aarch64_mem_r) * LENGTH); \
	      } \
	  } \
	while (0)
/* AArch64 record/replay structures and enumerations.  */

/* One recorded memory access: ADDR bytes starting at LEN... note the
   field order is LEN then ADDR, matching MEM_ALLOC's memcpy layout.  */
struct aarch64_mem_r
{
  uint64_t len;    /* Record length.  */
  uint64_t addr;   /* Memory address.  */
};

/* Outcome of decoding one instruction for record/replay.  */
enum aarch64_record_result
{
  AARCH64_RECORD_SUCCESS,
  AARCH64_RECORD_UNSUPPORTED,
  AARCH64_RECORD_UNKNOWN
};
/* Working state while decoding a single instruction for process
   record: the instruction itself plus the register and memory
   locations it will modify.  */
typedef struct insn_decode_record_t
{
  struct gdbarch *gdbarch;
  struct regcache *regcache;
  CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
  uint32_t aarch64_insn;               /* Insn to be recorded.  */
  uint32_t mem_rec_count;              /* Count of memory records.  */
  uint32_t reg_rec_count;              /* Count of register records.  */
  uint32_t *aarch64_regs;              /* Registers to be recorded.  */
  struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
} insn_decode_record;
/* Record handler for data processing - register instructions.

   Record the destination register, plus CPSR when the instruction sets
   the condition flags.  Returns AARCH64_RECORD_SUCCESS or
   AARCH64_RECORD_UNKNOWN for unrecognized encodings.  */

static unsigned int
aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
  insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);

  if (!bit (aarch64_insn_r->aarch64_insn, 28))
    {
      uint8_t setflags;

      /* Logical (shifted register).  */
      if (insn_bits24_27 == 0x0a)
	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
      /* Add/subtract.  */
      else if (insn_bits24_27 == 0x0b)
	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      else
	return AARCH64_RECORD_UNKNOWN;

      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    {
      if (insn_bits24_27 == 0x0b)
	{
	  /* Data-processing (3 source).  */
	  record_buf[0] = reg_rd;
	  aarch64_insn_r->reg_rec_count = 1;
	}
      else if (insn_bits24_27 == 0x0a)
	{
	  if (insn_bits21_23 == 0x00)
	    {
	      /* Add/subtract (with carry).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	      if (bit (aarch64_insn_r->aarch64_insn, 29))
		{
		  record_buf[1] = AARCH64_CPSR_REGNUM;
		  aarch64_insn_r->reg_rec_count = 2;
		}
	    }
	  else if (insn_bits21_23 == 0x02)
	    {
	      /* Conditional compare (register) and conditional compare
		 (immediate) instructions.  */
	      record_buf[0] = AARCH64_CPSR_REGNUM;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
	    {
	      /* Conditional select.  */
	      /* Data-processing (2 source).  */
	      /* Data-processing (1 source).  */
	      record_buf[0] = reg_rd;
	      aarch64_insn_r->reg_rec_count = 1;
	    }
	  else
	    return AARCH64_RECORD_UNKNOWN;
	}
    }

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
/* Record handler for data processing - immediate instructions.

   Record the destination register, plus CPSR when the instruction sets
   the condition flags.  Returns AARCH64_RECORD_SUCCESS or
   AARCH64_RECORD_UNKNOWN for unrecognized encodings.  */

static unsigned int
aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
{
  uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
  uint32_t record_buf[4];

  reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
  insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
  insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);

  if (insn_bits24_27 == 0x00                    /* PC rel addressing.  */
     || insn_bits24_27 == 0x03                  /* Bitfield and Extract.  */
     || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate).  */
    {
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
    }
  else if (insn_bits24_27 == 0x01)
    {
      /* Add/Subtract (immediate).  */
      setflags = bit (aarch64_insn_r->aarch64_insn, 29);
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else if (insn_bits24_27 == 0x02 && !insn_bit23)
    {
      /* Logical (immediate).  */
      setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
      record_buf[0] = reg_rd;
      aarch64_insn_r->reg_rec_count = 1;
      if (setflags)
	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
    }
  else
    return AARCH64_RECORD_UNKNOWN;

  REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
	     record_buf);
  return AARCH64_RECORD_SUCCESS;
}
3616 /* Record handler for branch, exception generation and system instructions. */
/* Record the effects of branches, exception-generating instructions
   (SVC is delegated to the OS-specific syscall recorder) and system
   instructions (MRS/MSR/SYS/SYSL/hints).  Branches record PC, and the
   branch-with-link forms record LR as well.  */
3619 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3621 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3622 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3623 uint32_t record_buf[4];
3625 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3626 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3627 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3629 if (insn_bits28_31 == 0x0d)
3631 /* Exception generation instructions. */
3632 if (insn_bits24_27 == 0x04)
/* Only SVC is handled; the bit tests below select the SVC encoding.  */
3634 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3635 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3636 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3638 ULONGEST svc_number;
/* The syscall number lives in x8; pass it to the per-OS recorder.  */
3640 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3642 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
/* Other exception-generating instructions (HVC, SMC, BRK, ...) are
   not supported by process record.  */
3646 return AARCH64_RECORD_UNSUPPORTED;
3648 /* System instructions. */
3649 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3651 uint32_t reg_rt, reg_crn;
3653 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3654 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3656 /* Record rt in case of sysl and mrs instructions. */
3657 if (bit (aarch64_insn_r->aarch64_insn, 21))
3659 record_buf[0] = reg_rt;
3660 aarch64_insn_r->reg_rec_count = 1;
3662 /* Record cpsr for hint and msr(immediate) instructions. */
3663 else if (reg_crn == 0x02 || reg_crn == 0x04)
3665 record_buf[0] = AARCH64_CPSR_REGNUM;
3666 aarch64_insn_r->reg_rec_count = 1;
3669 /* Unconditional branch (register). */
3670 else if((insn_bits24_27 & 0x0e) == 0x06)
3672 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
/* BLR (bits 21-22 == 01) also writes the link register.  */
3673 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3674 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3677 return AARCH64_RECORD_UNKNOWN;
3679 /* Unconditional branch (immediate). */
3680 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3682 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
/* Bit 31 distinguishes BL from B; BL also writes LR.  */
3683 if (bit (aarch64_insn_r->aarch64_insn, 31))
3684 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3687 /* Compare & branch (immediate), Test & branch (immediate) and
3688 Conditional branch (immediate). */
3689 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3691 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3693 return AARCH64_RECORD_SUCCESS;
3696 /* Record handler for advanced SIMD load and store instructions. */
/* Decode an Advanced SIMD load/store (single- or multiple-structure)
   instruction and record its effects: the V registers written for loads
   (bit 22 set), the memory (length, address) pairs written for stores,
   and the base register Rn when writeback (bit 23) is in effect.  */
3699 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3702 uint64_t addr_offset = 0;
3703 uint32_t record_buf[24];
/* Memory records are stored as flat (length, address) pairs.  */
3704 uint64_t record_buf_mem[24];
3705 uint32_t reg_rn, reg_rt;
3706 uint32_t reg_index = 0, mem_index = 0;
3707 uint8_t opcode_bits, size_bits;
3709 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3710 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3711 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3712 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
/* The effective address is based on the current value of Rn.  */
3713 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3716 debug_printf ("Process record: Advanced SIMD load/store\n");
3718 /* Load/store single structure. */
3719 if (bit (aarch64_insn_r->aarch64_insn, 24))
3721 uint8_t sindex, scale, selem, esize, replicate = 0;
3722 scale = opcode_bits >> 2;
/* SELEM = number of structure elements, from opcode<1> and bit 21.  */
3723 selem = ((opcode_bits & 0x02) |
3724 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
/* The checks below reject reserved size/opcode combinations.  */
3728 if (size_bits & 0x01)
3729 return AARCH64_RECORD_UNKNOWN;
3732 if ((size_bits >> 1) & 0x01)
3733 return AARCH64_RECORD_UNKNOWN;
3734 if (size_bits & 0x01)
3736 if (!((opcode_bits >> 1) & 0x01))
3739 return AARCH64_RECORD_UNKNOWN;
3743 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3750 return AARCH64_RECORD_UNKNOWN;
/* Replicate forms (LD1R etc.): SELEM consecutive V registers are
   written; register numbering wraps modulo 32.  */
3756 for (sindex = 0; sindex < selem; sindex++)
3758 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3759 reg_rt = (reg_rt + 1) % 32;
/* Otherwise, per element: record a V register for a load (bit 22)
   or an (esize/8)-byte memory slice for a store.  */
3763 for (sindex = 0; sindex < selem; sindex++)
3765 if (bit (aarch64_insn_r->aarch64_insn, 22))
3766 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3769 record_buf_mem[mem_index++] = esize / 8;
3770 record_buf_mem[mem_index++] = address + addr_offset;
3772 addr_offset = addr_offset + (esize / 8);
3773 reg_rt = (reg_rt + 1) % 32;
3777 /* Load/store multiple structure. */
3780 uint8_t selem, esize, rpt, elements;
3781 uint8_t eindex, rindex;
/* Element size in bits; bit 30 (Q) selects 128- vs 64-bit vectors.  */
3783 esize = 8 << size_bits;
3784 if (bit (aarch64_insn_r->aarch64_insn, 30))
3785 elements = 128 / esize;
3787 elements = 64 / esize;
/* RPT (register repeat count) and SELEM depend on the LD/ST variant;
   the case bodies setting them are not visible in this excerpt.  */
3789 switch (opcode_bits)
3791 /*LD/ST4 (4 Registers). */
3796 /*LD/ST1 (4 Registers). */
3801 /*LD/ST3 (3 Registers). */
3806 /*LD/ST1 (3 Registers). */
3811 /*LD/ST1 (1 Register). */
3816 /*LD/ST2 (2 Registers). */
3821 /*LD/ST1 (2 Registers). */
3827 return AARCH64_RECORD_UNSUPPORTED;
/* Walk registers x elements x structure-elements, recording V
   registers (loads) or memory slices (stores) as above.  */
3830 for (rindex = 0; rindex < rpt; rindex++)
3831 for (eindex = 0; eindex < elements; eindex++)
3833 uint8_t reg_tt, sindex;
3834 reg_tt = (reg_rt + rindex) % 32;
3835 for (sindex = 0; sindex < selem; sindex++)
3837 if (bit (aarch64_insn_r->aarch64_insn, 22))
3838 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3841 record_buf_mem[mem_index++] = esize / 8;
3842 record_buf_mem[mem_index++] = address + addr_offset;
3844 addr_offset = addr_offset + (esize / 8);
3845 reg_tt = (reg_tt + 1) % 32;
/* Writeback forms also update the base register.  */
3850 if (bit (aarch64_insn_r->aarch64_insn, 23))
3851 record_buf[reg_index++] = reg_rn;
3853 aarch64_insn_r->reg_rec_count = reg_index;
/* Two buffer slots (length, address) per memory record.  */
3854 aarch64_insn_r->mem_rec_count = mem_index / 2;
3855 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3857 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3859 return AARCH64_RECORD_SUCCESS;
3862 /* Record handler for load and store instructions. */
3865 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3867 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3868 uint8_t insn_bit23, insn_bit21;
3869 uint8_t opc, size_bits, ld_flag, vector_flag;
3870 uint32_t reg_rn, reg_rt, reg_rt2;
3871 uint64_t datasize, offset;
3872 uint32_t record_buf[8];
3873 uint64_t record_buf_mem[8];
3876 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3877 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3878 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3879 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3880 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3881 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3882 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3883 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3884 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3885 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3886 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3888 /* Load/store exclusive. */
3889 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3892 debug_printf ("Process record: load/store exclusive\n");
3896 record_buf[0] = reg_rt;
3897 aarch64_insn_r->reg_rec_count = 1;
3900 record_buf[1] = reg_rt2;
3901 aarch64_insn_r->reg_rec_count = 2;
3907 datasize = (8 << size_bits) * 2;
3909 datasize = (8 << size_bits);
3910 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3912 record_buf_mem[0] = datasize / 8;
3913 record_buf_mem[1] = address;
3914 aarch64_insn_r->mem_rec_count = 1;
3917 /* Save register rs. */
3918 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3919 aarch64_insn_r->reg_rec_count = 1;
3923 /* Load register (literal) instructions decoding. */
3924 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3927 debug_printf ("Process record: load register (literal)\n");
3929 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3931 record_buf[0] = reg_rt;
3932 aarch64_insn_r->reg_rec_count = 1;
3934 /* All types of load/store pair instructions decoding. */
3935 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3938 debug_printf ("Process record: load/store pair\n");
3944 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3945 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3949 record_buf[0] = reg_rt;
3950 record_buf[1] = reg_rt2;
3952 aarch64_insn_r->reg_rec_count = 2;
3957 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3959 size_bits = size_bits >> 1;
3960 datasize = 8 << (2 + size_bits);
3961 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3962 offset = offset << (2 + size_bits);
3963 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3965 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3967 if (imm7_off & 0x40)
3968 address = address - offset;
3970 address = address + offset;
3973 record_buf_mem[0] = datasize / 8;
3974 record_buf_mem[1] = address;
3975 record_buf_mem[2] = datasize / 8;
3976 record_buf_mem[3] = address + (datasize / 8);
3977 aarch64_insn_r->mem_rec_count = 2;
3979 if (bit (aarch64_insn_r->aarch64_insn, 23))
3980 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3982 /* Load/store register (unsigned immediate) instructions. */
3983 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3985 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3995 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3997 /* PRFM (immediate) */
3998 return AARCH64_RECORD_SUCCESS;
4000 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
4002 /* LDRSW (immediate) */
4016 debug_printf ("Process record: load/store (unsigned immediate):"
4017 " size %x V %d opc %x\n", size_bits, vector_flag,
4023 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4024 datasize = 8 << size_bits;
4025 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4027 offset = offset << size_bits;
4028 address = address + offset;
4030 record_buf_mem[0] = datasize >> 3;
4031 record_buf_mem[1] = address;
4032 aarch64_insn_r->mem_rec_count = 1;
4037 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4039 record_buf[0] = reg_rt;
4040 aarch64_insn_r->reg_rec_count = 1;
4043 /* Load/store register (register offset) instructions. */
4044 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4045 && insn_bits10_11 == 0x02 && insn_bit21)
4048 debug_printf ("Process record: load/store (register offset)\n");
4049 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4056 if (size_bits != 0x03)
4059 return AARCH64_RECORD_UNKNOWN;
4063 ULONGEST reg_rm_val;
4065 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4066 bits (aarch64_insn_r->aarch64_insn, 16, 20), ®_rm_val);
4067 if (bit (aarch64_insn_r->aarch64_insn, 12))
4068 offset = reg_rm_val << size_bits;
4070 offset = reg_rm_val;
4071 datasize = 8 << size_bits;
4072 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4074 address = address + offset;
4075 record_buf_mem[0] = datasize >> 3;
4076 record_buf_mem[1] = address;
4077 aarch64_insn_r->mem_rec_count = 1;
4082 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4084 record_buf[0] = reg_rt;
4085 aarch64_insn_r->reg_rec_count = 1;
4088 /* Load/store register (immediate and unprivileged) instructions. */
4089 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4094 debug_printf ("Process record: load/store "
4095 "(immediate and unprivileged)\n");
4097 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4104 if (size_bits != 0x03)
4107 return AARCH64_RECORD_UNKNOWN;
4112 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4113 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4114 datasize = 8 << size_bits;
4115 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4117 if (insn_bits10_11 != 0x01)
4119 if (imm9_off & 0x0100)
4120 address = address - offset;
4122 address = address + offset;
4124 record_buf_mem[0] = datasize >> 3;
4125 record_buf_mem[1] = address;
4126 aarch64_insn_r->mem_rec_count = 1;
4131 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4133 record_buf[0] = reg_rt;
4134 aarch64_insn_r->reg_rec_count = 1;
4136 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4137 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4139 /* Advanced SIMD load/store instructions. */
4141 return aarch64_record_asimd_load_store (aarch64_insn_r);
4143 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4145 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4147 return AARCH64_RECORD_SUCCESS;
4150 /* Record handler for data processing SIMD and floating point instructions. */
/* Classify a SIMD/FP data-processing instruction and record its single
   destination: a general register (X0-based) for FP-to-int conversions
   and moves to integer, a SIMD register (V0-based) for everything that
   produces a vector/FP result, or CPSR for the FP compare forms.
   Exactly one register is recorded (asserted below).  */
4153 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4155 uint8_t insn_bit21, opcode, rmode, reg_rd;
4156 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4157 uint8_t insn_bits11_14;
4158 uint32_t record_buf[2];
4160 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4161 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4162 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4163 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4164 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4165 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4166 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4167 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4168 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4171 debug_printf ("Process record: data processing SIMD/FP: ");
4173 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4175 /* Floating point - fixed point conversion instructions. */
4179 debug_printf ("FP - fixed point conversion");
/* FP-to-fixed-point writes a general register; the converse writes
   a SIMD register.  */
4181 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4182 record_buf[0] = reg_rd;
4184 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4186 /* Floating point - conditional compare instructions. */
4187 else if (insn_bits10_11 == 0x01)
4190 debug_printf ("FP - conditional compare");
/* Compares only affect the condition flags.  */
4192 record_buf[0] = AARCH64_CPSR_REGNUM;
4194 /* Floating point - data processing (2-source) and
4195 conditional select instructions. */
4196 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4199 debug_printf ("FP - DP (2-source)");
4201 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4203 else if (insn_bits10_11 == 0x00)
4205 /* Floating point - immediate instructions. */
4206 if ((insn_bits12_15 & 0x01) == 0x01
4207 || (insn_bits12_15 & 0x07) == 0x04)
4210 debug_printf ("FP - immediate");
4211 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4213 /* Floating point - compare instructions. */
4214 else if ((insn_bits12_15 & 0x03) == 0x02)
/* NOTE(review): this debug string says "FP - immediate" but this is
   the FP compare branch — looks copy-pasted from the case above.  */
4217 debug_printf ("FP - immediate");
4218 record_buf[0] = AARCH64_CPSR_REGNUM;
4220 /* Floating point - integer conversions instructions. */
4221 else if (insn_bits12_15 == 0x00)
4223 /* Convert float to integer instruction. */
4224 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4227 debug_printf ("float to int conversion");
4229 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4231 /* Convert integer to float instruction. */
4232 else if ((opcode >> 1) == 0x01 && !rmode)
4235 debug_printf ("int to float conversion");
4237 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4239 /* Move float to integer instruction. */
4240 else if ((opcode >> 1) == 0x03)
4243 debug_printf ("move float to int");
/* Opcode bit 0 selects the direction of the FMOV.  */
4245 if (!(opcode & 0x01))
4246 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4248 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4251 return AARCH64_RECORD_UNKNOWN;
4254 return AARCH64_RECORD_UNKNOWN;
4257 return AARCH64_RECORD_UNKNOWN;
4259 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4262 debug_printf ("SIMD copy");
4264 /* Advanced SIMD copy instructions. */
4265 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4266 && !bit (aarch64_insn_r->aarch64_insn, 15)
4267 && bit (aarch64_insn_r->aarch64_insn, 10))
/* SMOV/UMOV move an element to a general register; other copies
   write a SIMD register.  */
4269 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4270 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4272 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4275 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4277 /* All remaining floating point or advanced SIMD instructions. */
4281 debug_printf ("all remain");
4283 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4287 debug_printf ("\n");
/* Every successful path above fills exactly record_buf[0].  */
4289 aarch64_insn_r->reg_rec_count++;
4290 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4291 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4293 return AARCH64_RECORD_SUCCESS;
4296 /* Decodes insns type and invokes its record handler. */
/* Top-level dispatcher for process record: classify the instruction by
   bits 25-28 of its encoding and delegate to the matching group
   handler.  Returns the handler's status, or AARCH64_RECORD_UNSUPPORTED
   when the encoding falls in none of the decoded groups.  */
4299 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4301 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4303 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4304 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4305 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4306 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4308 /* Data processing - immediate instructions. */
4309 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4310 return aarch64_record_data_proc_imm (aarch64_insn_r);
4312 /* Branch, exception generation and system instructions. */
4313 if (ins_bit26 && !ins_bit27 && ins_bit28)
4314 return aarch64_record_branch_except_sys (aarch64_insn_r);
4316 /* Load and store instructions. */
4317 if (!ins_bit25 && ins_bit27)
4318 return aarch64_record_load_store (aarch64_insn_r);
4320 /* Data processing - register instructions. */
4321 if (ins_bit25 && !ins_bit26 && ins_bit27)
4322 return aarch64_record_data_proc_reg (aarch64_insn_r);
4324 /* Data processing - SIMD and floating point instructions. */
4325 if (ins_bit25 && ins_bit26 && ins_bit27)
4326 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4328 return AARCH64_RECORD_UNSUPPORTED;
4331 /* Cleans up local record registers and memory allocations. */
/* Free the register and memory record lists allocated via REG_ALLOC /
   MEM_ALLOC during decoding.  Callers invoke this even when nothing was
   recorded (see aarch64_process_record_test, which frees a record with
   zero reg/mem counts), so the pointers may be NULL here.  */
4334 deallocate_reg_mem (insn_decode_record *record)
4336 xfree (record->aarch64_regs);
4337 xfree (record->aarch64_mems);
4341 namespace selftests {
/* Self test for the record decoder: a PRFM (prefetch) instruction must
   decode with AARCH64_RECORD_SUCCESS while recording no register or
   memory changes.  A NULL regcache is safe here because the PRFM path
   in aarch64_record_load_store returns before any regcache read.  */
4344 aarch64_process_record_test (void)
4346 struct gdbarch_info info;
4349 gdbarch_info_init (&info);
4350 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4352 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4353 SELF_CHECK (gdbarch != NULL);
4355 insn_decode_record aarch64_record;
/* Zero-fill so reg/mem record pointers and counts start out clear.  */
4357 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4358 aarch64_record.regcache = NULL;
4359 aarch64_record.this_addr = 0;
4360 aarch64_record.gdbarch = gdbarch;
4362 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4363 aarch64_record.aarch64_insn = 0xf9800020;
4364 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4365 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
/* Prefetch must record nothing.  */
4366 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4367 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4369 deallocate_reg_mem (&aarch64_record);
4372 } // namespace selftests
4373 #endif /* GDB_SELF_TEST */
4375 /* Parse the current instruction and record the values of the registers and
4376 memory that will be changed in current instruction to record_arch_list
4377 return -1 if something is wrong. */
4380 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4381 CORE_ADDR insn_addr)
4383 uint32_t rec_no = 0;
4384 uint8_t insn_size = 4;
4386 gdb_byte buf[insn_size];
4387 insn_decode_record aarch64_record;
4389 memset (&buf[0], 0, insn_size);
4390 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4391 target_read_memory (insn_addr, &buf[0], insn_size);
4392 aarch64_record.aarch64_insn
4393 = (uint32_t) extract_unsigned_integer (&buf[0],
4395 gdbarch_byte_order (gdbarch));
4396 aarch64_record.regcache = regcache;
4397 aarch64_record.this_addr = insn_addr;
4398 aarch64_record.gdbarch = gdbarch;
4400 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4401 if (ret == AARCH64_RECORD_UNSUPPORTED)
4403 printf_unfiltered (_("Process record does not support instruction "
4404 "0x%0x at address %s.\n"),
4405 aarch64_record.aarch64_insn,
4406 paddress (gdbarch, insn_addr));
4412 /* Record registers. */
4413 record_full_arch_list_add_reg (aarch64_record.regcache,
4415 /* Always record register CPSR. */
4416 record_full_arch_list_add_reg (aarch64_record.regcache,
4417 AARCH64_CPSR_REGNUM);
4418 if (aarch64_record.aarch64_regs)
4419 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4420 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4421 aarch64_record.aarch64_regs[rec_no]))
4424 /* Record memories. */
4425 if (aarch64_record.aarch64_mems)
4426 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4427 if (record_full_arch_list_add_mem
4428 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4429 aarch64_record.aarch64_mems[rec_no].len))
4432 if (record_full_arch_list_add_end ())
4436 deallocate_reg_mem (&aarch64_record);