/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "reggroups.h"
#include "arch-utils.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "dwarf2-frame.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "common/selftest.h"

#include "aarch64-tdep.h"
#include "aarch64-ravenscar-thread.h"

#include "elf/aarch64.h"
#include "common/vec.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "opcode/aarch64.h"

#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
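/* For example, given the instruction word 0x910003fd ("mov x29, sp",
   an alias of "add x29, sp, #0"), bits (insn, 5, 9) extracts the
   five-bit Rn field and yields 31 (the SP encoding), while
   bit (insn, 31) yields 1 (the 64-bit "sf" flag).  */
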
/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1];

/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};

/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};

/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr", "fpcr"
};

/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",

  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};

/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};

static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}

/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};

/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;

      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            regs[rd] = pv_add_constant (regs[rn],
                                        inst.operands[2].imm.value);
          else
            regs[rd] = pv_add_constant (regs[rn],
                                        -inst.operands[2].imm.value);
        }
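      /* For example, after "sub sp, sp, #0x10" the analysis leaves
         regs[AARCH64_SP_REGNUM] == pv_register (AARCH64_SP_REGNUM, -16),
         i.e. SP is known to be sixteen bytes below its value on entry.  */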
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store (pv_add_constant (regs[rn],
                                        inst.operands[1].addr.offset.imm),
                       is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only bottom 64-bit of each V register (D register) need
                 to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), 8,
                       regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + 8), 8,
                       regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
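      /* For example, "stp x29, x30, [sp, #-272]!" records saves of x29 at
         <entry SP> - 272 and x30 at <entry SP> - 264, and the writeback
         leaves regs[AARCH64_SP_REGNUM] as
         pv_register (AARCH64_SP_REGNUM, -272).  */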
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate) */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only bottom 64-bit of each V register (D register) need
                 to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm),
                       is64 ? 8 : 4, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }

  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}

static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}

#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};

static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */

/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}

/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}

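/* The frame-pointer fallback above assumes the standard two-instruction
   frame setup, e.g.:

     stp x29, x30, [sp, #-16]!
     mov x29, sp

   so the saved FP sits at [x29] and the saved LR at [x29, #8].  */
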
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}

/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}

/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};

/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}

/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}

/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}

/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};

/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};

/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}

/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}

/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}

/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}

/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);

/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_FLT:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_PTR:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
        {
          /* Use the natural alignment for vector types (the same for
             scalar type), but the maximum alignment is 128-bit.  */
          if (TYPE_LENGTH (t) > 16)
            return 16;
          else
            return TYPE_LENGTH (t);
        }
      else
        return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}

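/* For example, aarch64_type_align returns 4 for
   "struct { char c; int i; }" (the largest field alignment), and caps a
   32-byte vector type at 16.  */
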
/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing value.  */

static int
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
                                         struct type **fundamental_type)
{
  if (type == nullptr)
    return -1;

  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_FLT:
      if (TYPE_LENGTH (type) > 16)
        return -1;

      if (*fundamental_type == nullptr)
        *fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
               || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
        return -1;

      return 1;

    case TYPE_CODE_COMPLEX:
      {
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
        if (TYPE_LENGTH (target_type) > 16)
          return -1;

        if (*fundamental_type == nullptr)
          *fundamental_type = target_type;
        else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
                 || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))
          return -1;

        return 2;
      }

    case TYPE_CODE_ARRAY:
      {
        if (TYPE_VECTOR (type))
          {
            if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
              return -1;

            if (*fundamental_type == nullptr)
              *fundamental_type = type;
            else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
                     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))
              return -1;

            return 1;
          }
        else
          {
            struct type *target_type = TYPE_TARGET_TYPE (type);
            int count = aapcs_is_vfp_call_or_return_candidate_1
                          (target_type, fundamental_type);

            if (count == -1)
              return count;

            count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
            return count;
          }
      }

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      {
        int count = 0;

        for (int i = 0; i < TYPE_NFIELDS (type); i++)
          {
            /* Ignore any static fields.  */
            if (field_is_static (&TYPE_FIELD (type, i)))
              continue;

            struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

            int sub_count = aapcs_is_vfp_call_or_return_candidate_1
                              (member, fundamental_type);
            if (sub_count == -1)
              return -1;
            count += sub_count;
          }

        /* Ensure there is no padding between the fields (allowing for empty
           zero length structs).  */
        int ftype_length = (*fundamental_type == nullptr)
                           ? 0 : TYPE_LENGTH (*fundamental_type);
        if (count * ftype_length != TYPE_LENGTH (type))
          return -1;

        return count;
      }

    default:
      break;
    }

  return -1;
}

/* Return true if an argument, whose type is described by TYPE, can be passed
   or returned in simd/fp registers, providing enough parameter passing
   registers are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:

   - float.
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type
     where all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type
     where all the members are short vectors and has at most 4 members.
   - Complex (7.1.1).

   Note that HFAs and HVAs can include nested structures and arrays.  */

static bool
aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
                                       struct type **fundamental_type)
{
  if (type == nullptr)
    return false;

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
                                                          fundamental_type);

  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
    {
      *count = ag_count;
      return true;
    }

  return false;
}

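/* For example, "struct { float x, y, z; }" is an HFA: *COUNT becomes 3
   and *FUNDAMENTAL_TYPE the float type, so the value travels in v0-v2.
   "struct { float f; double d; }" fails (mixed base types), as does a
   struct of five doubles (more than HA_MAX_NUM_FLDS members).  */
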
/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};

/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      regnum++;
      len -= partial_len;
      buf += partial_len;
      info->ngrn++;
    }
}

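/* For example, a 16-byte struct passed when NGRN is 0 is copied into x0
   and x1, eight bytes per register, leaving NGRN at 2 for the next
   argument.  */
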
/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function, does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      /* Enough space for a full vector register.  */
      gdb_byte reg[register_size (gdbarch, regnum)];
      gdb_assert (len <= sizeof (reg));

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }

  return 0;
}

/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}

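/* For example, pushing a 4-byte argument when NSAA is 0 queues the four
   data bytes and then four bytes of padding (a NULL-data stack_item_t),
   since stack slots are at least eight-byte aligned per C.17 above.  */
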
/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available, then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}

/* Pass a value, which is of type arg_type, in a V register.  Assumes the
   value is an aapcs_is_vfp_call_or_return_candidate and there are enough
   spare V registers.  A return value of false is an error state, as the
   value will have been partially passed to the stack.  */

static bool
pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
                         struct aarch64_call_info *info, struct type *arg_type,
                         struct value *arg)
{
  switch (TYPE_CODE (arg_type))
    {
    case TYPE_CODE_FLT:
      return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                        value_contents (arg));

    case TYPE_CODE_COMPLEX:
      {
        const bfd_byte *buf = value_contents (arg);
        struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));

        if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                        buf))
          return false;

        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
                          buf + TYPE_LENGTH (target_type));
      }

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (arg_type))
        return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
                          value_contents (arg));
      /* fall through.  */

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
        {
          /* Don't include static fields.  */
          if (field_is_static (&TYPE_FIELD (arg_type, i)))
            continue;

          struct value *field = value_primitive_field (arg, 0, i, arg_type);
          struct type *field_type = check_typedef (value_type (field));

          if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
                                        field))
            return false;
        }
      return true;

    default:
      return false;
    }
}

/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp,
                         function_call_return_method return_method,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two stage process.  The language
     handler is consulted first and may decide to return in memory (eg
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.  */

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot, lose it.  */
  if (return_method == return_method_hidden_param)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (return_method != return_method_normal)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }

  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type, *fundamental_type;
      int len, elements;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      /* If arg can be passed in v registers as per the AAPCS64, then do so
         if there are enough spare registers.  */
      if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
                                                 &fundamental_type))
        {
          if (info.nsrn + elements <= 8)
            {
              /* We know that we have sufficient registers available therefore
                 this will never need to fallback to the stack.  */
              if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
                                            arg))
                gdb_assert_not_reached ("Failed to push args");
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          continue;
        }

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
        write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}

/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}

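/* For example, both 0x7ffffff8 and 0x7ffffff0 align to 0x7ffffff0; the
   stack pointer is only ever rounded down.  */
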
/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Return the type for an AdvSIMD V register.  */

static struct type *
aarch64_vnv_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnv_type == NULL)
    {
      struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
                                            TYPE_CODE_UNION);

      append_composite_type_field (t, "d", aarch64_vnd_type (gdbarch));
      append_composite_type_field (t, "s", aarch64_vns_type (gdbarch));
      append_composite_type_field (t, "h", aarch64_vnh_type (gdbarch));
      append_composite_type_field (t, "b", aarch64_vnb_type (gdbarch));
      append_composite_type_field (t, "q", aarch64_vnq_type (gdbarch));

      tdep->vnv_type = t;
    }

  return tdep->vnv_type;
}

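/* These union types give user expressions different views of a vector
   register; e.g. "print $q0.u" shows the 128-bit value as an unsigned
   integer, and on an SVE target "print $v0.d.f" reads the low 64 bits
   of the vector as a double.  */
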
/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  if (reg == AARCH64_DWARF_SVE_VG)
    return AARCH64_SVE_VG_REGNUM;

  if (reg == AARCH64_DWARF_SVE_FFR)
    return AARCH64_SVE_FFR_REGNUM;

  if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
    return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;

  if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
    return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;

  return -1;
}

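/* For example, under the AAPCS64 DWARF numbering (x0 is 0, sp is 31,
   v0 is 64), DWARF register 66 resolves to AARCH64_V0_REGNUM + 2,
   i.e. GDB's v2.  */
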
/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return default_print_insn (memaddr, info);
}

/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;

/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
                              gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
                                             &fundamental_type))
    {
      int len = TYPE_LENGTH (fundamental_type);

      for (int i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          /* Enough space for a full vector register.  */
          gdb_byte buf[register_size (gdbarch, regno)];
          gdb_assert (len <= sizeof (buf));

          if (aarch64_debug)
            {
              debug_printf ("read HFA or HVA return value element %d from %s\n",
                            i + 1,
                            gdbarch_register_name (gdbarch, regno));
            }
          regs->cooked_read (regno, buf);

          memcpy (valbuf, buf, len);
          valbuf += len;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_IS_REFERENCE (type)
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
         straightforward.  Otherwise we have to play around a bit
         more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
        {
          /* By using store_unsigned_integer we avoid having to do
             anything special for small big-endian values.  */
          regcache_cooked_read_unsigned (regs, regno++, &tmp);
          store_unsigned_integer (valbuf,
                                  (len > X_REGISTER_SIZE
                                   ? X_REGISTER_SIZE : len), byte_order, tmp);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
        {
          regs->cooked_read (regno++, buf);
          memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}

/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
                                             &fundamental_type))
    {
      /* v0-v7 are used to return values and one register is allocated
         for one member.  However, HFA or HVA has at most four members.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
         invisible reference.  */
      return 1;
    }

  return 0;
}

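/* For example, "struct { double a, b; }" is an HFA and comes back in
   v0/v1, so the function returns 0; a 24-byte struct of three pointers
   must be returned in memory (addressed via X8), so it returns 1.  */
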
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
                            const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int elements;
  struct type *fundamental_type;

  if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
                                             &fundamental_type))
    {
      int len = TYPE_LENGTH (fundamental_type);

      for (int i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          /* Enough space for a full vector register.  */
          gdb_byte tmpbuf[register_size (gdbarch, regno)];
          gdb_assert (len <= sizeof (tmpbuf));

          if (aarch64_debug)
            {
              debug_printf ("write HFA or HVA return value element %d to %s\n",
                            i + 1,
                            gdbarch_register_name (gdbarch, regno));
            }

          memcpy (tmpbuf, valbuf,
                  len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
          regs->cooked_write (regno, tmpbuf);
          valbuf += len;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_IS_REFERENCE (type)
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
        {
          /* Values of one word or less are zero/sign-extended and
             returned.  */
          bfd_byte tmpbuf[X_REGISTER_SIZE];
          LONGEST val = unpack_long (type, valbuf);

          store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
          regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
        }
      else
        {
          /* Integral values greater than one word are stored in
             consecutive registers starting with x0.  This will always
             be a multiple of the register size.  */
          int len = TYPE_LENGTH (type);
          int regno = AARCH64_X0_REGNUM;

          while (len > 0)
            {
              regs->cooked_write (regno++, valbuf);
              len -= X_REGISTER_SIZE;
              valbuf += X_REGISTER_SIZE;
            }
        }
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
        {
          memcpy (tmpbuf, valbuf,
                  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          regs->cooked_write (regno++, tmpbuf);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}

/* Implement the "return_value" gdbarch method.  */

static enum return_value_convention
aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
                      struct type *valtype, struct regcache *regcache,
                      gdb_byte *readbuf, const gdb_byte *writebuf)
{
  if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
      || TYPE_CODE (valtype) == TYPE_CODE_UNION
      || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
    {
      if (aarch64_return_in_memory (gdbarch, valtype))
        {
          if (aarch64_debug)
            debug_printf ("return value in memory\n");
          return RETURN_VALUE_STRUCT_CONVENTION;
        }
    }

  if (writebuf)
    aarch64_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    aarch64_extract_return_value (valtype, regcache, readbuf);

  if (aarch64_debug)
    debug_printf ("return value in registers\n");

  return RETURN_VALUE_REGISTER_CONVENTION;
}

/* Implement the "get_longjmp_target" gdbarch method.  */

static int
aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  CORE_ADDR jb_addr;
  gdb_byte buf[X_REGISTER_SIZE];
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);

  if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
                          X_REGISTER_SIZE))
    return 0;

  *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
  return 1;
}

/* Implement the "gen_return_address" gdbarch method.  */

static void
aarch64_gen_return_address (struct gdbarch *gdbarch,
                            struct agent_expr *ax, struct axs_value *value,
                            CORE_ADDR scope)
{
  value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
  value->kind = axs_lvalue_register;
  value->u.reg = AARCH64_LR_REGNUM;
}

/* Return the pseudo register name corresponding to register regnum.  */

static const char *
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  static const char *const q_name[] =
    {
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",
    };

  static const char *const d_name[] =
    {
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",
    };

  static const char *const s_name[] =
    {
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",
    };

  static const char *const h_name[] =
    {
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",
    };

  static const char *const b_name[] =
    {
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",
    };

  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[regnum - AARCH64_Q0_REGNUM];

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return d_name[regnum - AARCH64_D0_REGNUM];

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return s_name[regnum - AARCH64_S0_REGNUM];

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return h_name[regnum - AARCH64_H0_REGNUM];

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return b_name[regnum - AARCH64_B0_REGNUM];

  if (tdep->has_sve ())
    {
      static const char *const sve_v_name[] =
        {
          "v0", "v1", "v2", "v3",
          "v4", "v5", "v6", "v7",
          "v8", "v9", "v10", "v11",
          "v12", "v13", "v14", "v15",
          "v16", "v17", "v18", "v19",
          "v20", "v21", "v22", "v23",
          "v24", "v25", "v26", "v27",
          "v28", "v29", "v30", "v31",
        };

      if (regnum >= AARCH64_SVE_V0_REGNUM
          && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
        return sve_v_name[regnum - AARCH64_SVE_V0_REGNUM];
    }

  internal_error (__FILE__, __LINE__,
                  _("aarch64_pseudo_register_name: bad register number %d"),
                  regnum);
}

/* Implement the "pseudo_register_type" tdesc_arch_data method.  */

static struct type *
aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_vnq_type (gdbarch);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_vnd_type (gdbarch);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_vns_type (gdbarch);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_vnh_type (gdbarch);

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_vnb_type (gdbarch);

  if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
      && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
    return aarch64_vnv_type (gdbarch);

  internal_error (__FILE__, __LINE__,
                  _("aarch64_pseudo_register_type: bad register number %d"),
                  regnum);
}

2317 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2320 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2321 struct reggroup *group)
2323 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2325 regnum -= gdbarch_num_regs (gdbarch);
2327 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2328 return group == all_reggroup || group == vector_reggroup;
2329 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2330 return (group == all_reggroup || group == vector_reggroup
2331 || group == float_reggroup);
2332 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2333 return (group == all_reggroup || group == vector_reggroup
2334 || group == float_reggroup);
2335 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2336 return group == all_reggroup || group == vector_reggroup;
2337 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2338 return group == all_reggroup || group == vector_reggroup;
2339 else if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2340 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2341 return group == all_reggroup || group == vector_reggroup;
2343 return group == all_reggroup;
2346 /* Helper for aarch64_pseudo_read_value. */
2348 static struct value *
2349 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2350 readable_regcache *regcache, int regnum_offset,
2351 int regsize, struct value *result_value)
2353 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2355 /* Enough space for a full vector register. */
2356 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2357 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2359 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2360 mark_value_bytes_unavailable (result_value, 0,
2361 TYPE_LENGTH (value_type (result_value)));
2363 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2365 return result_value;
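/* A worked example of the mapping above (the register choice is
   illustrative): reading the pseudo register "d3" reaches this helper
   with REGNUM_OFFSET 3 and REGSIZE D_REGISTER_SIZE (8), so V_REGNUM
   names the raw register v3 (or z3 on SVE) and the result is simply
   the low 8 bytes of REG_BUF.  Each scalar pseudo register aliases the
   low REGSIZE bytes of its underlying vector register.  */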
2368 /* Implement the "pseudo_register_read_value" gdbarch method. */
2370 static struct value *
2371 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2374 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2375 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2377 VALUE_LVAL (result_value) = lval_register;
2378 VALUE_REGNUM (result_value) = regnum;
2380 regnum -= gdbarch_num_regs (gdbarch);
2382 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2383 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2384 regnum - AARCH64_Q0_REGNUM,
2385 Q_REGISTER_SIZE, result_value);
2387 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2388 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2389 regnum - AARCH64_D0_REGNUM,
2390 D_REGISTER_SIZE, result_value);
2392 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2393 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2394 regnum - AARCH64_S0_REGNUM,
2395 S_REGISTER_SIZE, result_value);
2397 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2398 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2399 regnum - AARCH64_H0_REGNUM,
2400 H_REGISTER_SIZE, result_value);
2402 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2403 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2404 regnum - AARCH64_B0_REGNUM,
2405 B_REGISTER_SIZE, result_value);
2407 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2408 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2409 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2410 regnum - AARCH64_SVE_V0_REGNUM,
2411 V_REGISTER_SIZE, result_value);
2413 gdb_assert_not_reached ("regnum out of bounds");
2416 /* Helper for aarch64_pseudo_write. */
2419 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2420 int regnum_offset, int regsize, const gdb_byte *buf)
2422 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2424 /* Enough space for a full vector register. */
2425 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2426 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2428 /* Ensure the register buffer is zero.  We want gdb writes of the
2429 various 'scalar' pseudo registers to behave like architectural
2430 writes: register-width bytes are written, the remainder are set to
2431 zero.  */
2432 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2434 memcpy (reg_buf, buf, regsize);
2435 regcache->raw_write (v_regnum, reg_buf);
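/* Sketch of the zero-extension semantics above (a hypothetical write,
   little-endian target assumed): storing the double 1.0
   (0x3ff0000000000000) to pseudo register "d5" produces a raw write of
   { 00 00 00 00 00 00 f0 3f, 0, ..., 0 } to v5, so the upper lanes of
   the vector register read back as zero, just as an architectural
   write to d5 would leave them.  */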
2438 /* Implement the "pseudo_register_write" gdbarch method. */
2441 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2442 int regnum, const gdb_byte *buf)
2444 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2445 regnum -= gdbarch_num_regs (gdbarch);
2447 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2448 return aarch64_pseudo_write_1 (gdbarch, regcache,
2449 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2452 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2453 return aarch64_pseudo_write_1 (gdbarch, regcache,
2454 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2457 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2458 return aarch64_pseudo_write_1 (gdbarch, regcache,
2459 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2462 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2463 return aarch64_pseudo_write_1 (gdbarch, regcache,
2464 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2467 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2468 return aarch64_pseudo_write_1 (gdbarch, regcache,
2469 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2472 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2473 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2474 return aarch64_pseudo_write_1 (gdbarch, regcache,
2475 regnum - AARCH64_SVE_V0_REGNUM,
2476 V_REGISTER_SIZE, buf);
2478 gdb_assert_not_reached ("regnum out of bounds");
2481 /* Callback function for user_reg_add. */
2483 static struct value *
2484 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2486 const int *reg_p = (const int *) baton;
2488 return value_of_register (*reg_p, frame);
2492 /* Implement the "software_single_step" gdbarch method, needed to
2493 single step through atomic sequences on AArch64. */
2495 static std::vector<CORE_ADDR>
2496 aarch64_software_single_step (struct regcache *regcache)
2498 struct gdbarch *gdbarch = regcache->arch ();
2499 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2500 const int insn_size = 4;
2501 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2502 CORE_ADDR pc = regcache_read_pc (regcache);
2503 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2505 CORE_ADDR closing_insn = 0;
2506 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2507 byte_order_for_code);
2510 int bc_insn_count = 0; /* Conditional branch instruction count. */
2511 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2514 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2517 /* Look for a Load Exclusive instruction which begins the sequence. */
2518 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2521 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2524 insn = read_memory_unsigned_integer (loc, insn_size,
2525 byte_order_for_code);
2527 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2529 /* Check if the instruction is a conditional branch. */
2530 if (inst.opcode->iclass == condbranch)
2532 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2534 if (bc_insn_count >= 1)
2537 /* It is, so we'll try to set a breakpoint at the destination. */
2538 breaks[1] = loc + inst.operands[0].imm.value;
2544 /* Look for the Store Exclusive which closes the atomic sequence. */
2545 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2552 /* We didn't find a closing Store Exclusive instruction, fall back. */
2556 /* Insert breakpoint after the end of the atomic sequence. */
2557 breaks[0] = loc + insn_size;
2559 /* Check for duplicated breakpoints, and also check that the second
2560 breakpoint is not within the atomic sequence. */
2562 && (breaks[1] == breaks[0]
2563 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2564 last_breakpoint = 0;
2566 std::vector<CORE_ADDR> next_pcs;
2568 /* Insert the breakpoint at the end of the sequence, and one at the
2569 destination of the conditional branch, if it exists. */
2570 for (index = 0; index <= last_breakpoint; index++)
2571 next_pcs.push_back (breaks[index]);
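/* Worked example (all addresses made up): for the atomic sequence

     0x400100  ldaxr w2, [x0]      <- PC, the opening load exclusive
     0x400104  cmp   w2, w1
     0x400108  b.ne  0x400118      conditional branch, sets breaks[1]
     0x40010c  stlxr w3, w4, [x0]  store exclusive closing the sequence

   breaks[0] lands at 0x400110, just past the store exclusive, and
   because 0x400118 lies outside [PC, CLOSING_INSN] the branch target
   is kept too, giving next_pcs = { 0x400110, 0x400118 }.  */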
2576 struct aarch64_displaced_step_closure : public displaced_step_closure
2578 /* True when a conditional instruction, such as B.COND, TBZ, etc.,
2579 is being displaced stepped.  */
2582 /* PC adjustment offset after displaced stepping. */
2583 int32_t pc_adjust = 0;
2586 /* Data when visiting instructions for displaced stepping. */
2588 struct aarch64_displaced_step_data
2590 struct aarch64_insn_data base;
2592 /* The address where the instruction will be executed.  */
2593 CORE_ADDR new_addr;
2594 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2595 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2596 /* Number of instructions in INSN_BUF. */
2597 unsigned insn_count;
2598 /* Registers when doing displaced stepping. */
2599 struct regcache *regs;
2601 aarch64_displaced_step_closure *dsc;
2604 /* Implementation of aarch64_insn_visitor method "b". */
2607 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2608 struct aarch64_insn_data *data)
2610 struct aarch64_displaced_step_data *dsd
2611 = (struct aarch64_displaced_step_data *) data;
2612 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2614 if (can_encode_int32 (new_offset, 28))
2616 /* Emit B rather than BL, because executing BL on a new address
2617 will get the wrong address into LR. In order to avoid this,
2618 we emit B, and update LR if the instruction is BL. */
2619 emit_b (dsd->insn_buf, 0, new_offset);
2625 emit_nop (dsd->insn_buf);
2627 dsd->dsc->pc_adjust = offset;
2633 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2634 data->insn_addr + 4);
2638 /* Implementation of aarch64_insn_visitor method "b_cond". */
2641 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2642 struct aarch64_insn_data *data)
2644 struct aarch64_displaced_step_data *dsd
2645 = (struct aarch64_displaced_step_data *) data;
2647 /* GDB has to fix up the PC after displaced stepping this instruction
2648 differently, according to whether the condition is true or false.
2649 Instead of checking COND against the condition flags, we can emit
2650 the following instruction sequence, and GDB can tell how to fix up
2651 the PC from the PC value alone.
2653 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2654 INSN1 ;
2655 TAKEN:
2656 INSN2
2657 */
2659 emit_bcond (dsd->insn_buf, cond, 8);
2661 dsd->dsc->pc_adjust = offset;
2662 dsd->insn_count = 1;
2665 /* Dynamically allocate a new register.  If we know the register
2666 statically, we should make it a global as above instead of using this
2667 helper function.  */
2669 static struct aarch64_register
2670 aarch64_register (unsigned num, int is64)
2672 return (struct aarch64_register) { num, is64 };
2675 /* Implementation of aarch64_insn_visitor method "cb". */
2678 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2679 const unsigned rn, int is64,
2680 struct aarch64_insn_data *data)
2682 struct aarch64_displaced_step_data *dsd
2683 = (struct aarch64_displaced_step_data *) data;
2685 /* The offset is out of range for a compare and branch
2686 instruction.  We can use the following instructions instead:
2688 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2689 INSN1 ;
2690 TAKEN:
2691 INSN2
2692 */
2693 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2694 dsd->insn_count = 1;
2696 dsd->dsc->pc_adjust = offset;
2699 /* Implementation of aarch64_insn_visitor method "tb". */
2702 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2703 const unsigned rt, unsigned bit,
2704 struct aarch64_insn_data *data)
2706 struct aarch64_displaced_step_data *dsd
2707 = (struct aarch64_displaced_step_data *) data;
2709 /* The offset is out of range for a test bit and branch
2710 instruction.  We can use the following instructions instead:
2712 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2713 INSN1 ;
2714 TAKEN:
2715 INSN2
2716 */
2718 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2719 dsd->insn_count = 1;
2721 dsd->dsc->pc_adjust = offset;
2724 /* Implementation of aarch64_insn_visitor method "adr". */
2727 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2728 const int is_adrp, struct aarch64_insn_data *data)
2730 struct aarch64_displaced_step_data *dsd
2731 = (struct aarch64_displaced_step_data *) data;
2732 /* We know exactly the address the ADR{P,} instruction will compute.
2733 We can just write it to the destination register. */
2734 CORE_ADDR address = data->insn_addr + offset;
2738 /* Clear the lower 12 bits of the address to get its 4K page base. */
2739 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2743 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2746 dsd->dsc->pc_adjust = 4;
2747 emit_nop (dsd->insn_buf);
2748 dsd->insn_count = 1;
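/* For instance (a hypothetical encoding), displaced stepping
   "adrp x3, <page at +0x3000>" located at 0x400840 writes

     (0x400840 + 0x3000) & ~0xfff = 0x403000

   directly into x3 and runs a NOP in the scratch pad, so the copied
   instruction carries no PC-relative state at all.  */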
2751 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2754 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2755 const unsigned rt, const int is64,
2756 struct aarch64_insn_data *data)
2758 struct aarch64_displaced_step_data *dsd
2759 = (struct aarch64_displaced_step_data *) data;
2760 CORE_ADDR address = data->insn_addr + offset;
2761 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2763 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2767 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2768 aarch64_register (rt, 1), zero);
2770 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2771 aarch64_register (rt, 1), zero);
2773 dsd->dsc->pc_adjust = 4;
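/* Sketch of the rewrite above (operands are illustrative):
   "ldr x2, <literal at PC+0x20>" becomes a direct regcache write of
   INSN_ADDR + 0x20 into x2 followed by "ldr x2, [x2, #0]" in the
   scratch pad, loading the same literal without any PC-relative
   addressing.  */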
2776 /* Implementation of aarch64_insn_visitor method "others". */
2779 aarch64_displaced_step_others (const uint32_t insn,
2780 struct aarch64_insn_data *data)
2782 struct aarch64_displaced_step_data *dsd
2783 = (struct aarch64_displaced_step_data *) data;
2785 aarch64_emit_insn (dsd->insn_buf, insn);
2786 dsd->insn_count = 1;
2788 if ((insn & 0xfffffc1f) == 0xd65f0000)
2791 dsd->dsc->pc_adjust = 0;
2794 dsd->dsc->pc_adjust = 4;
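/* The 0xfffffc1f mask above keeps every bit except the Rn field
   (bits [9:5]), so any "ret <xn>" (for example the common RET,
   encoded 0xd65f03c0, i.e. ret x30) matches 0xd65f0000 and gets
   PC_ADJUST 0: the new PC comes entirely from the register, not from
   the scratch pad copy.  */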
2797 static const struct aarch64_insn_visitor visitor =
2799 aarch64_displaced_step_b,
2800 aarch64_displaced_step_b_cond,
2801 aarch64_displaced_step_cb,
2802 aarch64_displaced_step_tb,
2803 aarch64_displaced_step_adr,
2804 aarch64_displaced_step_ldr_literal,
2805 aarch64_displaced_step_others,
2808 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2810 struct displaced_step_closure *
2811 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2812 CORE_ADDR from, CORE_ADDR to,
2813 struct regcache *regs)
2815 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2816 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2817 struct aarch64_displaced_step_data dsd;
2820 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2823 /* Look for a Load Exclusive instruction which begins the sequence. */
2824 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2826 /* We can't displaced-step atomic sequences. */
2830 std::unique_ptr<aarch64_displaced_step_closure> dsc
2831 (new aarch64_displaced_step_closure);
2832 dsd.base.insn_addr = from;
2835 dsd.dsc = dsc.get ();
2837 aarch64_relocate_instruction (insn, &visitor,
2838 (struct aarch64_insn_data *) &dsd);
2839 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2841 if (dsd.insn_count != 0)
2845 /* Instruction can be relocated to scratch pad. Copy
2846 relocated instruction(s) there. */
2847 for (i = 0; i < dsd.insn_count; i++)
2849 if (debug_displaced)
2851 debug_printf ("displaced: writing insn ");
2852 debug_printf ("%.8x", dsd.insn_buf[i]);
2853 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2855 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2856 (ULONGEST) dsd.insn_buf[i]);
2864 return dsc.release ();
2867 /* Implement the "displaced_step_fixup" gdbarch method. */
2870 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2871 struct displaced_step_closure *dsc_,
2872 CORE_ADDR from, CORE_ADDR to,
2873 struct regcache *regs)
2875 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2881 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2884 /* Condition is true. */
2886 else if (pc - to == 4)
2888 /* Condition is false. */
2892 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2895 if (dsc->pc_adjust != 0)
2897 if (debug_displaced)
2899 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2900 paddress (gdbarch, from), dsc->pc_adjust);
2902 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2903 from + dsc->pc_adjust);
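/* Putting the pieces together (the scratch address is hypothetical):
   a conditional branch copied to TO = 0x7f0000 executes there as
   "b.cond +8".  If the condition held, the PC stops at TO + 8 and
   PC_ADJUST keeps the original branch offset; if not, the PC stops at
   TO + 4 and PC_ADJUST becomes 4.  Either way the final PC is
   rewritten as FROM + PC_ADJUST above.  */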
2907 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2910 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2911 struct displaced_step_closure *closure)
2916 /* Get the correct target description for the given VQ value.
2917 If VQ is zero then it is assumed SVE is not supported.
2918 (It is not possible to set VQ to zero on an SVE system). */
2921 aarch64_read_description (uint64_t vq)
2923 if (vq > AARCH64_MAX_SVE_VQ)
2924 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
2925 AARCH64_MAX_SVE_VQ);
2927 struct target_desc *tdesc = tdesc_aarch64_list[vq];
2931 tdesc = aarch64_create_target_description (vq);
2932 tdesc_aarch64_list[vq] = tdesc;
2938 /* Return the VQ used when creating the target description TDESC. */
2941 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
2943 const struct tdesc_feature *feature_sve;
2945 if (!tdesc_has_registers (tdesc))
2948 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2950 if (feature_sve == nullptr)
2953 uint64_t vl = tdesc_register_bitsize (feature_sve,
2954 aarch64_sve_register_names[0]) / 8;
2955 return sve_vq_from_vl (vl);
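/* Example: a target description whose z0 is 512 bits wide yields
   vl = 512 / 8 = 64 bytes and hence vq = 64 / 16 = 4, since VQ counts
   the 128-bit quadword granules making up the vector length.  */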
2958 /* Add all the expected register sets into GDBARCH. */
2961 aarch64_add_reggroups (struct gdbarch *gdbarch)
2963 reggroup_add (gdbarch, general_reggroup);
2964 reggroup_add (gdbarch, float_reggroup);
2965 reggroup_add (gdbarch, system_reggroup);
2966 reggroup_add (gdbarch, vector_reggroup);
2967 reggroup_add (gdbarch, all_reggroup);
2968 reggroup_add (gdbarch, save_reggroup);
2969 reggroup_add (gdbarch, restore_reggroup);
2972 /* Initialize the current architecture based on INFO. If possible,
2973 re-use an architecture from ARCHES, which is a list of
2974 architectures already created during this debugging session.
2976 Called e.g. at program startup, when reading a core file, and when
2977 reading a binary file. */
2979 static struct gdbarch *
2980 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2982 struct gdbarch_tdep *tdep;
2983 struct gdbarch *gdbarch;
2984 struct gdbarch_list *best_arch;
2985 struct tdesc_arch_data *tdesc_data = NULL;
2986 const struct target_desc *tdesc = info.target_desc;
2989 const struct tdesc_feature *feature_core;
2990 const struct tdesc_feature *feature_fpu;
2991 const struct tdesc_feature *feature_sve;
2993 int num_pseudo_regs = 0;
2995 /* Ensure we always have a target description. */
2996 if (!tdesc_has_registers (tdesc))
2997 tdesc = aarch64_read_description (0);
3000 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3001 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3002 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3004 if (feature_core == NULL)
3007 tdesc_data = tdesc_data_alloc ();
3009 /* Validate the description provides the mandatory core R registers
3010 and allocate their numbers. */
3011 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3012 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3013 AARCH64_X0_REGNUM + i,
3014 aarch64_r_register_names[i]);
3016 num_regs = AARCH64_X0_REGNUM + i;
3018 /* Add the V registers. */
3019 if (feature_fpu != NULL)
3021 if (feature_sve != NULL)
3022 error (_("Program contains both fpu and SVE features."));
3024 /* Validate the description provides the mandatory V registers
3025 and allocate their numbers. */
3026 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3027 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3028 AARCH64_V0_REGNUM + i,
3029 aarch64_v_register_names[i]);
3031 num_regs = AARCH64_V0_REGNUM + i;
3034 /* Add the SVE registers. */
3035 if (feature_sve != NULL)
3037 /* Validate the description provides the mandatory SVE registers
3038 and allocate their numbers. */
3039 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3040 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3041 AARCH64_SVE_Z0_REGNUM + i,
3042 aarch64_sve_register_names[i]);
3044 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3045 num_pseudo_regs += 32; /* add the Vn register pseudos. */
3048 if (feature_fpu != NULL || feature_sve != NULL)
3050 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3051 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3052 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3053 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3054 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3059 tdesc_data_cleanup (tdesc_data);
3063 /* AArch64 code is always little-endian. */
3064 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3066 /* If there is already a candidate, use it. */
3067 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3069 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3071 /* Found a match. */
3075 if (best_arch != NULL)
3077 if (tdesc_data != NULL)
3078 tdesc_data_cleanup (tdesc_data);
3079 return best_arch->gdbarch;
3082 tdep = XCNEW (struct gdbarch_tdep);
3083 gdbarch = gdbarch_alloc (&info, tdep);
3085 /* This should be low enough for everything. */
3086 tdep->lowest_pc = 0x20;
3087 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3088 tdep->jb_elt_size = 8;
3089 tdep->vq = aarch64_get_tdesc_vq (tdesc);
3091 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3092 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3094 /* Frame handling. */
3095 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
3096 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
3097 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
3099 /* Advance PC across function entry code. */
3100 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3102 /* The stack grows downward. */
3103 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3105 /* Breakpoint manipulation. */
3106 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3107 aarch64_breakpoint::kind_from_pc);
3108 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3109 aarch64_breakpoint::bp_from_kind);
3110 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3111 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3113 /* Information about registers, etc. */
3114 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3115 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3116 set_gdbarch_num_regs (gdbarch, num_regs);
3118 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3119 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3120 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3121 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3122 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3123 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3124 aarch64_pseudo_register_reggroup_p);
3127 set_gdbarch_short_bit (gdbarch, 16);
3128 set_gdbarch_int_bit (gdbarch, 32);
3129 set_gdbarch_float_bit (gdbarch, 32);
3130 set_gdbarch_double_bit (gdbarch, 64);
3131 set_gdbarch_long_double_bit (gdbarch, 128);
3132 set_gdbarch_long_bit (gdbarch, 64);
3133 set_gdbarch_long_long_bit (gdbarch, 64);
3134 set_gdbarch_ptr_bit (gdbarch, 64);
3135 set_gdbarch_char_signed (gdbarch, 0);
3136 set_gdbarch_wchar_signed (gdbarch, 0);
3137 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3138 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3139 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3141 /* Internal <-> external register number maps. */
3142 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3144 /* Returning results. */
3145 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3148 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3150 /* Virtual tables. */
3151 set_gdbarch_vbit_in_delta (gdbarch, 1);
3153 /* Register architecture. */
3154 aarch64_add_reggroups (gdbarch);
3156 /* Hook in the ABI-specific overrides, if they have been registered. */
3157 info.target_desc = tdesc;
3158 info.tdesc_data = tdesc_data;
3159 gdbarch_init_osabi (info, gdbarch);
3161 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3163 /* Add some default predicates. */
3164 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3165 dwarf2_append_unwinders (gdbarch);
3166 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3168 frame_base_set_default (gdbarch, &aarch64_normal_base);
3170 /* Now we have tuned the configuration, set a few final things,
3171 based on what the OS ABI has told us. */
3173 if (tdep->jb_pc >= 0)
3174 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3176 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3178 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3180 /* Add standard register aliases. */
3181 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3182 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3183 value_of_aarch64_user_reg,
3184 &aarch64_register_aliases[i].regnum);
3186 register_aarch64_ravenscar_ops (gdbarch);
3192 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3194 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3199 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3200 paddress (gdbarch, tdep->lowest_pc));
3206 static void aarch64_process_record_test (void);
3211 _initialize_aarch64_tdep (void)
3213 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3216 /* Debug this file's internals. */
3217 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3218 Set AArch64 debugging."), _("\
3219 Show AArch64 debugging."), _("\
3220 When on, AArch64 specific debugging is enabled."),
3223 &setdebuglist, &showdebuglist);
3226 selftests::register_test ("aarch64-analyze-prologue",
3227 selftests::aarch64_analyze_prologue_test);
3228 selftests::register_test ("aarch64-process-record",
3229 selftests::aarch64_process_record_test);
3230 selftests::record_xml_tdesc ("aarch64.xml",
3231 aarch64_create_target_description (0));
3235 /* AArch64 process record-replay related structures, defines etc. */
3237 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3240 unsigned int reg_len = LENGTH; \
3243 REGS = XNEWVEC (uint32_t, reg_len); \
3244 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3249 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3252 unsigned int mem_len = LENGTH; \
3255 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3256 memcpy(&MEMS->len, &RECORD_BUF[0], \
3257 sizeof(struct aarch64_mem_r) * LENGTH); \
3262 /* AArch64 record/replay structures and enumerations. */
3264 struct aarch64_mem_r
3266 uint64_t len; /* Record length. */
3267 uint64_t addr; /* Memory address. */
3270 enum aarch64_record_result
3272 AARCH64_RECORD_SUCCESS,
3273 AARCH64_RECORD_UNSUPPORTED,
3274 AARCH64_RECORD_UNKNOWN
3277 typedef struct insn_decode_record_t
3279 struct gdbarch *gdbarch;
3280 struct regcache *regcache;
3281 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3282 uint32_t aarch64_insn; /* Insn to be recorded. */
3283 uint32_t mem_rec_count; /* Count of memory records. */
3284 uint32_t reg_rec_count; /* Count of register records. */
3285 uint32_t *aarch64_regs; /* Registers to be recorded. */
3286 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3287 } insn_decode_record;
3289 /* Record handler for data processing - register instructions. */
3292 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3294 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3295 uint32_t record_buf[4];
3297 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3298 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3299 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3301 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3305 /* Logical (shifted register). */
3306 if (insn_bits24_27 == 0x0a)
3307 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3309 else if (insn_bits24_27 == 0x0b)
3310 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3312 return AARCH64_RECORD_UNKNOWN;
3314 record_buf[0] = reg_rd;
3315 aarch64_insn_r->reg_rec_count = 1;
3317 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3321 if (insn_bits24_27 == 0x0b)
3323 /* Data-processing (3 source). */
3324 record_buf[0] = reg_rd;
3325 aarch64_insn_r->reg_rec_count = 1;
3327 else if (insn_bits24_27 == 0x0a)
3329 if (insn_bits21_23 == 0x00)
3331 /* Add/subtract (with carry). */
3332 record_buf[0] = reg_rd;
3333 aarch64_insn_r->reg_rec_count = 1;
3334 if (bit (aarch64_insn_r->aarch64_insn, 29))
3336 record_buf[1] = AARCH64_CPSR_REGNUM;
3337 aarch64_insn_r->reg_rec_count = 2;
3340 else if (insn_bits21_23 == 0x02)
3342 /* Conditional compare (register) and conditional compare
3343 (immediate) instructions. */
3344 record_buf[0] = AARCH64_CPSR_REGNUM;
3345 aarch64_insn_r->reg_rec_count = 1;
3347 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3349 /* Conditional select. */
3350 /* Data-processing (2 source). */
3351 /* Data-processing (1 source). */
3352 record_buf[0] = reg_rd;
3353 aarch64_insn_r->reg_rec_count = 1;
3356 return AARCH64_RECORD_UNKNOWN;
3360 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3362 return AARCH64_RECORD_SUCCESS;
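/* Example of the recording above (the instruction is illustrative):
   for "adds x0, x1, x2", bit 28 is clear, bits 24-27 are 0x0b and the
   S bit (29) is set, so RECORD_BUF ends up as { x0, CPSR } with
   REG_REC_COUNT 2, i.e. the destination register plus the flags the
   instruction clobbers.  */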
3365 /* Record handler for data processing - immediate instructions. */
3368 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3370 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3371 uint32_t record_buf[4];
3373 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3374 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3375 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3377 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3378 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3379 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3381 record_buf[0] = reg_rd;
3382 aarch64_insn_r->reg_rec_count = 1;
3384 else if (insn_bits24_27 == 0x01)
3386 /* Add/Subtract (immediate). */
3387 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3388 record_buf[0] = reg_rd;
3389 aarch64_insn_r->reg_rec_count = 1;
3391 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3393 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3395 /* Logical (immediate). */
3396 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3397 record_buf[0] = reg_rd;
3398 aarch64_insn_r->reg_rec_count = 1;
3400 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3403 return AARCH64_RECORD_UNKNOWN;
3405 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3407 return AARCH64_RECORD_SUCCESS;
3410 /* Record handler for branch, exception generation and system instructions. */
3413 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3415 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3416 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3417 uint32_t record_buf[4];
3419 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3420 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3421 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3423 if (insn_bits28_31 == 0x0d)
3425 /* Exception generation instructions. */
3426 if (insn_bits24_27 == 0x04)
3428 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3429 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3430 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3432 ULONGEST svc_number;
3434 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3436 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3440 return AARCH64_RECORD_UNSUPPORTED;
3442 /* System instructions. */
3443 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3445 uint32_t reg_rt, reg_crn;
3447 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3448 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3450 /* Record rt in case of sysl and mrs instructions. */
3451 if (bit (aarch64_insn_r->aarch64_insn, 21))
3453 record_buf[0] = reg_rt;
3454 aarch64_insn_r->reg_rec_count = 1;
3456 /* Record cpsr for hint and msr(immediate) instructions. */
3457 else if (reg_crn == 0x02 || reg_crn == 0x04)
3459 record_buf[0] = AARCH64_CPSR_REGNUM;
3460 aarch64_insn_r->reg_rec_count = 1;
3463 /* Unconditional branch (register). */
3463 else if ((insn_bits24_27 & 0x0e) == 0x06)
3466 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3467 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3468 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3471 return AARCH64_RECORD_UNKNOWN;
3473 /* Unconditional branch (immediate). */
3474 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3476 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3477 if (bit (aarch64_insn_r->aarch64_insn, 31))
3478 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3481 /* Compare & branch (immediate), Test & branch (immediate) and
3482 Conditional branch (immediate). */
3483 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3485 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3487 return AARCH64_RECORD_SUCCESS;
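/* For example, recording "bl <func>" (unconditional branch with
   immediate, bit 31 set) saves both PC and LR, while a plain
   "b <label>" saves only PC; the branch itself changes no other
   register or memory state.  */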
3490 /* Record handler for advanced SIMD load and store instructions. */
3493 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3496 uint64_t addr_offset = 0;
3497 uint32_t record_buf[24];
3498 uint64_t record_buf_mem[24];
3499 uint32_t reg_rn, reg_rt;
3500 uint32_t reg_index = 0, mem_index = 0;
3501 uint8_t opcode_bits, size_bits;
3503 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3504 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3505 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3506 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3507 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3510 debug_printf ("Process record: Advanced SIMD load/store\n");
3512 /* Load/store single structure. */
3513 if (bit (aarch64_insn_r->aarch64_insn, 24))
3515 uint8_t sindex, scale, selem, esize, replicate = 0;
3516 scale = opcode_bits >> 2;
3517 selem = ((opcode_bits & 0x02) |
3518 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3522 if (size_bits & 0x01)
3523 return AARCH64_RECORD_UNKNOWN;
3526 if ((size_bits >> 1) & 0x01)
3527 return AARCH64_RECORD_UNKNOWN;
3528 if (size_bits & 0x01)
3530 if (!((opcode_bits >> 1) & 0x01))
3533 return AARCH64_RECORD_UNKNOWN;
3537 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3544 return AARCH64_RECORD_UNKNOWN;
3550 for (sindex = 0; sindex < selem; sindex++)
3552 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3553 reg_rt = (reg_rt + 1) % 32;
3557 for (sindex = 0; sindex < selem; sindex++)
3559 if (bit (aarch64_insn_r->aarch64_insn, 22))
3560 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3563 record_buf_mem[mem_index++] = esize / 8;
3564 record_buf_mem[mem_index++] = address + addr_offset;
3566 addr_offset = addr_offset + (esize / 8);
3567 reg_rt = (reg_rt + 1) % 32;
3571 /* Load/store multiple structure. */
3574 uint8_t selem, esize, rpt, elements;
3575 uint8_t eindex, rindex;
3577 esize = 8 << size_bits;
3578 if (bit (aarch64_insn_r->aarch64_insn, 30))
3579 elements = 128 / esize;
3581 elements = 64 / esize;
3583 switch (opcode_bits)
3584 {
3585 /* LD/ST4 (4 Registers). */
3586 case 0: rpt = 1; selem = 4; break;
3590 /* LD/ST1 (4 Registers). */
3591 case 2: rpt = 4; selem = 1; break;
3595 /* LD/ST3 (3 Registers). */
3596 case 4: rpt = 1; selem = 3; break;
3600 /* LD/ST1 (3 Registers). */
3601 case 6: rpt = 3; selem = 1; break;
3605 /* LD/ST1 (1 Register). */
3606 case 7: rpt = 1; selem = 1; break;
3610 /* LD/ST2 (2 Registers). */
3611 case 8: rpt = 1; selem = 2; break;
3615 /* LD/ST1 (2 Registers). */
3616 case 10: rpt = 2; selem = 1; break;
3620 default:
3621 return AARCH64_RECORD_UNSUPPORTED;
3624 for (rindex = 0; rindex < rpt; rindex++)
3625 for (eindex = 0; eindex < elements; eindex++)
3627 uint8_t reg_tt, sindex;
3628 reg_tt = (reg_rt + rindex) % 32;
3629 for (sindex = 0; sindex < selem; sindex++)
3631 if (bit (aarch64_insn_r->aarch64_insn, 22))
3632 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3635 record_buf_mem[mem_index++] = esize / 8;
3636 record_buf_mem[mem_index++] = address + addr_offset;
3638 addr_offset = addr_offset + (esize / 8);
3639 reg_tt = (reg_tt + 1) % 32;
3644 if (bit (aarch64_insn_r->aarch64_insn, 23))
3645 record_buf[reg_index++] = reg_rn;
3647 aarch64_insn_r->reg_rec_count = reg_index;
3648 aarch64_insn_r->mem_rec_count = mem_index / 2;
3649 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3651 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3653 return AARCH64_RECORD_SUCCESS;
3656 /* Record handler for load and store instructions. */
3659 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3661 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3662 uint8_t insn_bit23, insn_bit21;
3663 uint8_t opc, size_bits, ld_flag, vector_flag;
3664 uint32_t reg_rn, reg_rt, reg_rt2;
3665 uint64_t datasize, offset;
3666 uint32_t record_buf[8];
3667 uint64_t record_buf_mem[8];
3670 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3671 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3672 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3673 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3674 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3675 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3676 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3677 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3678 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3679 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3680 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3682 /* Load/store exclusive. */
3683 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3686 debug_printf ("Process record: load/store exclusive\n");
3690 record_buf[0] = reg_rt;
3691 aarch64_insn_r->reg_rec_count = 1;
3694 record_buf[1] = reg_rt2;
3695 aarch64_insn_r->reg_rec_count = 2;
3701 datasize = (8 << size_bits) * 2;
3703 datasize = (8 << size_bits);
3704 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3706 record_buf_mem[0] = datasize / 8;
3707 record_buf_mem[1] = address;
3708 aarch64_insn_r->mem_rec_count = 1;
3711 /* Save register rs. */
3712 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3713 aarch64_insn_r->reg_rec_count = 1;
3717 /* Load register (literal) instructions decoding. */
3718 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3721 debug_printf ("Process record: load register (literal)\n");
3723 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3725 record_buf[0] = reg_rt;
3726 aarch64_insn_r->reg_rec_count = 1;
3728 /* All types of load/store pair instructions decoding. */
3729 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3732 debug_printf ("Process record: load/store pair\n");
3738 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3739 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3743 record_buf[0] = reg_rt;
3744 record_buf[1] = reg_rt2;
3746 aarch64_insn_r->reg_rec_count = 2;
3751 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3753 size_bits = size_bits >> 1;
3754 datasize = 8 << (2 + size_bits);
3755 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3756 offset = offset << (2 + size_bits);
3757 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3759 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3761 if (imm7_off & 0x40)
3762 address = address - offset;
3764 address = address + offset;
3767 record_buf_mem[0] = datasize / 8;
3768 record_buf_mem[1] = address;
3769 record_buf_mem[2] = datasize / 8;
3770 record_buf_mem[3] = address + (datasize / 8);
3771 aarch64_insn_r->mem_rec_count = 2;
3773 if (bit (aarch64_insn_r->aarch64_insn, 23))
3774 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3776 /* Load/store register (unsigned immediate) instructions. */
3777 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3779 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3789 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3791 /* PRFM (immediate) */
3792 return AARCH64_RECORD_SUCCESS;
3794 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3796 /* LDRSW (immediate) */
3810 debug_printf ("Process record: load/store (unsigned immediate):"
3811 " size %x V %d opc %x\n", size_bits, vector_flag,
3817 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3818 datasize = 8 << size_bits;
3819 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3821 offset = offset << size_bits;
3822 address = address + offset;
3824 record_buf_mem[0] = datasize >> 3;
3825 record_buf_mem[1] = address;
3826 aarch64_insn_r->mem_rec_count = 1;
3831 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3833 record_buf[0] = reg_rt;
3834 aarch64_insn_r->reg_rec_count = 1;
3837 /* Load/store register (register offset) instructions. */
3838 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3839 && insn_bits10_11 == 0x02 && insn_bit21)
3842 debug_printf ("Process record: load/store (register offset)\n");
3843 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3850 if (size_bits != 0x03)
3853 return AARCH64_RECORD_UNKNOWN;
3857 ULONGEST reg_rm_val;
3859 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3860 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3861 if (bit (aarch64_insn_r->aarch64_insn, 12))
3862 offset = reg_rm_val << size_bits;
3864 offset = reg_rm_val;
3865 datasize = 8 << size_bits;
3866 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3868 address = address + offset;
3869 record_buf_mem[0] = datasize >> 3;
3870 record_buf_mem[1] = address;
3871 aarch64_insn_r->mem_rec_count = 1;
3876 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3878 record_buf[0] = reg_rt;
3879 aarch64_insn_r->reg_rec_count = 1;
3882 /* Load/store register (immediate and unprivileged) instructions. */
3883 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3888 debug_printf ("Process record: load/store "
3889 "(immediate and unprivileged)\n");
3891 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3898 if (size_bits != 0x03)
3901 return AARCH64_RECORD_UNKNOWN;
3906 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3907 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3908 datasize = 8 << size_bits;
3909 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3911 if (insn_bits10_11 != 0x01)
3913 if (imm9_off & 0x0100)
3914 address = address - offset;
3916 address = address + offset;
3918 record_buf_mem[0] = datasize >> 3;
3919 record_buf_mem[1] = address;
3920 aarch64_insn_r->mem_rec_count = 1;
3925 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3927 record_buf[0] = reg_rt;
3928 aarch64_insn_r->reg_rec_count = 1;
3930 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3931 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3933 /* Advanced SIMD load/store instructions. */
3935 return aarch64_record_asimd_load_store (aarch64_insn_r);
3937 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3939 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3941 return AARCH64_RECORD_SUCCESS;
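/* Example for the unsigned immediate path above (operands made up):
   "str x2, [x1, #16]" has LD_FLAG clear, so a single memory record of
   DATASIZE / 8 = 8 bytes at address [x1] + 16 is emitted; the
   matching "ldr x2, [x1, #16]" records the destination register x2
   instead.  */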
3944 /* Record handler for data processing SIMD and floating point instructions. */
3947 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3949 uint8_t insn_bit21, opcode, rmode, reg_rd;
3950 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3951 uint8_t insn_bits11_14;
3952 uint32_t record_buf[2];
3954 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3955 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3956 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3957 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3958 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3959 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3960 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3961 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3962 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3965 debug_printf ("Process record: data processing SIMD/FP: ");
3967 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3969 /* Floating point - fixed point conversion instructions. */
3973 debug_printf ("FP - fixed point conversion");
3975 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3976 record_buf[0] = reg_rd;
3978 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3980 /* Floating point - conditional compare instructions. */
3981 else if (insn_bits10_11 == 0x01)
3984 debug_printf ("FP - conditional compare");
3986 record_buf[0] = AARCH64_CPSR_REGNUM;
3988 /* Floating point - data processing (2-source) and
3989 conditional select instructions. */
3990 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3993 debug_printf ("FP - DP (2-source)");
3995 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3997 else if (insn_bits10_11 == 0x00)
3999 /* Floating point - immediate instructions. */
4000 if ((insn_bits12_15 & 0x01) == 0x01
4001 || (insn_bits12_15 & 0x07) == 0x04)
4004 debug_printf ("FP - immediate");
4005 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4007 /* Floating point - compare instructions. */
4008 else if ((insn_bits12_15 & 0x03) == 0x02)
4011 debug_printf ("FP - immediate");
4012 record_buf[0] = AARCH64_CPSR_REGNUM;
4014 /* Floating point - integer conversions instructions. */
4015 else if (insn_bits12_15 == 0x00)
4017 /* Convert float to integer instruction. */
4018 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4021 debug_printf ("float to int conversion");
4023 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4025 /* Convert integer to float instruction. */
4026 else if ((opcode >> 1) == 0x01 && !rmode)
4029 debug_printf ("int to float conversion");
4031 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4033 /* Move float to integer instruction. */
4034 else if ((opcode >> 1) == 0x03)
4037 debug_printf ("move float to int");
4039 if (!(opcode & 0x01))
4040 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4042 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4045 return AARCH64_RECORD_UNKNOWN;
4048 return AARCH64_RECORD_UNKNOWN;
4051 return AARCH64_RECORD_UNKNOWN;
4053 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4056 debug_printf ("SIMD copy");
4058 /* Advanced SIMD copy instructions. */
4059 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4060 && !bit (aarch64_insn_r->aarch64_insn, 15)
4061 && bit (aarch64_insn_r->aarch64_insn, 10))
4063 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4064 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4066 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4069 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4071 /* All remaining floating point or advanced SIMD instructions. */
4075 debug_printf ("all remain");
4077 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4081 debug_printf ("\n");
4083 aarch64_insn_r->reg_rec_count++;
4084 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4085 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4087 return AARCH64_RECORD_SUCCESS;
4090 /* Decodes insns type and invokes its record handler. */
4093 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4095 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4097 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4098 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4099 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4100 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4102 /* Data processing - immediate instructions. */
4103 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4104 return aarch64_record_data_proc_imm (aarch64_insn_r);
4106 /* Branch, exception generation and system instructions. */
4107 if (ins_bit26 && !ins_bit27 && ins_bit28)
4108 return aarch64_record_branch_except_sys (aarch64_insn_r);
4110 /* Load and store instructions. */
4111 if (!ins_bit25 && ins_bit27)
4112 return aarch64_record_load_store (aarch64_insn_r);
4114 /* Data processing - register instructions. */
4115 if (ins_bit25 && !ins_bit26 && ins_bit27)
4116 return aarch64_record_data_proc_reg (aarch64_insn_r);
4118 /* Data processing - SIMD and floating point instructions. */
4119 if (ins_bit25 && ins_bit26 && ins_bit27)
4120 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4122 return AARCH64_RECORD_UNSUPPORTED;
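/* The tests above mirror the A64 top-level encoding groups, keyed on
   instruction bits 28..25:

     100x  data processing (immediate)
     101x  branch, exception generation and system
     x1x0  loads and stores
     x101  data processing (register)
     x111  data processing (SIMD and floating point)

   Anything else, such as the unallocated 0000 group, is reported as
   AARCH64_RECORD_UNSUPPORTED.  */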
4125 /* Cleans up local record registers and memory allocations. */
4128 deallocate_reg_mem (insn_decode_record *record)
4130 xfree (record->aarch64_regs);
4131 xfree (record->aarch64_mems);
4135 namespace selftests {
4138 aarch64_process_record_test (void)
4140 struct gdbarch_info info;
4143 gdbarch_info_init (&info);
4144 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4146 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4147 SELF_CHECK (gdbarch != NULL);
4149 insn_decode_record aarch64_record;
4151 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4152 aarch64_record.regcache = NULL;
4153 aarch64_record.this_addr = 0;
4154 aarch64_record.gdbarch = gdbarch;
4156 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4157 aarch64_record.aarch64_insn = 0xf9800020;
4158 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4159 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4160 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4161 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4163 deallocate_reg_mem (&aarch64_record);
4166 } // namespace selftests
4167 #endif /* GDB_SELF_TEST */
4169 /* Parse the current instruction, and record the values of the registers
4170 and memory that will be changed by it to record_arch_list.  Return
4171 -1 if something goes wrong. */
4174 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4175 CORE_ADDR insn_addr)
4177 uint32_t rec_no = 0;
4178 uint8_t insn_size = 4;
4180 gdb_byte buf[insn_size];
4181 insn_decode_record aarch64_record;
4183 memset (&buf[0], 0, insn_size);
4184 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4185 target_read_memory (insn_addr, &buf[0], insn_size);
4186 aarch64_record.aarch64_insn
4187 = (uint32_t) extract_unsigned_integer (&buf[0],
4189 gdbarch_byte_order (gdbarch));
4190 aarch64_record.regcache = regcache;
4191 aarch64_record.this_addr = insn_addr;
4192 aarch64_record.gdbarch = gdbarch;
4194 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4195 if (ret == AARCH64_RECORD_UNSUPPORTED)
4197 printf_unfiltered (_("Process record does not support instruction "
4198 "0x%0x at address %s.\n"),
4199 aarch64_record.aarch64_insn,
4200 paddress (gdbarch, insn_addr));
4206 /* Record registers. */
4207 record_full_arch_list_add_reg (aarch64_record.regcache,
4208 AARCH64_PC_REGNUM);
4209 /* Always record register CPSR. */
4210 record_full_arch_list_add_reg (aarch64_record.regcache,
4211 AARCH64_CPSR_REGNUM);
4212 if (aarch64_record.aarch64_regs)
4213 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4214 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4215 aarch64_record.aarch64_regs[rec_no]))
4218 /* Record memories. */
4219 if (aarch64_record.aarch64_mems)
4220 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4221 if (record_full_arch_list_add_mem
4222 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4223 aarch64_record.aarch64_mems[rec_no].len))
4226 if (record_full_arch_list_add_end ())
4230 deallocate_reg_mem (&aarch64_record);