1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2019 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
31 #include "arch-utils.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
37 #include "dwarf2-frame.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
46 #include "common/selftest.h"
48 #include "aarch64-tdep.h"
49 #include "aarch64-ravenscar-thread.h"
52 #include "elf/aarch64.h"
54 #include "common/vec.h"
57 #include "record-full.h"
58 #include "arch/aarch64-insn.h"
60 #include "opcode/aarch64.h"
/* Field-extraction helpers for decoding A64 instructions:
   submask (X) is a mask covering bits [0, X]; bit (OBJ, ST) extracts
   bit ST of OBJ; bits (OBJ, ST, FN) extracts the bit-field [ST, FN]
   of OBJ.  NOTE(review): submask shifts a signed long, so X must stay
   below the host long width minus one to avoid undefined behavior --
   confirm all callers respect this.  */
#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   this many fields (per the AAPCS64).  */
#define HA_MAX_NUM_FLDS 4

/* All possible aarch64 target descriptors, indexed by SVE vector
   quotient (0 when SVE is absent) and by whether pointer-authentication
   registers are present.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/];
/* The standard register names, and all the valid aliases for them.
   Each entry maps an alias string to the GDB register number it
   resolves to.  NOTE(review): part of the struct definition (the
   regnum member and surrounding braces) is not visible in this
   chunk.  */
  const char *const name;
} aarch64_register_aliases[] =
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},	/* Frame pointer, x29.  */
  {"lr", AARCH64_LR_REGNUM},	/* Link register, x30.  */
  {"sp", AARCH64_SP_REGNUM},
  /* 32-bit register names.  Each wN alias shares the register number
     of the corresponding xN register.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},
  /* Procedure-call-standard aliases for the intra-procedure-call
     scratch registers x16/x17.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
/* The SVE 'Z' (vector) and 'P' (predicate) registers.  */
static const char *const aarch64_sve_register_names[] =
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",
  /* Predicate registers follow the Z registers.  */
  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
/* Register names exposed when pointer authentication is available.
   NOTE(review): the register-name string entries themselves are not
   visible in this chunk; only the descriptive comments survive.  */
static const char *const aarch64_pauth_register_names[] =
  /* Authentication mask for data pointer.  */
  /* Authentication mask for code pointer.  */
/* AArch64 prologue cache structure.  Holds everything the prologue
   unwinder learns about one frame.  NOTE(review): several member
   declarations are missing from this view; only their comments and
   SAVED_REGS are visible.  */
struct aarch64_prologue_cache
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     prologue frame.  */
  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  /* Is the target available to read from?  */
  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  /* The register used to hold the frame pointer for this frame.  */
  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
/* "show" callback for the AArch64 debugging flag: print whether
   AArch64 debug output is enabled.  VALUE is the textual setting.  */
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
/* Abstract instruction reader.  Lets the prologue analyzer fetch
   instructions either from the live target or from a test array.  */

class abstract_instruction_reader

  /* Read in one instruction: LEN bytes of code at MEMADDR, using
     BYTE_ORDER.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    /* Delegate to the target's code-memory reader.  */
    return read_code_unsigned_integer (memaddr, len, byte_order);
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.

   Instructions in [START, LIMIT) are fetched through READER and
   tracked symbolically (prologue-value machinery); the frame register,
   frame size and saved-register offsets are recorded in CACHE.
   NOTE(review): some declarations, braces and statements of the
   original are not visible in this chunk.  */
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);

  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  /* Initially every tracked register holds its own entry value.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  /* A64 instructions are fixed-width: 4 bytes each.  */
  for (; start < limit; start += 4)
      insn = reader.read (start, 4, byte_order_for_code);

      /* Stop on anything the opcode library cannot decode.  */
      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)

      /* ADD/SUB (immediate): track SP/FP adjustments symbolically.  */
      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

	  if (inst.opcode->op == OP_ADD)
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
      /* ADRP: destination becomes PC-relative, hence unknown here.  */
      else if (inst.opcode->iclass == pcreladdr
	       && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

	  regs[inst.operands[0].reg.regno] = pv_unknown ();
      else if (inst.opcode->iclass == branch_imm)
	  /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == condbranch)
	  /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == branch_reg)
	  /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == compbranch)
	  /* Stop analysis on branch.  */
      else if (inst.opcode->op == OP_MOVZ)
	  /* MOVZ loads an immediate we do not track.  */
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  regs[inst.operands[0].reg.regno] = pv_unknown ();
      /* ORR (shifted register) -- with a zero shift and SP as source
	 this is the canonical "mov rd, sp"/"mov x29, sp"; otherwise
	 the analysis gives up (see debug output below).  */
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	      debug_printf ("aarch64: prologue analysis gave up "
			    "addr=%s opcode=0x%x (orr x register)\n",
			    core_addr_to_string_nz (start), insn);
      else if (inst.opcode->op == OP_STUR)
	  /* STUR: store register (unscaled immediate).  */
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);

	  /* Record the store into the tracked stack area.  */
	  stack.store (pv_add_constant (regs[rn],
					inst.operands[1].addr.offset.imm),
		       is64 ? 8 : 4, regs[rt]);
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;

	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);

	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))

	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))

	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	      /* Only the bottom 64 bits of each V register (the D
		 register) need to be tracked.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;

	  stack.store (pv_add_constant (regs[rn], imm), 8,
	  stack.store (pv_add_constant (regs[rn], imm + 8), 8,

	  /* Pre-indexed addressing also updates the base register.  */
	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);

	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	      /* Only the bottom 64 bits of each V register (the D
		 register) need to be tracked.  */
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt += AARCH64_X_REGISTER_COUNT;

	  stack.store (pv_add_constant (regs[rn], imm),
		       is64 ? 8 : 4, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);
      else if (inst.opcode->iclass == testbranch)
	  /* Stop analysis on branch.  */
	  /* Unrecognized instruction: the analysis gives up here.  */
	  debug_printf ("aarch64: prologue analysis gave up addr=%s"
			core_addr_to_string_nz (start), insn);

  /* Decide which register, if any, anchors this frame.  */
  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;

  /* Record the stack offset at which each X register was saved.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      if (stack.find_reg (gdbarch, i, &offset))
	cache->saved_regs[i].addr = offset;

  /* Likewise for the D registers; their cache slots are addressed as
     pseudo-register numbers past GDBARCH's raw register count.  */
  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      int regnum = gdbarch_num_regs (gdbarch);

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
/* Analyze the prologue in [START, LIMIT) reading instructions from the
   real target; convenience overload of aarch64_analyze_prologue.  */
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
  /* Wrap the SIZE-element array INSNS of 4-byte instructions.  */
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)

  /* Return the instruction at MEMADDR; addresses index the array in
     4-byte units and must stay in bounds.  */
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];

  /* The wrapped instruction array (not owned).  */
  const uint32_t *m_insns;
/* Self-test for the prologue analyzer: run it over hand-assembled
   instruction sequences and check the resulting cache contents.
   NOTE(review): several braces/statements of the original are not
   visible in this chunk.  */
aarch64_analyze_prologue_test (void)
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  static const uint32_t insns[] = {
    0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
    0x910003fd, /* mov x29, sp */
    0x97ffffe6, /* bl 0x400580 */
  instruction_reader_test reader (insns);

  /* Analysis must stop at the bl (branch) after two instructions.  */
  CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
  SELF_CHECK (end == 4 * 2);

  SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
  SELF_CHECK (cache.framesize == 272);

  for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      if (i == AARCH64_FP_REGNUM)
	SELF_CHECK (cache.saved_regs[i].addr == -272);
      else if (i == AARCH64_LR_REGNUM)
	SELF_CHECK (cache.saved_regs[i].addr == -264);
	SELF_CHECK (cache.saved_regs[i].addr == -1);

  for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      int regnum = gdbarch_num_regs (gdbarch);

      SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr

  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

  static const uint32_t insns[] = {
    0xf81d0ff3, /* str x19, [sp, #-48]! */
    0xb9002fe0, /* str w0, [sp, #44] */
    0xf90013e1, /* str x1, [sp, #32]*/
    0xfd000fe0, /* str d0, [sp, #24] */
    0xaa0203f3, /* mov x19, x2 */
    0xf94013e0, /* ldr x0, [sp, #32] */
  instruction_reader_test reader (insns);

  CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

  /* Analysis must stop at the ldr after five instructions.  */
  SELF_CHECK (end == 4 * 5);

  SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
  SELF_CHECK (cache.framesize == 48);

  for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      SELF_CHECK (cache.saved_regs[i].addr == -16);
      SELF_CHECK (cache.saved_regs[i].addr == -48);
      SELF_CHECK (cache.saved_regs[i].addr == -1);

  for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      int regnum = gdbarch_num_regs (gdbarch);

      SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
      SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr

} // namespace selftests
#endif /* GDB_SELF_TEST */
/* Implement the "skip_prologue" gdbarch method.  Return the address of
   the first instruction past the prologue of the function containing
   PC.  */

aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
	return std::max (pc, post_prologue_pc);

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     limit.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
    limit_pc = pc + 128; /* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

aarch64_scan_prologue (struct frame_info *this_frame,
		       struct aarch64_prologue_cache *cache)
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  /* Prefer symbol/line-table information to bound the scan.  */
  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
      else if (sal.end < prologue_end)
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;

      /* Never scan past the frame's current PC.  */
      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);

      /* Otherwise assume a standard frame-pointer frame: 16 bytes with
	 x29 (FP) at offset 0 and x30 (LR) at offset 8.  */
      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
  CORE_ADDR unwound_fp;

  aarch64_scan_prologue (this_frame, cache);

  /* No identifiable frame register: nothing more we can compute.  */
  if (cache->framereg == -1)

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);

  /* The caller's SP is the frame register plus the frame size.  */
  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  /* An unavailable-target error simply leaves the cache marked
     unavailable; any other error is re-thrown.  */
      aarch64_make_prologue_cache_1 (this_frame, cache);
  CATCH (ex, RETURN_MASK_ERROR)
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

aarch64_prologue_this_id (struct frame_info *this_frame,
			  void **this_cache, struct frame_id *this_id)
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* When the target state could not be read, build an
     "unavailable stack" ID instead.  */
  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
    *this_id = frame_id_build (cache->prev_sp, cache->func);
/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,

  /* Everything else comes from the saved-register table built by the
     prologue analysis.  */
  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
  aarch64_prologue_frame_unwind_stop_reason,	/* stop_reason */
  aarch64_prologue_this_id,			/* this_id */
  aarch64_prologue_prev_register,		/* prev_register */
  default_frame_sniffer				/* sniffer */
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  /* A stub frame has no prologue to scan; its ID is simply the current
     SP and PC.  An unavailable-target error leaves the cache marked
     unavailable; any other error is re-thrown.  */
      cache->prev_sp = get_frame_register_unsigned (this_frame,
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
  CATCH (ex, RETURN_MASK_ERROR)
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
/* Our frame ID for a stub frame is the current SP and LR.  */

aarch64_stub_this_id (struct frame_info *this_frame,
		      void **this_cache, struct frame_id *this_id)
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  /* Fall back to an unavailable-stack ID when the registers could not
     be read.  */
  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
/* Implement the "sniffer" frame_unwind method.  Claim the frame when
   the PC is inside a PLT stub or its code memory is unreadable.  */

aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_prologue_cache)
  CORE_ADDR addr_in_block;

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
  aarch64_stub_frame_unwind_stop_reason,	/* stop_reason */
  aarch64_stub_this_id,				/* this_id */
  aarch64_prologue_prev_register,		/* prev_register (shared) */
  aarch64_stub_unwind_sniffer			/* sniffer */
/* Return the frame base address of *THIS_FRAME: the caller's SP minus
   the frame size computed by the prologue analysis.  */

aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
/* AArch64 default frame base information.  The same base address is
   used for the frame base, locals and arguments.  */
struct frame_base aarch64_normal_base =
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
/* Return the value of the REGNUM register in the previous frame of
   THIS_FRAME.  Only the PC is expected here; it unwinds through the
   saved LR.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
			      void **this_cache, int regnum)
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

      /* Any other register is a caller bug.  */
      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
/* Implement the "init_reg" dwarf2_frame_ops method.  */

aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       struct frame_info *this_frame)
    case AARCH64_PC_REGNUM:
      /* Unwind the PC through the function above (i.e. via LR).  */
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
    case AARCH64_SP_REGNUM:
      /* The previous SP is the call frame address.  */
      reg->how = DWARF2_FRAME_REG_CFA;
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */

DEF_VEC_O (stack_item_t);
/* Return the alignment (in bytes) of the given type T.  */

aarch64_type_align (struct type *t)
  t = check_typedef (t);
  switch (TYPE_CODE (t))
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));

    case TYPE_CODE_ENUM:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      /* Scalar types align to their own size.  */
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
	  /* Use the natural alignment for vector types (the same for
	     scalar type), but the maximum alignment is 128-bit.  */
	  if (TYPE_LENGTH (t) > 16)
	  return TYPE_LENGTH (t);
	/* Non-vector arrays align like their element type.  */
	return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* Aggregates align to their most-aligned field.  */
      for (n = 0; n < TYPE_NFIELDS (t); n++)
	  falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
/* Worker function for aapcs_is_vfp_call_or_return_candidate.

   Return the number of registers required, or -1 on failure.

   When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
   to the element, else fail if the type of this element does not match the
   existing fundamental type.  */
aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
					 struct type **fundamental_type)
  if (type == nullptr)

  switch (TYPE_CODE (type))
      /* A float wider than 16 bytes cannot go in a V register.  */
      if (TYPE_LENGTH (type) > 16)

      if (*fundamental_type == nullptr)
	*fundamental_type = type;
      else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
	       || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))

    case TYPE_CODE_COMPLEX:
      /* A complex contributes two elements of its underlying float
	 type.  */
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      if (TYPE_LENGTH (target_type) > 16)

      if (*fundamental_type == nullptr)
	*fundamental_type = target_type;
      else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
	       || TYPE_CODE (target_type) != TYPE_CODE (*fundamental_type))

    case TYPE_CODE_ARRAY:
	if (TYPE_VECTOR (type))
	    /* Short vectors must be exactly 8 or 16 bytes.  */
	    if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)

	    if (*fundamental_type == nullptr)
	      *fundamental_type = type;
	    else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
		     || TYPE_CODE (type) != TYPE_CODE (*fundamental_type))

	    /* An ordinary array contributes its element count times the
	       element's own register count.  */
	    struct type *target_type = TYPE_TARGET_TYPE (type);
	    int count = aapcs_is_vfp_call_or_return_candidate_1
	      (target_type, fundamental_type);

	    count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
	for (int i = 0; i < TYPE_NFIELDS (type); i++)
	    /* Ignore any static fields.  */
	    if (field_is_static (&TYPE_FIELD (type, i)))

	    struct type *member = check_typedef (TYPE_FIELD_TYPE (type, i));

	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
	      (member, fundamental_type);
	    if (sub_count == -1)

	/* Ensure there is no padding between the fields (allowing for empty
	   zero length structs).  */
	int ftype_length = (*fundamental_type == nullptr)
	  ? 0 : TYPE_LENGTH (*fundamental_type);
	if (count * ftype_length != TYPE_LENGTH (type))
/* Return true if an argument, whose type is described by TYPE, can be passed or
   returned in simd/fp registers, providing enough parameter passing registers
   are available.  This is as described in the AAPCS64.

   Upon successful return, *COUNT returns the number of needed registers,
   *FUNDAMENTAL_TYPE contains the type of those registers.

   Candidate as per the AAPCS64 5.4.2.C is either a:
   - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1).  A Composite type where
     all the members are floats and has at most 4 members.
   - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2).  A Composite type where
     all the members are short vectors and has at most 4 members.

   Note that HFAs and HVAs can include nested structures and arrays.  */

aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
				       struct type **fundamental_type)
  if (type == nullptr)

  *fundamental_type = nullptr;

  int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,

  /* A candidate needs between 1 and HA_MAX_NUM_FLDS registers.  */
  if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
/* AArch64 function call information structure.  Tracks the AAPCS64
   allocation state (NGRN/NSRN/NSAA) while arguments are marshalled.
   NOTE(review): the member declarations themselves are not visible in
   this chunk; only their comments and SI survive.  */
struct aarch64_call_info
  /* The current argument number.  */

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

      /* Copy at most one X register's worth per iteration.  */
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
	  && partial_len < X_REGISTER_SIZE
	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

	  debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
			gdbarch_register_name (gdbarch, regnum),
			phex (regval, X_REGISTER_SIZE));
      regcache_cooked_write_unsigned (regcache, regnum, regval);
1335 /* Attempt to marshall a value in a V register. Return 1 if
1336 successful, or 0 if insufficient registers are available. This
1337 function, unlike the equivalent pass_in_x() function does not
1338 handle arguments spread across multiple registers. */
1341 pass_in_v (struct gdbarch *gdbarch,
1342 struct regcache *regcache,
1343 struct aarch64_call_info *info,
1344 int len, const bfd_byte *buf)
1348 int regnum = AARCH64_V0_REGNUM + info->nsrn;
1349 /* Enough space for a full vector register. */
1350 gdb_byte reg[register_size (gdbarch, regnum)];
1351 gdb_assert (len <= sizeof (reg));
1356 memset (reg, 0, sizeof (reg));
1357 /* PCS C.1, the argument is allocated to the least significant
1358 bits of V register. */
1359 memcpy (reg, buf, len);
1360 regcache->cooked_write (regnum, reg);
1364 debug_printf ("arg %d in %s\n", info->argnum,
1365 gdbarch_register_name (gdbarch, regnum));
1373 /* Marshall an argument onto the stack. */
1376 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1379 const bfd_byte *buf = value_contents (arg);
1380 int len = TYPE_LENGTH (type);
1386 align = aarch64_type_align (type);
1388 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1389 Natural alignment of the argument's type. */
1390 align = align_up (align, 8);
1392 /* The AArch64 PCS requires at most doubleword alignment. */
1398 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1404 VEC_safe_push (stack_item_t, info->si, &item);
1407 if (info->nsaa & (align - 1))
1409 /* Push stack alignment padding. */
1410 int pad = align - (info->nsaa & (align - 1));
1415 VEC_safe_push (stack_item_t, info->si, &item);
1420 /* Marshall an argument into a sequence of one or more consecutive X
1421 registers or, if insufficient X registers are available then onto
1425 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1426 struct aarch64_call_info *info, struct type *type,
1429 int len = TYPE_LENGTH (type);
1430 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1432 /* PCS C.13 - Pass in registers if we have enough spare */
1433 if (info->ngrn + nregs <= 8)
1435 pass_in_x (gdbarch, regcache, info, type, arg);
1436 info->ngrn += nregs;
1441 pass_on_stack (info, type, arg);
1445 /* Pass a value, which is of type arg_type, in a V register. Assumes value is a
1446 aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1447 registers. A return value of false is an error state as the value will have
1448 been partially passed to the stack. */
1450 pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
1451 struct aarch64_call_info *info, struct type *arg_type,
1454 switch (TYPE_CODE (arg_type))
1457 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1458 value_contents (arg));
1461 case TYPE_CODE_COMPLEX:
1463 const bfd_byte *buf = value_contents (arg);
1464 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));
1466 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1470 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1471 buf + TYPE_LENGTH (target_type));
1474 case TYPE_CODE_ARRAY:
1475 if (TYPE_VECTOR (arg_type))
1476 return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1477 value_contents (arg));
1480 case TYPE_CODE_STRUCT:
1481 case TYPE_CODE_UNION:
1482 for (int i = 0; i < TYPE_NFIELDS (arg_type); i++)
1484 /* Don't include static fields. */
1485 if (field_is_static (&TYPE_FIELD (arg_type, i)))
1488 struct value *field = value_primitive_field (arg, 0, i, arg_type);
1489 struct type *field_type = check_typedef (value_type (field));
1491 if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
1502 /* Implement the "push_dummy_call" gdbarch method. */
1505 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1506 struct regcache *regcache, CORE_ADDR bp_addr,
1508 struct value **args, CORE_ADDR sp,
1509 function_call_return_method return_method,
1510 CORE_ADDR struct_addr)
1513 struct aarch64_call_info info;
1515 memset (&info, 0, sizeof (info));
1517 /* We need to know what the type of the called function is in order
1518 to determine the number of named/anonymous arguments for the
1519 actual argument placement, and the return type in order to handle
1520 return value correctly.
1522 The generic code above us views the decision of return in memory
1523 or return in registers as a two stage processes. The language
1524 handler is consulted first and may decide to return in memory (eg
1525 class with copy constructor returned by value), this will cause
1526 the generic code to allocate space AND insert an initial leading
1529 If the language code does not decide to pass in memory then the
1530 target code is consulted.
1532 If the language code decides to pass in memory we want to move
1533 the pointer inserted as the initial argument from the argument
1534 list and into X8, the conventional AArch64 struct return pointer
1537 /* Set the return address. For the AArch64, the return breakpoint
1538 is always at BP_ADDR. */
1539 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1541 /* If we were given an initial argument for the return slot, lose it. */
1542 if (return_method == return_method_hidden_param)
1548 /* The struct_return pointer occupies X8. */
1549 if (return_method != return_method_normal)
1553 debug_printf ("struct return in %s = 0x%s\n",
1554 gdbarch_register_name (gdbarch,
1555 AARCH64_STRUCT_RETURN_REGNUM),
1556 paddress (gdbarch, struct_addr));
1558 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1562 for (argnum = 0; argnum < nargs; argnum++)
1564 struct value *arg = args[argnum];
1565 struct type *arg_type, *fundamental_type;
1568 arg_type = check_typedef (value_type (arg));
1569 len = TYPE_LENGTH (arg_type);
1571 /* If arg can be passed in v registers as per the AAPCS64, then do so if
1572 if there are enough spare registers. */
1573 if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1576 if (info.nsrn + elements <= 8)
1578 /* We know that we have sufficient registers available therefore
1579 this will never need to fallback to the stack. */
1580 if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1582 gdb_assert_not_reached ("Failed to push args");
1587 pass_on_stack (&info, arg_type, arg);
1592 switch (TYPE_CODE (arg_type))
1595 case TYPE_CODE_BOOL:
1596 case TYPE_CODE_CHAR:
1597 case TYPE_CODE_RANGE:
1598 case TYPE_CODE_ENUM:
1601 /* Promote to 32 bit integer. */
1602 if (TYPE_UNSIGNED (arg_type))
1603 arg_type = builtin_type (gdbarch)->builtin_uint32;
1605 arg_type = builtin_type (gdbarch)->builtin_int32;
1606 arg = value_cast (arg_type, arg);
1608 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1611 case TYPE_CODE_STRUCT:
1612 case TYPE_CODE_ARRAY:
1613 case TYPE_CODE_UNION:
1616 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1617 invisible reference. */
1619 /* Allocate aligned storage. */
1620 sp = align_down (sp - len, 16);
1622 /* Write the real data into the stack. */
1623 write_memory (sp, value_contents (arg), len);
1625 /* Construct the indirection. */
1626 arg_type = lookup_pointer_type (arg_type);
1627 arg = value_from_pointer (arg_type, sp);
1628 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1631 /* PCS C.15 / C.18 multiple values pass. */
1632 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1636 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1641 /* Make sure stack retains 16 byte alignment. */
1643 sp -= 16 - (info.nsaa & 15);
1645 while (!VEC_empty (stack_item_t, info.si))
1647 stack_item_t *si = VEC_last (stack_item_t, info.si);
1650 if (si->data != NULL)
1651 write_memory (sp, si->data, si->len);
1652 VEC_pop (stack_item_t, info.si);
1655 VEC_free (stack_item_t, info.si);
1657 /* Finally, update the SP register. */
1658 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1663 /* Implement the "frame_align" gdbarch method. */
1666 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1668 /* Align the stack to sixteen bytes. */
1669 return sp & ~(CORE_ADDR) 15;
1672 /* Return the type for an AdvSISD Q register. */
1674 static struct type *
1675 aarch64_vnq_type (struct gdbarch *gdbarch)
1677 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1679 if (tdep->vnq_type == NULL)
1684 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1687 elem = builtin_type (gdbarch)->builtin_uint128;
1688 append_composite_type_field (t, "u", elem);
1690 elem = builtin_type (gdbarch)->builtin_int128;
1691 append_composite_type_field (t, "s", elem);
1696 return tdep->vnq_type;
1699 /* Return the type for an AdvSISD D register. */
1701 static struct type *
1702 aarch64_vnd_type (struct gdbarch *gdbarch)
1704 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1706 if (tdep->vnd_type == NULL)
1711 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1714 elem = builtin_type (gdbarch)->builtin_double;
1715 append_composite_type_field (t, "f", elem);
1717 elem = builtin_type (gdbarch)->builtin_uint64;
1718 append_composite_type_field (t, "u", elem);
1720 elem = builtin_type (gdbarch)->builtin_int64;
1721 append_composite_type_field (t, "s", elem);
1726 return tdep->vnd_type;
1729 /* Return the type for an AdvSISD S register. */
1731 static struct type *
1732 aarch64_vns_type (struct gdbarch *gdbarch)
1734 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1736 if (tdep->vns_type == NULL)
1741 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1744 elem = builtin_type (gdbarch)->builtin_float;
1745 append_composite_type_field (t, "f", elem);
1747 elem = builtin_type (gdbarch)->builtin_uint32;
1748 append_composite_type_field (t, "u", elem);
1750 elem = builtin_type (gdbarch)->builtin_int32;
1751 append_composite_type_field (t, "s", elem);
1756 return tdep->vns_type;
1759 /* Return the type for an AdvSISD H register. */
1761 static struct type *
1762 aarch64_vnh_type (struct gdbarch *gdbarch)
1764 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1766 if (tdep->vnh_type == NULL)
1771 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1774 elem = builtin_type (gdbarch)->builtin_uint16;
1775 append_composite_type_field (t, "u", elem);
1777 elem = builtin_type (gdbarch)->builtin_int16;
1778 append_composite_type_field (t, "s", elem);
1783 return tdep->vnh_type;
1786 /* Return the type for an AdvSISD B register. */
1788 static struct type *
1789 aarch64_vnb_type (struct gdbarch *gdbarch)
1791 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1793 if (tdep->vnb_type == NULL)
1798 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1801 elem = builtin_type (gdbarch)->builtin_uint8;
1802 append_composite_type_field (t, "u", elem);
1804 elem = builtin_type (gdbarch)->builtin_int8;
1805 append_composite_type_field (t, "s", elem);
1810 return tdep->vnb_type;
1813 /* Return the type for an AdvSISD V register. */
1815 static struct type *
1816 aarch64_vnv_type (struct gdbarch *gdbarch)
1818 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1820 if (tdep->vnv_type == NULL)
1822 struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
1825 append_composite_type_field (t, "d", aarch64_vnd_type (gdbarch));
1826 append_composite_type_field (t, "s", aarch64_vns_type (gdbarch));
1827 append_composite_type_field (t, "h", aarch64_vnh_type (gdbarch));
1828 append_composite_type_field (t, "b", aarch64_vnb_type (gdbarch));
1829 append_composite_type_field (t, "q", aarch64_vnq_type (gdbarch));
1834 return tdep->vnv_type;
1837 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1840 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1842 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1844 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1845 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1847 if (reg == AARCH64_DWARF_SP)
1848 return AARCH64_SP_REGNUM;
1850 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1851 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1853 if (reg == AARCH64_DWARF_SVE_VG)
1854 return AARCH64_SVE_VG_REGNUM;
1856 if (reg == AARCH64_DWARF_SVE_FFR)
1857 return AARCH64_SVE_FFR_REGNUM;
1859 if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
1860 return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
1862 if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
1863 return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
1865 if (tdep->has_pauth ())
1867 if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
1868 return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
1870 if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
1871 return tdep->pauth_ra_state_regnum;
1877 /* Implement the "print_insn" gdbarch method. */
1880 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1882 info->symbols = NULL;
1883 return default_print_insn (memaddr, info);
1886 /* AArch64 BRK software debug mode instruction.
1887 Note that AArch64 code is always little-endian.
1888 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1889 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1891 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
1893 /* Extract from an array REGS containing the (raw) register state a
1894 function return value of type TYPE, and copy that, in virtual
1895 format, into VALBUF. */
1898 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1901 struct gdbarch *gdbarch = regs->arch ();
1902 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1904 struct type *fundamental_type;
1906 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
1909 int len = TYPE_LENGTH (fundamental_type);
1911 for (int i = 0; i < elements; i++)
1913 int regno = AARCH64_V0_REGNUM + i;
1914 /* Enough space for a full vector register. */
1915 gdb_byte buf[register_size (gdbarch, regno)];
1916 gdb_assert (len <= sizeof (buf));
1920 debug_printf ("read HFA or HVA return value element %d from %s\n",
1922 gdbarch_register_name (gdbarch, regno));
1924 regs->cooked_read (regno, buf);
1926 memcpy (valbuf, buf, len);
1930 else if (TYPE_CODE (type) == TYPE_CODE_INT
1931 || TYPE_CODE (type) == TYPE_CODE_CHAR
1932 || TYPE_CODE (type) == TYPE_CODE_BOOL
1933 || TYPE_CODE (type) == TYPE_CODE_PTR
1934 || TYPE_IS_REFERENCE (type)
1935 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1937 /* If the type is a plain integer, then the access is
1938 straight-forward. Otherwise we have to play around a bit
1940 int len = TYPE_LENGTH (type);
1941 int regno = AARCH64_X0_REGNUM;
1946 /* By using store_unsigned_integer we avoid having to do
1947 anything special for small big-endian values. */
1948 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1949 store_unsigned_integer (valbuf,
1950 (len > X_REGISTER_SIZE
1951 ? X_REGISTER_SIZE : len), byte_order, tmp);
1952 len -= X_REGISTER_SIZE;
1953 valbuf += X_REGISTER_SIZE;
1958 /* For a structure or union the behaviour is as if the value had
1959 been stored to word-aligned memory and then loaded into
1960 registers with 64-bit load instruction(s). */
1961 int len = TYPE_LENGTH (type);
1962 int regno = AARCH64_X0_REGNUM;
1963 bfd_byte buf[X_REGISTER_SIZE];
1967 regs->cooked_read (regno++, buf);
1968 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1969 len -= X_REGISTER_SIZE;
1970 valbuf += X_REGISTER_SIZE;
1976 /* Will a function return an aggregate type in memory or in a
1977 register? Return 0 if an aggregate type can be returned in a
1978 register, 1 if it must be returned in memory. */
1981 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1983 type = check_typedef (type);
1985 struct type *fundamental_type;
1987 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
1990 /* v0-v7 are used to return values and one register is allocated
1991 for one member. However, HFA or HVA has at most four members. */
1995 if (TYPE_LENGTH (type) > 16)
1997 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1998 invisible reference. */
2006 /* Write into appropriate registers a function return value of type
2007 TYPE, given in virtual format. */
2010 aarch64_store_return_value (struct type *type, struct regcache *regs,
2011 const gdb_byte *valbuf)
2013 struct gdbarch *gdbarch = regs->arch ();
2014 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2016 struct type *fundamental_type;
2018 if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2021 int len = TYPE_LENGTH (fundamental_type);
2023 for (int i = 0; i < elements; i++)
2025 int regno = AARCH64_V0_REGNUM + i;
2026 /* Enough space for a full vector register. */
2027 gdb_byte tmpbuf[register_size (gdbarch, regno)];
2028 gdb_assert (len <= sizeof (tmpbuf));
2032 debug_printf ("write HFA or HVA return value element %d to %s\n",
2034 gdbarch_register_name (gdbarch, regno));
2037 memcpy (tmpbuf, valbuf,
2038 len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2039 regs->cooked_write (regno, tmpbuf);
2043 else if (TYPE_CODE (type) == TYPE_CODE_INT
2044 || TYPE_CODE (type) == TYPE_CODE_CHAR
2045 || TYPE_CODE (type) == TYPE_CODE_BOOL
2046 || TYPE_CODE (type) == TYPE_CODE_PTR
2047 || TYPE_IS_REFERENCE (type)
2048 || TYPE_CODE (type) == TYPE_CODE_ENUM)
2050 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2052 /* Values of one word or less are zero/sign-extended and
2054 bfd_byte tmpbuf[X_REGISTER_SIZE];
2055 LONGEST val = unpack_long (type, valbuf);
2057 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2058 regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2062 /* Integral values greater than one word are stored in
2063 consecutive registers starting with r0. This will always
2064 be a multiple of the regiser size. */
2065 int len = TYPE_LENGTH (type);
2066 int regno = AARCH64_X0_REGNUM;
2070 regs->cooked_write (regno++, valbuf);
2071 len -= X_REGISTER_SIZE;
2072 valbuf += X_REGISTER_SIZE;
2078 /* For a structure or union the behaviour is as if the value had
2079 been stored to word-aligned memory and then loaded into
2080 registers with 64-bit load instruction(s). */
2081 int len = TYPE_LENGTH (type);
2082 int regno = AARCH64_X0_REGNUM;
2083 bfd_byte tmpbuf[X_REGISTER_SIZE];
2087 memcpy (tmpbuf, valbuf,
2088 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2089 regs->cooked_write (regno++, tmpbuf);
2090 len -= X_REGISTER_SIZE;
2091 valbuf += X_REGISTER_SIZE;
2096 /* Implement the "return_value" gdbarch method. */
2098 static enum return_value_convention
2099 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2100 struct type *valtype, struct regcache *regcache,
2101 gdb_byte *readbuf, const gdb_byte *writebuf)
2104 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2105 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2106 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2108 if (aarch64_return_in_memory (gdbarch, valtype))
2111 debug_printf ("return value in memory\n");
2112 return RETURN_VALUE_STRUCT_CONVENTION;
2117 aarch64_store_return_value (valtype, regcache, writebuf);
2120 aarch64_extract_return_value (valtype, regcache, readbuf);
2123 debug_printf ("return value in registers\n");
2125 return RETURN_VALUE_REGISTER_CONVENTION;
2128 /* Implement the "get_longjmp_target" gdbarch method. */
2131 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2134 gdb_byte buf[X_REGISTER_SIZE];
2135 struct gdbarch *gdbarch = get_frame_arch (frame);
2136 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2137 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2139 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2141 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2145 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2149 /* Implement the "gen_return_address" gdbarch method. */
2152 aarch64_gen_return_address (struct gdbarch *gdbarch,
2153 struct agent_expr *ax, struct axs_value *value,
2156 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2157 value->kind = axs_lvalue_register;
2158 value->u.reg = AARCH64_LR_REGNUM;
2162 /* Return the pseudo register name corresponding to register regnum. */
2165 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2167 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2169 static const char *const q_name[] =
2171 "q0", "q1", "q2", "q3",
2172 "q4", "q5", "q6", "q7",
2173 "q8", "q9", "q10", "q11",
2174 "q12", "q13", "q14", "q15",
2175 "q16", "q17", "q18", "q19",
2176 "q20", "q21", "q22", "q23",
2177 "q24", "q25", "q26", "q27",
2178 "q28", "q29", "q30", "q31",
2181 static const char *const d_name[] =
2183 "d0", "d1", "d2", "d3",
2184 "d4", "d5", "d6", "d7",
2185 "d8", "d9", "d10", "d11",
2186 "d12", "d13", "d14", "d15",
2187 "d16", "d17", "d18", "d19",
2188 "d20", "d21", "d22", "d23",
2189 "d24", "d25", "d26", "d27",
2190 "d28", "d29", "d30", "d31",
2193 static const char *const s_name[] =
2195 "s0", "s1", "s2", "s3",
2196 "s4", "s5", "s6", "s7",
2197 "s8", "s9", "s10", "s11",
2198 "s12", "s13", "s14", "s15",
2199 "s16", "s17", "s18", "s19",
2200 "s20", "s21", "s22", "s23",
2201 "s24", "s25", "s26", "s27",
2202 "s28", "s29", "s30", "s31",
2205 static const char *const h_name[] =
2207 "h0", "h1", "h2", "h3",
2208 "h4", "h5", "h6", "h7",
2209 "h8", "h9", "h10", "h11",
2210 "h12", "h13", "h14", "h15",
2211 "h16", "h17", "h18", "h19",
2212 "h20", "h21", "h22", "h23",
2213 "h24", "h25", "h26", "h27",
2214 "h28", "h29", "h30", "h31",
2217 static const char *const b_name[] =
2219 "b0", "b1", "b2", "b3",
2220 "b4", "b5", "b6", "b7",
2221 "b8", "b9", "b10", "b11",
2222 "b12", "b13", "b14", "b15",
2223 "b16", "b17", "b18", "b19",
2224 "b20", "b21", "b22", "b23",
2225 "b24", "b25", "b26", "b27",
2226 "b28", "b29", "b30", "b31",
2229 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2231 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2232 return q_name[p_regnum - AARCH64_Q0_REGNUM];
2234 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2235 return d_name[p_regnum - AARCH64_D0_REGNUM];
2237 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2238 return s_name[p_regnum - AARCH64_S0_REGNUM];
2240 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2241 return h_name[p_regnum - AARCH64_H0_REGNUM];
2243 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2244 return b_name[p_regnum - AARCH64_B0_REGNUM];
2246 if (tdep->has_sve ())
2248 static const char *const sve_v_name[] =
2250 "v0", "v1", "v2", "v3",
2251 "v4", "v5", "v6", "v7",
2252 "v8", "v9", "v10", "v11",
2253 "v12", "v13", "v14", "v15",
2254 "v16", "v17", "v18", "v19",
2255 "v20", "v21", "v22", "v23",
2256 "v24", "v25", "v26", "v27",
2257 "v28", "v29", "v30", "v31",
2260 if (p_regnum >= AARCH64_SVE_V0_REGNUM
2261 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2262 return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2265 /* RA_STATE is used for unwinding only. Do not assign it a name - this
2266 prevents it from being read by methods such as
2267 mi_cmd_trace_frame_collected. */
2268 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2271 internal_error (__FILE__, __LINE__,
2272 _("aarch64_pseudo_register_name: bad register number %d"),
2276 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2278 static struct type *
2279 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2281 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2283 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2285 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2286 return aarch64_vnq_type (gdbarch);
2288 if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2289 return aarch64_vnd_type (gdbarch);
2291 if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2292 return aarch64_vns_type (gdbarch);
2294 if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2295 return aarch64_vnh_type (gdbarch);
2297 if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2298 return aarch64_vnb_type (gdbarch);
2300 if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2301 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2302 return aarch64_vnv_type (gdbarch);
2304 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2305 return builtin_type (gdbarch)->builtin_uint64;
2307 internal_error (__FILE__, __LINE__,
2308 _("aarch64_pseudo_register_type: bad register number %d"),
2312 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2315 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2316 struct reggroup *group)
2318 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2320 int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2322 if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2323 return group == all_reggroup || group == vector_reggroup;
2324 else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2325 return (group == all_reggroup || group == vector_reggroup
2326 || group == float_reggroup);
2327 else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2328 return (group == all_reggroup || group == vector_reggroup
2329 || group == float_reggroup);
2330 else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2331 return group == all_reggroup || group == vector_reggroup;
2332 else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2333 return group == all_reggroup || group == vector_reggroup;
2334 else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2335 && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2336 return group == all_reggroup || group == vector_reggroup;
2337 /* RA_STATE is used for unwinding only. Do not assign it to any groups. */
2338 if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2341 return group == all_reggroup;
2344 /* Helper for aarch64_pseudo_read_value. */
2346 static struct value *
2347 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2348 readable_regcache *regcache, int regnum_offset,
2349 int regsize, struct value *result_value)
2351 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2353 /* Enough space for a full vector register. */
2354 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2355 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2357 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2358 mark_value_bytes_unavailable (result_value, 0,
2359 TYPE_LENGTH (value_type (result_value)));
2361 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2363 return result_value;
2366 /* Implement the "pseudo_register_read_value" gdbarch method. */
2368 static struct value *
2369 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2372 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2373 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2375 VALUE_LVAL (result_value) = lval_register;
2376 VALUE_REGNUM (result_value) = regnum;
2378 regnum -= gdbarch_num_regs (gdbarch);
2380 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2381 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2382 regnum - AARCH64_Q0_REGNUM,
2383 Q_REGISTER_SIZE, result_value);
2385 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2386 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2387 regnum - AARCH64_D0_REGNUM,
2388 D_REGISTER_SIZE, result_value);
2390 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2391 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2392 regnum - AARCH64_S0_REGNUM,
2393 S_REGISTER_SIZE, result_value);
2395 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2396 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2397 regnum - AARCH64_H0_REGNUM,
2398 H_REGISTER_SIZE, result_value);
2400 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2401 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2402 regnum - AARCH64_B0_REGNUM,
2403 B_REGISTER_SIZE, result_value);
2405 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2406 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2407 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2408 regnum - AARCH64_SVE_V0_REGNUM,
2409 V_REGISTER_SIZE, result_value);
2411 gdb_assert_not_reached ("regnum out of bound");
2414 /* Helper for aarch64_pseudo_write. */
2417 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2418 int regnum_offset, int regsize, const gdb_byte *buf)
2420 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2422 /* Enough space for a full vector register. */
2423 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2424 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2426 /* Ensure the register buffer is zero, we want gdb writes of the
2427 various 'scalar' pseudo registers to behavior like architectural
2428 writes, register width bytes are written the remainder are set to
2430 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2432 memcpy (reg_buf, buf, regsize);
2433 regcache->raw_write (v_regnum, reg_buf);
2436 /* Implement the "pseudo_register_write" gdbarch method. */
2439 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2440 int regnum, const gdb_byte *buf)
2442 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2443 regnum -= gdbarch_num_regs (gdbarch);
2445 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2446 return aarch64_pseudo_write_1 (gdbarch, regcache,
2447 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2450 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2451 return aarch64_pseudo_write_1 (gdbarch, regcache,
2452 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2455 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2456 return aarch64_pseudo_write_1 (gdbarch, regcache,
2457 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2460 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2461 return aarch64_pseudo_write_1 (gdbarch, regcache,
2462 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2465 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2466 return aarch64_pseudo_write_1 (gdbarch, regcache,
2467 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2470 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2471 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2472 return aarch64_pseudo_write_1 (gdbarch, regcache,
2473 regnum - AARCH64_SVE_V0_REGNUM,
2474 V_REGISTER_SIZE, buf);
2476 gdb_assert_not_reached ("regnum out of bound");
2479 /* Callback function for user_reg_add. */
/* BATON is a pointer to the register number for the alias (it points
   into aarch64_register_aliases); return that register's value in
   FRAME.  */
2481 static struct value *
2482 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2484 const int *reg_p = (const int *) baton;
2486 return value_of_register (*reg_p, frame);
2490 /* Implement the "software_single_step" gdbarch method, needed to
2491 single step through atomic sequences on AArch64. */
/* A load-exclusive/store-exclusive sequence must not be interrupted by a
   breakpoint, or the store-exclusive would always fail.  When PC is at a
   load-exclusive, scan forward (bounded by ATOMIC_SEQUENCE_LENGTH) for
   the closing store-exclusive and return breakpoint addresses beyond the
   sequence instead of inside it.  */
2493 static std::vector<CORE_ADDR>
2494 aarch64_software_single_step (struct regcache *regcache)
2496 struct gdbarch *gdbarch = regcache->arch ();
2497 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2498 const int insn_size = 4;
2499 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2500 CORE_ADDR pc = regcache_read_pc (regcache);
/* breaks[0]: after the sequence; breaks[1]: taken-branch target.  */
2501 CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2503 CORE_ADDR closing_insn = 0;
2504 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2505 byte_order_for_code);
2508 int bc_insn_count = 0; /* Conditional branch instruction count. */
2509 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2512 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2515 /* Look for a Load Exclusive instruction which begins the sequence. */
2516 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2519 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2522 insn = read_memory_unsigned_integer (loc, insn_size,
2523 byte_order_for_code)
2525 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2527 /* Check if the instruction is a conditional branch. */
2528 if (inst.opcode->iclass == condbranch)
2530 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
/* At most one conditional branch is supported in the sequence.  */
2532 if (bc_insn_count >= 1)
2535 /* It is, so we'll try to set a breakpoint at the destination. */
2536 breaks[1] = loc + inst.operands[0].imm.value;
2542 /* Look for the Store Exclusive which closes the atomic sequence. */
2543 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2550 /* We didn't find a closing Store Exclusive instruction, fall back. */
2554 /* Insert breakpoint after the end of the atomic sequence. */
2555 breaks[0] = loc + insn_size;
2557 /* Check for duplicated breakpoints, and also check that the second
2558 breakpoint is not within the atomic sequence. */
2560 && (breaks[1] == breaks[0]
2561 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2562 last_breakpoint = 0;
2564 std::vector<CORE_ADDR> next_pcs;
2566 /* Insert the breakpoint at the end of the sequence, and one at the
2567 destination of the conditional branch, if it exists. */
2568 for (index = 0; index <= last_breakpoint; index++)
2569 next_pcs.push_back (breaks[index]);
/* Per-instruction state carried from displaced-step copy to fixup.  */
2574 struct aarch64_displaced_step_closure : public displaced_step_closure
2576 /* It is true when condition instruction, such as B.CON, TBZ, etc,
2577 is being displaced stepping. */
2580 /* PC adjustment offset after displaced stepping.  Applied relative to
   the original instruction address in aarch64_displaced_step_fixup.  */
2581 int32_t pc_adjust = 0;
2584 /* Data when visiting instructions for displaced stepping.  Extends
   aarch64_insn_data (the visitor's base) with the scratch-pad address,
   the relocated instruction buffer and the regcache to update.  */
2586 struct aarch64_displaced_step_data
2588 struct aarch64_insn_data base;
2590 /* The address where the instruction will be executed at. */
2592 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2593 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2594 /* Number of instructions in INSN_BUF. */
2595 unsigned insn_count;
2596 /* Registers when doing displaced stepping. */
2597 struct regcache *regs;
/* Closure that receives the PC-adjust decision for the fixup phase.  */
2599 aarch64_displaced_step_closure *dsc;
2602 /* Implementation of aarch64_insn_visitor method "b". */
/* Relocate a B/BL instruction.  IS_BL distinguishes BL; OFFSET is the
   branch displacement from the original instruction address.  */
2605 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2606 struct aarch64_insn_data *data)
2608 struct aarch64_displaced_step_data *dsd
2609 = (struct aarch64_displaced_step_data *) data;
/* Displacement re-expressed relative to the scratch-pad address.  */
2610 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2612 if (can_encode_int32 (new_offset, 28))
2614 /* Emit B rather than BL, because executing BL on a new address
2615 will get the wrong address into LR. In order to avoid this,
2616 we emit B, and update LR if the instruction is BL. */
2617 emit_b (dsd->insn_buf, 0, new_offset)
/* Out of B range from the scratch pad: execute a NOP instead and let
   the fixup phase move the PC by OFFSET.  */
2623 emit_nop (dsd->insn_buf);
2625 dsd->dsc->pc_adjust = offset;
/* BL semantics: LR receives the address of the next original insn.  */
2631 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2632 data->insn_addr + 4);
2636 /* Implementation of aarch64_insn_visitor method "b_cond". */
/* Relocate a conditional branch with condition COND and displacement
   OFFSET.  */
2639 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2640 struct aarch64_insn_data *data)
2642 struct aarch64_displaced_step_data *dsd
2643 = (struct aarch64_displaced_step_data *) data;
2645 /* GDB has to fix up PC after displaced step this instruction
2646 differently according to the condition is true or false. Instead
2647 of checking COND against conditional flags, we can use
2648 the following instructions, and GDB can tell how to fix up PC
2649 according to the PC value.
2651 B.COND TAKEN ; If cond is true, then jump to TAKEN.
/* Emit "B.COND +8" into the scratch pad; the resulting PC tells the
   fixup phase whether the condition was taken.  */
2657 emit_bcond (dsd->insn_buf, cond, 8);
2659 dsd->dsc->pc_adjust = offset;
2660 dsd->insn_count = 1;
2663 /* Dynamically allocate a new register. If we know the register
2664 statically, we should make it a global as above instead of using this
/* Build an aarch64_register value for register number NUM; IS64 selects
   the 64-bit (X) rather than 32-bit (W) form.  */
2667 static struct aarch64_register
2668 aarch64_register (unsigned num, int is64)
2670 return (struct aarch64_register) { num, is64 };
2673 /* Implementation of aarch64_insn_visitor method "cb". */
/* Relocate a CBZ/CBNZ on register RN (IS_CBNZ selects CBNZ, IS64 the
   register width) with displacement OFFSET.  */
2676 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2677 const unsigned rn, int is64,
2678 struct aarch64_insn_data *data)
2680 struct aarch64_displaced_step_data *dsd
2681 = (struct aarch64_displaced_step_data *) data;
2683 /* The offset is out of range for a compare and branch
2684 instruction. We can use the following instructions instead:
2686 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
/* Emit the compare-and-branch with a +8 local target; fixup infers the
   taken/not-taken outcome from the resulting PC.  */
2691 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2692 dsd->insn_count = 1;
2694 dsd->dsc->pc_adjust = offset;
2697 /* Implementation of aarch64_insn_visitor method "tb". */
/* Relocate a TBZ/TBNZ testing bit BIT of register RT (IS_TBNZ selects
   TBNZ) with displacement OFFSET.  */
2700 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2701 const unsigned rt, unsigned bit,
2702 struct aarch64_insn_data *data)
2704 struct aarch64_displaced_step_data *dsd
2705 = (struct aarch64_displaced_step_data *) data;
2707 /* The offset is out of range for a test bit and branch
2708 instruction.  We can use the following instructions instead:
2710 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
/* Emit the test-bit-and-branch with a +8 local target; fixup infers the
   outcome from the resulting PC.  */
2716 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2717 dsd->insn_count = 1;
2719 dsd->dsc->pc_adjust = offset;
2722 /* Implementation of aarch64_insn_visitor method "adr". */
/* Relocate an ADR/ADRP writing register RD.  The computed address is
   written directly and the instruction itself is replaced by a NOP.  */
2725 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2726 const int is_adrp, struct aarch64_insn_data *data)
2728 struct aarch64_displaced_step_data *dsd
2729 = (struct aarch64_displaced_step_data *) data;
2730 /* We know exactly the address the ADR{P,} instruction will compute.
2731 We can just write it to the destination register. */
2732 CORE_ADDR address = data->insn_addr + offset;
2736 /* Clear the lower 12 bits of the offset to get the 4K page. */
2737 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
/* Plain ADR: write the byte-granular address.  */
2741 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2744 dsd->dsc->pc_adjust = 4;
2745 emit_nop (dsd->insn_buf);
2746 dsd->insn_count = 1;
2749 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
/* Relocate a PC-relative literal load into RT.  The literal address is
   materialized into RT first, then a register-based load (LDRSW for the
   sign-extending IS_SW form, otherwise LDR) is emitted.  */
2752 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2753 const unsigned rt, const int is64,
2754 struct aarch64_insn_data *data)
2756 struct aarch64_displaced_step_data *dsd
2757 = (struct aarch64_displaced_step_data *) data;
2758 CORE_ADDR address = data->insn_addr + offset;
2759 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
/* Stash the literal's absolute address in RT, then load through it.  */
2761 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2765 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2766 aarch64_register (rt, 1), zero);
2768 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2769 aarch64_register (rt, 1), zero);
2771 dsd->dsc->pc_adjust = 4;
2774 /* Implementation of aarch64_insn_visitor method "others". */
/* Any instruction that needs no relocation: copy it verbatim to the
   scratch pad.  */
2777 aarch64_displaced_step_others (const uint32_t insn,
2778 struct aarch64_insn_data *data)
2780 struct aarch64_displaced_step_data *dsd
2781 = (struct aarch64_displaced_step_data *) data;
2783 aarch64_emit_insn (dsd->insn_buf, insn);
2784 dsd->insn_count = 1;
/* 0xd65f0000 masked with 0xfffffc1f matches RET; after a RET the PC is
   already correct, so no post-step adjustment is wanted.  */
2786 if ((insn & 0xfffffc1f) == 0xd65f0000)
2789 dsd->dsc->pc_adjust = 0;
2792 dsd->dsc->pc_adjust = 4;
/* Visitor binding each instruction class to its displaced-stepping
   relocation handler above; consumed by aarch64_relocate_instruction.  */
2795 static const struct aarch64_insn_visitor visitor =
2797 aarch64_displaced_step_b,
2798 aarch64_displaced_step_b_cond,
2799 aarch64_displaced_step_cb,
2800 aarch64_displaced_step_tb,
2801 aarch64_displaced_step_adr,
2802 aarch64_displaced_step_ldr_literal,
2803 aarch64_displaced_step_others,
2806 /* Implement the "displaced_step_copy_insn" gdbarch method. */
/* Copy the instruction at FROM, relocated as needed, to the scratch pad
   at TO.  Returns the closure passed later to the fixup method, or
   refuses (atomic sequences cannot be displaced-stepped).  */
2808 struct displaced_step_closure *
2809 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2810 CORE_ADDR from, CORE_ADDR to,
2811 struct regcache *regs)
2813 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2814 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2815 struct aarch64_displaced_step_data dsd;
2818 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2821 /* Look for a Load Exclusive instruction which begins the sequence. */
2822 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2824 /* We can't displaced step atomic sequences. */
/* unique_ptr keeps the closure released on every early-return path.  */
2828 std::unique_ptr<aarch64_displaced_step_closure> dsc
2829 (new aarch64_displaced_step_closure);
2830 dsd.base.insn_addr = from;
2833 dsd.dsc = dsc.get ();
2835 aarch64_relocate_instruction (insn, &visitor,
2836 (struct aarch64_insn_data *) &dsd);
2837 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2839 if (dsd.insn_count != 0)
2843 /* Instruction can be relocated to scratch pad. Copy
2844 relocated instruction(s) there. */
2845 for (i = 0; i < dsd.insn_count; i++)
2847 if (debug_displaced)
2849 debug_printf ("displaced: writing insn ");
2850 debug_printf ("%.8x", dsd.insn_buf[i]);
2851 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2853 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2854 (ULONGEST) dsd.insn_buf[i]);
/* Ownership of the closure transfers to the caller.  */
2862 return dsc.release ();
2865 /* Implement the "displaced_step_fixup" gdbarch method. */
/* After the relocated instruction at TO executed, repair the PC so
   execution resumes relative to the original address FROM, using the
   pc_adjust decided during the copy phase.  */
2868 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2869 struct displaced_step_closure *dsc_,
2870 CORE_ADDR from, CORE_ADDR to,
2871 struct regcache *regs)
2873 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2879 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
/* PC relative to the scratch pad reveals which branch arm ran.  */
2882 /* Condition is true. */
2884 else if (pc - to == 4)
2886 /* Condition is false. */
2890 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2893 if (dsc->pc_adjust != 0)
2895 if (debug_displaced)
2897 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2898 paddress (gdbarch, from), dsc->pc_adjust);
2900 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2901 from + dsc->pc_adjust);
2905 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
/* NOTE(review): body elided in this view; signature only.  */
2908 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2909 struct displaced_step_closure *closure)
2914 /* Get the correct target description for the given VQ value.
2915 If VQ is zero then it is assumed SVE is not supported.
2916 (It is not possible to set VQ to zero on an SVE system). */
/* Descriptions are cached in tdesc_aarch64_list, keyed by VQ and by
   whether pointer authentication (PAUTH_P) is present; a missing entry
   is created on first use.  */
2919 aarch64_read_description (uint64_t vq, bool pauth_p)
2921 if (vq > AARCH64_MAX_SVE_VQ)
2922 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
2923 AARCH64_MAX_SVE_VQ);
2925 struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p];
2929 tdesc = aarch64_create_target_description (vq, pauth_p);
2930 tdesc_aarch64_list[vq][pauth_p] = tdesc;
2936 /* Return the VQ used when creating the target description TDESC. */
/* VQ is recovered from the bit size of the first SVE Z register; a
   description without registers or without the SVE feature yields the
   non-SVE value.  */
2939 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
2941 const struct tdesc_feature *feature_sve;
2943 if (!tdesc_has_registers (tdesc))
2946 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2948 if (feature_sve == nullptr)
/* Vector length in bytes = register bit size / 8.  */
2951 uint64_t vl = tdesc_register_bitsize (feature_sve,
2952 aarch64_sve_register_names[0]) / 8;
2953 return sve_vq_from_vl (vl);
2956 /* Add all the expected register sets into GDBARCH. */
/* Registers are later assigned to these groups via the
   pseudo_register_reggroup_p hook and tdesc defaults.  */
2959 aarch64_add_reggroups (struct gdbarch *gdbarch)
2961 reggroup_add (gdbarch, general_reggroup);
2962 reggroup_add (gdbarch, float_reggroup);
2963 reggroup_add (gdbarch, system_reggroup);
2964 reggroup_add (gdbarch, vector_reggroup);
2965 reggroup_add (gdbarch, all_reggroup);
2966 reggroup_add (gdbarch, save_reggroup);
2967 reggroup_add (gdbarch, restore_reggroup);
2970 /* Implement the "cannot_store_register" gdbarch method. */
/* Returns non-zero for registers GDB must not write: on pauth-capable
   targets the DMASK/CMASK registers are read-only.  */
2973 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
2975 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2977 if (!tdep->has_pauth ())
2980 /* Pointer authentication registers are read-only. */
2981 return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
2982 || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
2985 /* Initialize the current architecture based on INFO. If possible,
2986 re-use an architecture from ARCHES, which is a list of
2987 architectures already created during this debugging session.
2989 Called e.g. at program startup, when reading a core file, and when
2990 reading a binary file. */
2992 static struct gdbarch *
2993 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2995 struct gdbarch_tdep *tdep;
2996 struct gdbarch *gdbarch;
2997 struct gdbarch_list *best_arch;
2998 struct tdesc_arch_data *tdesc_data = NULL;
2999 const struct target_desc *tdesc = info.target_desc;
3002 const struct tdesc_feature *feature_core;
3003 const struct tdesc_feature *feature_fpu;
3004 const struct tdesc_feature *feature_sve;
3005 const struct tdesc_feature *feature_pauth;
3007 int num_pseudo_regs = 0;
3008 int first_pauth_regnum = -1;
3009 int pauth_ra_state_offset = -1;
3011 /* Ensure we always have a target description. */
3012 if (!tdesc_has_registers (tdesc))
3013 tdesc = aarch64_read_description (0, false);
/* Locate the optional/mandatory register features in the tdesc.  */
3016 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
3017 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3018 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3019 feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3021 if (feature_core == NULL)
3024 tdesc_data = tdesc_data_alloc ();
3026 /* Validate the description provides the mandatory core R registers
3027 and allocate their numbers. */
3028 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3029 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
3030 AARCH64_X0_REGNUM + i,
3031 aarch64_r_register_names[i]);
3033 num_regs = AARCH64_X0_REGNUM + i;
3035 /* Add the V registers. */
3036 if (feature_fpu != NULL)
3038 if (feature_sve != NULL)
3039 error (_("Program contains both fpu and SVE features."));
3041 /* Validate the description provides the mandatory V registers
3042 and allocate their numbers. */
3043 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3044 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
3045 AARCH64_V0_REGNUM + i,
3046 aarch64_v_register_names[i]);
3048 num_regs = AARCH64_V0_REGNUM + i;
3051 /* Add the SVE registers. */
3052 if (feature_sve != NULL)
3054 /* Validate the description provides the mandatory SVE registers
3055 and allocate their numbers. */
3056 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3057 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
3058 AARCH64_SVE_Z0_REGNUM + i,
3059 aarch64_sve_register_names[i]);
3061 num_regs = AARCH64_SVE_Z0_REGNUM + i;
3062 num_pseudo_regs += 32; /* add the Vn register pseudos. */
/* Scalar view pseudo registers exist whenever FP or SVE is present.  */
3065 if (feature_fpu != NULL || feature_sve != NULL)
3067 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
3068 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3069 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3070 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3071 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3074 /* Add the pauth registers. */
3075 if (feature_pauth != NULL)
3077 first_pauth_regnum = num_regs;
3078 pauth_ra_state_offset = num_pseudo_regs;
3079 /* Validate the descriptor provides the mandatory PAUTH registers and
3080 allocate their numbers. */
3081 for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3082 valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data,
3083 first_pauth_regnum + i,
3084 aarch64_pauth_register_names[i]);
3087 num_pseudo_regs += 1; /* Count RA_STATE pseudo register. */
/* Validation failed: release the tdesc data before bailing out.  */
3092 tdesc_data_cleanup (tdesc_data);
3096 /* AArch64 code is always little-endian. */
3097 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3099 /* If there is already a candidate, use it. */
3100 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3102 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3104 /* Found a match. */
3108 if (best_arch != NULL)
3110 if (tdesc_data != NULL)
3111 tdesc_data_cleanup (tdesc_data);
3112 return best_arch->gdbarch;
/* No reusable candidate: build a fresh gdbarch and its tdep.  */
3115 tdep = XCNEW (struct gdbarch_tdep);
3116 gdbarch = gdbarch_alloc (&info, tdep);
3118 /* This should be low enough for everything. */
3119 tdep->lowest_pc = 0x20;
3120 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3121 tdep->jb_elt_size = 8;
3122 tdep->vq = aarch64_get_tdesc_vq (tdesc);
3123 tdep->pauth_reg_base = first_pauth_regnum;
3124 tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3125 : pauth_ra_state_offset + num_regs;
3128 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3129 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3131 /* Advance PC across function entry code. */
3132 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3134 /* The stack grows downward. */
3135 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3137 /* Breakpoint manipulation. */
3138 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3139 aarch64_breakpoint::kind_from_pc);
3140 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3141 aarch64_breakpoint::bp_from_kind);
3142 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3143 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3145 /* Information about registers, etc. */
3146 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3147 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3148 set_gdbarch_num_regs (gdbarch, num_regs);
3150 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3151 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3152 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3153 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3154 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3155 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3156 aarch64_pseudo_register_reggroup_p);
3157 set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
/* Fundamental type sizes and formats for the LP64 AArch64 ABI.  */
3160 set_gdbarch_short_bit (gdbarch, 16);
3161 set_gdbarch_int_bit (gdbarch, 32);
3162 set_gdbarch_float_bit (gdbarch, 32);
3163 set_gdbarch_double_bit (gdbarch, 64);
3164 set_gdbarch_long_double_bit (gdbarch, 128);
3165 set_gdbarch_long_bit (gdbarch, 64);
3166 set_gdbarch_long_long_bit (gdbarch, 64);
3167 set_gdbarch_ptr_bit (gdbarch, 64);
3168 set_gdbarch_char_signed (gdbarch, 0);
3169 set_gdbarch_wchar_signed (gdbarch, 0);
3170 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3171 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3172 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3174 /* Internal <-> external register number maps. */
3175 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3177 /* Returning results. */
3178 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3181 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3183 /* Virtual tables. */
3184 set_gdbarch_vbit_in_delta (gdbarch, 1);
3186 /* Register architecture. */
3187 aarch64_add_reggroups (gdbarch);
3189 /* Hook in the ABI-specific overrides, if they have been registered. */
3190 info.target_desc = tdesc;
3191 info.tdesc_data = tdesc_data;
3192 gdbarch_init_osabi (info, gdbarch);
3194 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3196 /* Add some default predicates. */
3197 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3198 dwarf2_append_unwinders (gdbarch);
3199 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3201 frame_base_set_default (gdbarch, &aarch64_normal_base);
3203 /* Now we have tuned the configuration, set a few final things,
3204 based on what the OS ABI has told us. */
3206 if (tdep->jb_pc >= 0)
3207 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3209 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3211 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3213 /* Add standard register aliases. */
3214 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3215 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3216 value_of_aarch64_user_reg,
3217 &aarch64_register_aliases[i].regnum);
3219 register_aarch64_ravenscar_ops (gdbarch);
/* Dump per-architecture (tdep) state to FILE; used by the gdbarch dump
   machinery.  */
3225 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3227 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3232 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3233 paddress (gdbarch, tdep->lowest_pc));
/* Forward declaration for the self-test registered below.  */
3239 static void aarch64_process_record_test (void);
/* Module initializer: register the architecture, the "set/show debug
   aarch64" command, and (when selftests are built in) the unit tests.  */
3244 _initialize_aarch64_tdep (void)
3246 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3249 /* Debug this file's internals. */
3250 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3251 Set AArch64 debugging."), _("\
3252 Show AArch64 debugging."), _("\
3253 When on, AArch64 specific debugging is enabled."),
3256 &setdebuglist, &showdebuglist);
3259 selftests::register_test ("aarch64-analyze-prologue",
3260 selftests::aarch64_analyze_prologue_test);
3261 selftests::register_test ("aarch64-process-record",
3262 selftests::aarch64_process_record_test);
3263 selftests::record_xml_tdesc ("aarch64.xml",
3264 aarch64_create_target_description (0, false));
3268 /* AArch64 process record-replay related structures, defines etc. */
/* Allocate REGS for LENGTH register numbers and copy them from
   RECORD_BUF.  Caller owns the XNEWVEC allocation.  */
3270 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3273 unsigned int reg_len = LENGTH; \
3276 REGS = XNEWVEC (uint32_t, reg_len); \
3277 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
/* Allocate MEMS for LENGTH aarch64_mem_r records and copy them from
   RECORD_BUF.  Caller owns the XNEWVEC allocation.  */
3282 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3285 unsigned int mem_len = LENGTH; \
3288 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3289 memcpy(&MEMS->len, &RECORD_BUF[0], \
3290 sizeof(struct aarch64_mem_r) * LENGTH); \
3295 /* AArch64 record/replay structures and enumerations. */
/* One recorded memory access: length and address of the bytes that an
   instruction will modify.  */
3297 struct aarch64_mem_r
3299 uint64_t len; /* Record length. */
3300 uint64_t addr; /* Memory address. */
/* Result codes returned by the per-class record handlers below.  */
3303 enum aarch64_record_result
3305 AARCH64_RECORD_SUCCESS,
3306 AARCH64_RECORD_UNSUPPORTED,
3307 AARCH64_RECORD_UNKNOWN
/* Decoding context threaded through all record handlers: the insn being
   recorded plus the register/memory effects accumulated for it.  */
3310 typedef struct insn_decode_record_t
3312 struct gdbarch *gdbarch;
3313 struct regcache *regcache;
3314 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3315 uint32_t aarch64_insn; /* Insn to be recorded. */
3316 uint32_t mem_rec_count; /* Count of memory records. */
3317 uint32_t reg_rec_count; /* Count of register records. */
3318 uint32_t *aarch64_regs; /* Registers to be recorded. */
3319 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3320 } insn_decode_record;
3322 /* Record handler for data processing - register instructions. */
/* Decodes bits 21-31 to classify the insn and records the destination
   register, plus CPSR when the flag-setting variant is used.  */
3325 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3327 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3328 uint32_t record_buf[4];
3330 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3331 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3332 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3334 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3338 /* Logical (shifted register). */
3339 if (insn_bits24_27 == 0x0a)
3340 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3342 else if (insn_bits24_27 == 0x0b)
3343 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3345 return AARCH64_RECORD_UNKNOWN;
3347 record_buf[0] = reg_rd;
3348 aarch64_insn_r->reg_rec_count = 1;
3350 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3354 if (insn_bits24_27 == 0x0b)
3356 /* Data-processing (3 source). */
3357 record_buf[0] = reg_rd;
3358 aarch64_insn_r->reg_rec_count = 1;
3360 else if (insn_bits24_27 == 0x0a)
3362 if (insn_bits21_23 == 0x00)
3364 /* Add/subtract (with carry). */
3365 record_buf[0] = reg_rd;
3366 aarch64_insn_r->reg_rec_count = 1;
3367 if (bit (aarch64_insn_r->aarch64_insn, 29))
3369 record_buf[1] = AARCH64_CPSR_REGNUM;
3370 aarch64_insn_r->reg_rec_count = 2;
3373 else if (insn_bits21_23 == 0x02)
3375 /* Conditional compare (register) and conditional compare
3376 (immediate) instructions. */
3377 record_buf[0] = AARCH64_CPSR_REGNUM;
3378 aarch64_insn_r->reg_rec_count = 1;
3380 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3382 /* Conditional select. */
3383 /* Data-processing (2 source). */
3384 /* Data-processing (1 source). */
3385 record_buf[0] = reg_rd;
3386 aarch64_insn_r->reg_rec_count = 1;
3389 return AARCH64_RECORD_UNKNOWN;
3393 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3395 return AARCH64_RECORD_SUCCESS;
3398 /* Record handler for data processing - immediate instructions. */
/* Records the destination register, plus CPSR for flag-setting add/sub
   and logical-immediate variants.  */
3401 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3403 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3404 uint32_t record_buf[4];
3406 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3407 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3408 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3410 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3411 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3412 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3414 record_buf[0] = reg_rd;
3415 aarch64_insn_r->reg_rec_count = 1;
3417 else if (insn_bits24_27 == 0x01)
3419 /* Add/Subtract (immediate). */
3420 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3421 record_buf[0] = reg_rd;
3422 aarch64_insn_r->reg_rec_count = 1;
3424 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3426 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3428 /* Logical (immediate). */
3429 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3430 record_buf[0] = reg_rd;
3431 aarch64_insn_r->reg_rec_count = 1;
3433 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3436 return AARCH64_RECORD_UNKNOWN;
3438 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3440 return AARCH64_RECORD_SUCCESS;
3443 /* Record handler for branch, exception generation and system instructions. */
/* Delegates SVC to the OS-ABI syscall recorder; otherwise records PC,
   LR and/or CPSR depending on the branch/system form.  */
3446 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3448 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3449 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3450 uint32_t record_buf[4];
3452 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3453 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3454 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3456 if (insn_bits28_31 == 0x0d)
3458 /* Exception generation instructions. */
3459 if (insn_bits24_27 == 0x04)
3461 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3462 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3463 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3465 ULONGEST svc_number;
/* SVC: the syscall number is in x8; hand off to the OS-ABI hook.  */
3467 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3469 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3473 return AARCH64_RECORD_UNSUPPORTED;
3475 /* System instructions. */
3476 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3478 uint32_t reg_rt, reg_crn;
3480 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3481 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3483 /* Record rt in case of sysl and mrs instructions. */
3484 if (bit (aarch64_insn_r->aarch64_insn, 21))
3486 record_buf[0] = reg_rt;
3487 aarch64_insn_r->reg_rec_count = 1;
3489 /* Record cpsr for hint and msr(immediate) instructions. */
3490 else if (reg_crn == 0x02 || reg_crn == 0x04)
3492 record_buf[0] = AARCH64_CPSR_REGNUM;
3493 aarch64_insn_r->reg_rec_count = 1;
3496 /* Unconditional branch (register). */
3497 else if((insn_bits24_27 & 0x0e) == 0x06)
3499 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3500 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3501 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3504 return AARCH64_RECORD_UNKNOWN;
3506 /* Unconditional branch (immediate). */
3507 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3509 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3510 if (bit (aarch64_insn_r->aarch64_insn, 31))
3511 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3514 /* Compare & branch (immediate), Test & branch (immediate) and
3515 Conditional branch (immediate). */
3516 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3518 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3520 return AARCH64_RECORD_SUCCESS;
3523 /* Record handler for advanced SIMD load and store instructions. */
/* Handles both the single-structure (bit 24 set) and multiple-structure
   forms; loads record the touched V registers, stores record the
   affected memory ranges, and writeback additionally records Rn.  */
3526 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3529 uint64_t addr_offset = 0;
3530 uint32_t record_buf[24];
3531 uint64_t record_buf_mem[24];
3532 uint32_t reg_rn, reg_rt;
3533 uint32_t reg_index = 0, mem_index = 0;
3534 uint8_t opcode_bits, size_bits;
3536 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3537 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3538 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3539 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3540 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3543 debug_printf ("Process record: Advanced SIMD load/store\n");
3545 /* Load/store single structure. */
3546 if (bit (aarch64_insn_r->aarch64_insn, 24))
3548 uint8_t sindex, scale, selem, esize, replicate = 0;
3549 scale = opcode_bits >> 2;
3550 selem = ((opcode_bits & 0x02) |
3551 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
/* Validate the size/opcode encoding for the element size.  */
3555 if (size_bits & 0x01)
3556 return AARCH64_RECORD_UNKNOWN;
3559 if ((size_bits >> 1) & 0x01)
3560 return AARCH64_RECORD_UNKNOWN;
3561 if (size_bits & 0x01)
3563 if (!((opcode_bits >> 1) & 0x01))
3566 return AARCH64_RECORD_UNKNOWN;
3570 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3577 return AARCH64_RECORD_UNKNOWN;
/* Replicating (LD*R) loads write whole V registers.  */
3583 for (sindex = 0; sindex < selem; sindex++)
3585 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3586 reg_rt = (reg_rt + 1) % 32;
3590 for (sindex = 0; sindex < selem; sindex++)
3592 if (bit (aarch64_insn_r->aarch64_insn, 22))
3593 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
/* Store: record the memory range touched instead.  */
3596 record_buf_mem[mem_index++] = esize / 8;
3597 record_buf_mem[mem_index++] = address + addr_offset;
3599 addr_offset = addr_offset + (esize / 8);
3600 reg_rt = (reg_rt + 1) % 32;
3604 /* Load/store multiple structure. */
3607 uint8_t selem, esize, rpt, elements;
3608 uint8_t eindex, rindex;
3610 esize = 8 << size_bits;
3611 if (bit (aarch64_insn_r->aarch64_insn, 30))
3612 elements = 128 / esize;
3614 elements = 64 / esize;
3616 switch (opcode_bits)
3618 /*LD/ST4 (4 Registers). */
3623 /*LD/ST1 (4 Registers). */
3628 /*LD/ST3 (3 Registers). */
3633 /*LD/ST1 (3 Registers). */
3638 /*LD/ST1 (1 Register). */
3643 /*LD/ST2 (2 Registers). */
3648 /*LD/ST1 (2 Registers). */
3654 return AARCH64_RECORD_UNSUPPORTED;
3657 for (rindex = 0; rindex < rpt; rindex++)
3658 for (eindex = 0; eindex < elements; eindex++)
3660 uint8_t reg_tt, sindex;
3661 reg_tt = (reg_rt + rindex) % 32;
3662 for (sindex = 0; sindex < selem; sindex++)
3664 if (bit (aarch64_insn_r->aarch64_insn, 22))
3665 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3668 record_buf_mem[mem_index++] = esize / 8;
3669 record_buf_mem[mem_index++] = address + addr_offset;
3671 addr_offset = addr_offset + (esize / 8);
3672 reg_tt = (reg_tt + 1) % 32;
/* Writeback (bit 23): the base register Rn is also modified.  */
3677 if (bit (aarch64_insn_r->aarch64_insn, 23))
3678 record_buf[reg_index++] = reg_rn;
3680 aarch64_insn_r->reg_rec_count = reg_index;
3681 aarch64_insn_r->mem_rec_count = mem_index / 2;
3682 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3684 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3686 return AARCH64_RECORD_SUCCESS;
3689 /* Record handler for load and store instructions. */
3692 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3694 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3695 uint8_t insn_bit23, insn_bit21;
3696 uint8_t opc, size_bits, ld_flag, vector_flag;
3697 uint32_t reg_rn, reg_rt, reg_rt2;
3698 uint64_t datasize, offset;
3699 uint32_t record_buf[8];
3700 uint64_t record_buf_mem[8];
3703 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3704 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3705 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3706 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3707 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3708 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3709 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3710 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3711 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3712 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3713 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3715 /* Load/store exclusive. */
3716 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3719 debug_printf ("Process record: load/store exclusive\n");
3723 record_buf[0] = reg_rt;
3724 aarch64_insn_r->reg_rec_count = 1;
3727 record_buf[1] = reg_rt2;
3728 aarch64_insn_r->reg_rec_count = 2;
3734 datasize = (8 << size_bits) * 2;
3736 datasize = (8 << size_bits);
3737 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3739 record_buf_mem[0] = datasize / 8;
3740 record_buf_mem[1] = address;
3741 aarch64_insn_r->mem_rec_count = 1;
3744 /* Save register rs. */
3745 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3746 aarch64_insn_r->reg_rec_count = 1;
3750 /* Load register (literal) instructions decoding. */
3751 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3754 debug_printf ("Process record: load register (literal)\n");
3756 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3758 record_buf[0] = reg_rt;
3759 aarch64_insn_r->reg_rec_count = 1;
3761 /* All types of load/store pair instructions decoding. */
3762 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3765 debug_printf ("Process record: load/store pair\n");
3771 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3772 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3776 record_buf[0] = reg_rt;
3777 record_buf[1] = reg_rt2;
3779 aarch64_insn_r->reg_rec_count = 2;
3784 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3786 size_bits = size_bits >> 1;
3787 datasize = 8 << (2 + size_bits);
3788 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3789 offset = offset << (2 + size_bits);
3790 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3792 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3794 if (imm7_off & 0x40)
3795 address = address - offset;
3797 address = address + offset;
3800 record_buf_mem[0] = datasize / 8;
3801 record_buf_mem[1] = address;
3802 record_buf_mem[2] = datasize / 8;
3803 record_buf_mem[3] = address + (datasize / 8);
3804 aarch64_insn_r->mem_rec_count = 2;
3806 if (bit (aarch64_insn_r->aarch64_insn, 23))
3807 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3809 /* Load/store register (unsigned immediate) instructions. */
3810 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3812 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3822 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3824 /* PRFM (immediate) */
3825 return AARCH64_RECORD_SUCCESS;
3827 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3829 /* LDRSW (immediate) */
3843 debug_printf ("Process record: load/store (unsigned immediate):"
3844 " size %x V %d opc %x\n", size_bits, vector_flag,
3850 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3851 datasize = 8 << size_bits;
3852 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3854 offset = offset << size_bits;
3855 address = address + offset;
3857 record_buf_mem[0] = datasize >> 3;
3858 record_buf_mem[1] = address;
3859 aarch64_insn_r->mem_rec_count = 1;
3864 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3866 record_buf[0] = reg_rt;
3867 aarch64_insn_r->reg_rec_count = 1;
3870 /* Load/store register (register offset) instructions. */
3871 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3872 && insn_bits10_11 == 0x02 && insn_bit21)
3875 debug_printf ("Process record: load/store (register offset)\n");
3876 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3883 if (size_bits != 0x03)
3886 return AARCH64_RECORD_UNKNOWN;
3890 ULONGEST reg_rm_val;
3892 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3893 bits (aarch64_insn_r->aarch64_insn, 16, 20), ®_rm_val);
3894 if (bit (aarch64_insn_r->aarch64_insn, 12))
3895 offset = reg_rm_val << size_bits;
3897 offset = reg_rm_val;
3898 datasize = 8 << size_bits;
3899 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3901 address = address + offset;
3902 record_buf_mem[0] = datasize >> 3;
3903 record_buf_mem[1] = address;
3904 aarch64_insn_r->mem_rec_count = 1;
3909 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3911 record_buf[0] = reg_rt;
3912 aarch64_insn_r->reg_rec_count = 1;
3915 /* Load/store register (immediate and unprivileged) instructions. */
3916 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3921 debug_printf ("Process record: load/store "
3922 "(immediate and unprivileged)\n");
3924 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3931 if (size_bits != 0x03)
3934 return AARCH64_RECORD_UNKNOWN;
3939 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3940 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3941 datasize = 8 << size_bits;
3942 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3944 if (insn_bits10_11 != 0x01)
3946 if (imm9_off & 0x0100)
3947 address = address - offset;
3949 address = address + offset;
3951 record_buf_mem[0] = datasize >> 3;
3952 record_buf_mem[1] = address;
3953 aarch64_insn_r->mem_rec_count = 1;
3958 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3960 record_buf[0] = reg_rt;
3961 aarch64_insn_r->reg_rec_count = 1;
3963 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3964 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3966 /* Advanced SIMD load/store instructions. */
3968 return aarch64_record_asimd_load_store (aarch64_insn_r);
3970 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3972 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3974 return AARCH64_RECORD_SUCCESS;
3977 /* Record handler for data processing SIMD and floating point instructions. */
/* Record handler for data processing SIMD and floating point
   instructions.  Every encoding handled here modifies exactly one
   destination register — a general X register, a SIMD&FP V register,
   or CPSR for the compare forms — which the gdb_assert at the bottom
   enforces.  Returns AARCH64_RECORD_SUCCESS or
   AARCH64_RECORD_UNKNOWN.
   NOTE(review): this listing is elided — original line numbers are
   not contiguous, so some enclosing braces/else lines are not
   visible.  */
3980 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3982 uint8_t insn_bit21, opcode, rmode, reg_rd;
3983 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3984 uint8_t insn_bits11_14;
3985 uint32_t record_buf[2];
/* Decode the fields used to discriminate the sub-classes.  */
3987 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3988 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3989 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3990 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3991 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3992 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3993 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3994 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3995 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3998 debug_printf ("Process record: data processing SIMD/FP: ");
/* Scalar floating-point group.  */
4000 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4002 /* Floating point - fixed point conversion instructions. */
4006 debug_printf ("FP - fixed point conversion");
/* FP -> fixed-point writes an X register; the converse writes Vd.  */
4008 if ((opcode >> 1) == 0x0 && rmode == 0x03)
4009 record_buf[0] = reg_rd;
4011 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4013 /* Floating point - conditional compare instructions. */
4014 else if (insn_bits10_11 == 0x01)
4017 debug_printf ("FP - conditional compare");
/* Compares only update the condition flags.  */
4019 record_buf[0] = AARCH64_CPSR_REGNUM;
4021 /* Floating point - data processing (2-source) and
4022 conditional select instructions. */
4023 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4026 debug_printf ("FP - DP (2-source)");
4028 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4030 else if (insn_bits10_11 == 0x00)
4032 /* Floating point - immediate instructions. */
4033 if ((insn_bits12_15 & 0x01) == 0x01
4034 || (insn_bits12_15 & 0x07) == 0x04)
4037 debug_printf ("FP - immediate");
4038 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4040 /* Floating point - compare instructions. */
4041 else if ((insn_bits12_15 & 0x03) == 0x02)
4044 debug_printf ("FP - immediate");
4045 record_buf[0] = AARCH64_CPSR_REGNUM;
4047 /* Floating point - integer conversions instructions. */
4048 else if (insn_bits12_15 == 0x00)
4050 /* Convert float to integer instruction. */
4051 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4054 debug_printf ("float to int conversion");
4056 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4058 /* Convert integer to float instruction. */
4059 else if ((opcode >> 1) == 0x01 && !rmode)
4062 debug_printf ("int to float conversion");
4064 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4066 /* Move float to integer instruction. */
4067 else if ((opcode >> 1) == 0x03)
4070 debug_printf ("move float to int");
/* opcode bit 0 selects the direction: FMOV to X or to V.  */
4072 if (!(opcode & 0x01))
4073 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4075 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4078 return AARCH64_RECORD_UNKNOWN;
4081 return AARCH64_RECORD_UNKNOWN;
4084 return AARCH64_RECORD_UNKNOWN;
4086 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4089 debug_printf ("SIMD copy");
4091 /* Advanced SIMD copy instructions. */
4092 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4093 && !bit (aarch64_insn_r->aarch64_insn, 15)
4094 && bit (aarch64_insn_r->aarch64_insn, 10))
/* SMOV/UMOV-style element moves land in an X register.  */
4096 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4097 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4099 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4102 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4104 /* All remaining floating point or advanced SIMD instructions. */
4108 debug_printf ("all remain");
4110 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4114 debug_printf ("\n");
/* Exactly one destination register is recorded by every path above.  */
4116 aarch64_insn_r->reg_rec_count++;
4117 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4118 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4120 return AARCH64_RECORD_SUCCESS;
4123 /* Decodes insns type and invokes its record handler. */
/* Top-level dispatcher: classify the instruction by bits <25:28> of
   the A64 main encoding table and invoke the matching record handler.
   Returns the handler's status, or AARCH64_RECORD_UNSUPPORTED when no
   class matches.  */
4126 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4128 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4130 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4131 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4132 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4133 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4135 /* Data processing - immediate instructions. */
4136 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4137 return aarch64_record_data_proc_imm (aarch64_insn_r);
4139 /* Branch, exception generation and system instructions. */
4140 if (ins_bit26 && !ins_bit27 && ins_bit28)
4141 return aarch64_record_branch_except_sys (aarch64_insn_r);
4143 /* Load and store instructions. */
4144 if (!ins_bit25 && ins_bit27)
4145 return aarch64_record_load_store (aarch64_insn_r);
4147 /* Data processing - register instructions. */
4148 if (ins_bit25 && !ins_bit26 && ins_bit27)
4149 return aarch64_record_data_proc_reg (aarch64_insn_r);
4151 /* Data processing - SIMD and floating point instructions. */
4152 if (ins_bit25 && ins_bit26 && ins_bit27)
4153 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
/* No class matched: tell the caller this insn cannot be recorded.  */
4155 return AARCH64_RECORD_UNSUPPORTED;
4158 /* Cleans up local record registers and memory allocations. */
/* Free the per-instruction register and memory record arrays that the
   REG_ALLOC / MEM_ALLOC record handlers attached to RECORD.
   NOTE(review): assumes xfree tolerates a NULL pointer for insns that
   recorded nothing — confirm against GDB's xfree contract.  */
4161 deallocate_reg_mem (insn_decode_record *record)
4163 xfree (record->aarch64_regs);
4164 xfree (record->aarch64_mems);
4168 namespace selftests {
/* Unit test: decode a PRFM (prefetch) instruction and check it is
   recorded as modifying no registers and no memory (see the
   SELF_CHECKs on reg_rec_count / mem_rec_count below).  Uses a NULL
   regcache, which is safe because PRFM's record path returns before
   any register read.  */
4171 aarch64_process_record_test (void)
4173 struct gdbarch_info info;
/* Build a bare aarch64 gdbarch to decode against.  */
4176 gdbarch_info_init (&info);
4177 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4179 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4180 SELF_CHECK (gdbarch != NULL);
4182 insn_decode_record aarch64_record;
4184 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4185 aarch64_record.regcache = NULL;
4186 aarch64_record.this_addr = 0;
4187 aarch64_record.gdbarch = gdbarch;
4189 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4190 aarch64_record.aarch64_insn = 0xf9800020;
4191 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4192 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4193 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4194 SELF_CHECK (aarch64_record.mem_rec_count == 0);
/* Release any record arrays (none expected for PRFM).  */
4196 deallocate_reg_mem (&aarch64_record);
4199 } // namespace selftests
4200 #endif /* GDB_SELF_TEST */
4202 /* Parse the current instruction and record the values of the registers and
4203 memory that will be changed in current instruction to record_arch_list
4204 return -1 if something is wrong. */
4207 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4208 CORE_ADDR insn_addr)
4210 uint32_t rec_no = 0;
4211 uint8_t insn_size = 4;
4213 gdb_byte buf[insn_size];
4214 insn_decode_record aarch64_record;
4216 memset (&buf[0], 0, insn_size);
4217 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4218 target_read_memory (insn_addr, &buf[0], insn_size);
4219 aarch64_record.aarch64_insn
4220 = (uint32_t) extract_unsigned_integer (&buf[0],
4222 gdbarch_byte_order (gdbarch));
4223 aarch64_record.regcache = regcache;
4224 aarch64_record.this_addr = insn_addr;
4225 aarch64_record.gdbarch = gdbarch;
4227 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4228 if (ret == AARCH64_RECORD_UNSUPPORTED)
4230 printf_unfiltered (_("Process record does not support instruction "
4231 "0x%0x at address %s.\n"),
4232 aarch64_record.aarch64_insn,
4233 paddress (gdbarch, insn_addr));
4239 /* Record registers. */
4240 record_full_arch_list_add_reg (aarch64_record.regcache,
4242 /* Always record register CPSR. */
4243 record_full_arch_list_add_reg (aarch64_record.regcache,
4244 AARCH64_CPSR_REGNUM);
4245 if (aarch64_record.aarch64_regs)
4246 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4247 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4248 aarch64_record.aarch64_regs[rec_no]))
4251 /* Record memories. */
4252 if (aarch64_record.aarch64_mems)
4253 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4254 if (record_full_arch_list_add_mem
4255 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4256 aarch64_record.aarch64_mems[rec_no].len))
4259 if (record_full_arch_list_add_end ())
4263 deallocate_reg_mem (&aarch64_record);