/* Common target dependent code for GDB on AArch64 systems.

   Copyright (C) 2009-2018 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "reggroups.h"
#include "arch-utils.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "dwarf2-frame.h"
#include "prologue-value.h"
#include "target-descriptions.h"
#include "user-regs.h"
#include "aarch64-tdep.h"
#include "elf/aarch64.h"
#include "record-full.h"
#include "arch/aarch64-insn.h"
#include "opcode/aarch64.h"
#define submask(x) ((1L << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
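/* For example, in a 32-bit A64 instruction word INSN the Rd field
   occupies bits 0..4 and Rn bits 5..9, so (illustrative only):

     unsigned rd = bits (insn, 0, 4);
     unsigned rn = bits (insn, 5, 9);
     int sf = bit (insn, 31);

   The prologue analyzer below decodes whole instructions with
   aarch64_decode_insn rather than using these raw field macros.  */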
/* Pseudo register base numbers.  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
#define AARCH64_SVE_V0_REGNUM (AARCH64_B0_REGNUM + 32)
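/* Assuming AARCH64_D_REGISTER_COUNT is 32, these bases work out to
   Q0 = 0, D0 = 32, S0 = 64, H0 = 96, B0 = 128 and SVE V0 = 160, all
   relative to gdbarch_num_regs (gdbarch).  */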
/* All possible aarch64 target descriptors.  */
struct target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1];
/* The standard register names, and all the valid aliases for them.  */
static const struct
{
  const char *const name;
  int regnum;
} aarch64_register_aliases[] =
{
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},

  /* 32-bit register names.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},

  /* specials */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
};
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
  "pc", "cpsr"
};
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
  "fpsr", "fpcr"
};
/* The SVE 'Z' and 'P' registers.  */
static const char *const aarch64_sve_register_names[] =
{
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_SVE_Z0_REGNUM!  */
  "z0", "z1", "z2", "z3",
  "z4", "z5", "z6", "z7",
  "z8", "z9", "z10", "z11",
  "z12", "z13", "z14", "z15",
  "z16", "z17", "z18", "z19",
  "z20", "z21", "z22", "z23",
  "z24", "z25", "z26", "z27",
  "z28", "z29", "z30", "z31",

  "p0", "p1", "p2", "p3",
  "p4", "p5", "p6", "p7",
  "p8", "p9", "p10", "p11",
  "p12", "p13", "p14", "p15",
  "ffr", "vg"
};
/* AArch64 prologue cache structure.  */
struct aarch64_prologue_cache
{
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  CORE_ADDR func;

  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  CORE_ADDR prev_pc;

  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  CORE_ADDR prev_sp;

  /* Is the target available to read from?  */
  int available_p;

  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  int framesize;

  /* The register used to hold the frame pointer for this frame.  */
  int framereg;

  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
};
static void
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
}
/* Abstract instruction reader.  */

class abstract_instruction_reader
{
public:
  /* Read in one instruction.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
};

/* Instruction reader from real target.  */

class instruction_reader : public abstract_instruction_reader
{
public:
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    return read_code_unsigned_integer (memaddr, len, byte_order);
  }
};
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.  */
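/* A typical frame-pointer-based prologue recognized by this analysis
   (taken from the self test further below) is:

     stp x29, x30, [sp, #-272]!    FP/LR pushed, 272 bytes allocated
     mov x29, sp                   frame pointer established

   after which x29/x30 are recorded at offsets -272/-264 from the
   caller's stack pointer.  */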
static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
{
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  int i;

  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];

  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));

  for (; start < limit; start += 4)
    {
      uint32_t insn;
      aarch64_inst inst;

      insn = reader.read (start, 4, byte_order_for_code);

      if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
        break;
      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);

          if (inst.opcode->op == OP_ADD)
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
            }
          else
            {
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
            }
        }
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
        {
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);

          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == branch_imm)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == condbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == branch_reg)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->iclass == compbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else if (inst.opcode->op == OP_MOVZ)
        {
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
        }
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
        {
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);

          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
            regs[rd] = regs[rm];
          else
            {
              if (aarch64_debug)
                {
                  debug_printf ("aarch64: prologue analysis gave up "
                                "addr=%s opcode=0x%x (orr x register)\n",
                                core_addr_to_string_nz (start), insn);
                }
              break;
            }
        }
      else if (inst.opcode->op == OP_STUR)
        {
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);

          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);

          stack.store (pv_add_constant (regs[rn],
                                        inst.operands[1].addr.offset.imm),
                       is64 ? 8 : 4, regs[rt]);
        }
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
        {
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rt1;
          unsigned rt2;
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;

          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);

          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
            break;

          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
            break;

          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only bottom 64-bit of each V register (D register) need
                 to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm), 8,
                       regs[rt1]);
          stack.store (pv_add_constant (regs[rn], imm + 8), 8,
                       regs[rt2]);

          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
        {
          /* STR (immediate) */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
          int is64
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);

          if (inst.operands[0].type == AARCH64_OPND_Ft)
            {
              /* Only bottom 64-bit of each V register (D register) need
                 to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt += AARCH64_X_REGISTER_COUNT;
            }

          stack.store (pv_add_constant (regs[rn], imm),
                       is64 ? 8 : 4, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
        }
      else if (inst.opcode->iclass == testbranch)
        {
          /* Stop analysis on branch.  */
          break;
        }
      else
        {
          if (aarch64_debug)
            {
              debug_printf ("aarch64: prologue analysis gave up addr=%s"
                            " opcode=0x%x\n",
                            core_addr_to_string_nz (start), insn);
            }
          break;
        }
    }
  if (cache == NULL)
    return start;

  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
    }
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
    {
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
    }
  else
    {
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
    }

  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    {
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
    }

  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    {
      int regnum = gdbarch_num_regs (gdbarch);
      CORE_ADDR offset;

      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
                          &offset))
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
    }

  return start;
}
static CORE_ADDR
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
{
  instruction_reader reader;

  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
                                   reader);
}
#if GDB_SELF_TEST

namespace selftests {

/* Instruction reader from manually cooked instruction sequences.  */

class instruction_reader_test : public abstract_instruction_reader
{
public:
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
  : m_insns (insns), m_insns_size (SIZE)
  {}

  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    override
  {
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);

    return m_insns[memaddr / 4];
  }

private:
  const uint32_t *m_insns;
  size_t m_insns_size;
};
static void
aarch64_analyze_prologue_test (void)
{
  struct gdbarch_info info;

  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");

  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);

  /* Test the simple prologue in which frame pointer is used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);

    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                    == -1);
      }
  }
  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
  {
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);

    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32] */
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    };
    instruction_reader_test reader (insns);

    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);

    SELF_CHECK (end == 4 * 5);

    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);

    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      {
        if (i == 1)
          SELF_CHECK (cache.saved_regs[i].addr == -16);
        else if (i == 19)
          SELF_CHECK (cache.saved_regs[i].addr == -48);
        else
          SELF_CHECK (cache.saved_regs[i].addr == -1);
      }

    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      {
        int regnum = gdbarch_num_regs (gdbarch);

        if (i == 0)
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -24);
        else
          SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
                      == -1);
      }
  }
}
} // namespace selftests
#endif /* GDB_SELF_TEST */
/* Implement the "skip_prologue" gdbarch method.  */

static CORE_ADDR
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  CORE_ADDR func_addr, limit_pc;

  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
    {
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);

      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
    }

  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */

  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  if (limit_pc == 0)
    limit_pc = pc + 128;	/* Magic.  */

  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
}
/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */

static void
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
{
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);

  cache->prev_pc = prev_pc;

  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;

  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
                                &prologue_end))
    {
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);

      if (sal.line == 0)
        {
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
        }
      else if (sal.end < prologue_end)
        {
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
        }

      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
    }
  else
    {
      CORE_ADDR frame_loc;

      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      if (frame_loc == 0)
        return;

      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
    }
}
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */

static void
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
{
  CORE_ADDR unwound_fp;
  int reg;

  aarch64_scan_prologue (this_frame, cache);

  if (cache->framereg == -1)
    return;

  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  if (unwound_fp == 0)
    return;

  cache->prev_sp = unwound_fp + cache->framesize;

  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;

  cache->func = get_frame_func (this_frame);

  cache->available_p = 1;
}
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      aarch64_make_prologue_cache_1 (this_frame, cache);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}
/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
                                           void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;

  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;

  return UNWIND_NO_REASON;
}
/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */

static void
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  else
    *this_id = frame_id_build (cache->prev_sp, cache->func);
}
/* Implement the "prev_register" frame_unwind method.  */

static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
    {
      CORE_ADDR lr;

      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
    }

  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
                                      cache->prev_sp);

  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
                                       prev_regnum);
}
/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
{
  NORMAL_FRAME,
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  NULL,
  default_frame_sniffer
};
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */

static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache;

  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;

  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  *this_cache = cache;

  TRY
    {
      cache->prev_sp = get_frame_register_unsigned (this_frame,
                                                    AARCH64_SP_REGNUM);
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
    }
  END_CATCH

  return cache;
}
/* Implement the "stop_reason" frame_unwind method.  */

static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}
/* Our frame ID for a stub frame is the current SP and LR.  */

static void
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);

  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  else
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
}
/* Implement the "sniffer" frame_unwind method.  */

static int
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
{
  CORE_ADDR addr_in_block;
  gdb_byte dummy[4];

  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub unwinder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
    return 1;

  return 0;
}
/* AArch64 stub unwinder.  */
struct frame_unwind aarch64_stub_unwind =
{
  NORMAL_FRAME,
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  NULL,
  aarch64_stub_unwind_sniffer
};
/* Return the frame base address of *THIS_FRAME.  */

static CORE_ADDR
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
{
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);

  return cache->prev_sp - cache->framesize;
}

/* AArch64 default frame base information.  */
struct frame_base aarch64_normal_base =
{
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
};
/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */

static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_id_build (get_frame_register_unsigned (this_frame,
                                                      AARCH64_SP_REGNUM),
                         get_frame_pc (this_frame));
}
/* Implement the "unwind_pc" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  CORE_ADDR pc
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);

  return pc;
}

/* Implement the "unwind_sp" gdbarch method.  */

static CORE_ADDR
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
}
/* Return the value of the REGNUM register in the previous frame of
   *THIS_FRAME.  */

static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
{
  CORE_ADDR lr;

  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);

    default:
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
    }
}
/* Implement the "init_reg" dwarf2_frame_ops method.  */

static void
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
{
  switch (regnum)
    {
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
      break;

    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
      break;
    }
}
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */

typedef struct
{
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;

  /* Size in bytes of value to pass on stack.  */
  int len;
} stack_item_t;

DEF_VEC_O (stack_item_t);
/* Return the alignment (in bytes) of the given type.  */

static int
aarch64_type_align (struct type *t)
{
  int n;
  int align;
  int falign;

  t = check_typedef (t);
  switch (TYPE_CODE (t))
    {
    default:
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
      return 4;

    case TYPE_CODE_PTR:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_INT:
    case TYPE_CODE_FLT:
    case TYPE_CODE_SET:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_REF:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);

    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
        {
          /* Use the natural alignment for vector types (the same for
             scalar type), but the maximum alignment is 128-bit.  */
          if (TYPE_LENGTH (t) > 16)
            return 16;
          else
            return TYPE_LENGTH (t);
        }
      else
        return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));

    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      align = 1;
      for (n = 0; n < TYPE_NFIELDS (t); n++)
        {
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
          if (falign > align)
            align = falign;
        }
      return align;
    }
}
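/* Under these rules, for example, "struct { int32_t a; double b; }"
   aligns to 8 bytes (its most-aligned field), while a 16-byte short
   vector aligns to its full 16-byte length.  */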
/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */

static int
is_hfa_or_hva (struct type *ty)
{
  switch (TYPE_CODE (ty))
    {
    case TYPE_CODE_ARRAY:
      {
        struct type *target_ty = TYPE_TARGET_TYPE (ty);

        if (TYPE_VECTOR (ty))
          return 0;

        if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
            && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
                || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
                    && TYPE_VECTOR (target_ty))))
          return 1;
        break;
      }

    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
      {
        /* HFA or HVA has at most four members.  */
        if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
          {
            struct type *member0_type;

            member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
            if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
                || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
                    && TYPE_VECTOR (member0_type)))
              {
                int i;

                for (i = 0; i < TYPE_NFIELDS (ty); i++)
                  {
                    struct type *member1_type;

                    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                        || (TYPE_LENGTH (member0_type)
                            != TYPE_LENGTH (member1_type)))
                      return 0;
                  }
                return 1;
              }
          }
        return 0;
      }

    default:
      break;
    }

  return 0;
}
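/* For example, "struct { float x, y; }" and "struct { double a, b, c; }"
   are HFAs (at most four members, all of the same floating-point type)
   and travel in consecutive V registers, whereas
   "struct { float x; double y; }" is not, because the member types
   differ.  */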
/* AArch64 function call information structure.  */
struct aarch64_call_info
{
  /* The current argument number.  */
  unsigned argnum;

  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  unsigned ngrn;

  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  unsigned nsrn;

  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  unsigned nsaa;

  /* Stack item vector.  */
  VEC(stack_item_t) *si;
};
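/* As a worked example (hypothetical call, following the PCS rules
   implemented below): for f (int a, double b, struct big s), where
   "struct big" is larger than 16 bytes, a goes in X0 (NGRN 0 -> 1),
   b in V0 (NSRN 0 -> 1), and s is copied to a 16-byte-aligned slot
   below SP, its address then passed like a pointer argument in X1.  */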
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */

static void
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
           struct value *arg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);

  info->argnum++;

  while (len > 0)
    {
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
                                                   byte_order);

      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
        }
      regcache_cooked_write_unsigned (regcache, regnum, regval);
      regnum++;
      len -= partial_len;
      buf += partial_len;
      info->ngrn++;
    }
}
/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */

static int
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
{
  if (info->nsrn < 8)
    {
      int regnum = AARCH64_V0_REGNUM + info->nsrn;
      gdb_byte reg[V_REGISTER_SIZE];

      info->argnum++;
      info->nsrn++;

      memset (reg, 0, sizeof (reg));
      /* PCS C.1, the argument is allocated to the least significant
         bits of V register.  */
      memcpy (reg, buf, len);
      regcache->cooked_write (regnum, reg);

      if (aarch64_debug)
        {
          debug_printf ("arg %d in %s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum));
        }
      return 1;
    }

  return 0;
}
/* Marshall an argument onto the stack.  */

static void
pass_on_stack (struct aarch64_call_info *info, struct type *type,
               struct value *arg)
{
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  int align;
  stack_item_t item;

  info->argnum++;

  align = aarch64_type_align (type);

  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     natural alignment of the argument's type.  */
  align = align_up (align, 8);

  /* The AArch64 PCS requires at most doubleword alignment.  */
  if (align > 16)
    align = 16;

  if (aarch64_debug)
    {
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
                    info->nsaa);
    }

  item.len = len;
  item.data = buf;
  VEC_safe_push (stack_item_t, info->si, &item);

  info->nsaa += len;
  if (info->nsaa & (align - 1))
    {
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));

      item.len = pad;
      item.data = NULL;

      VEC_safe_push (stack_item_t, info->si, &item);
      info->nsaa += pad;
    }
}
/* Marshall an argument into a sequence of one or more consecutive X
   registers or, if insufficient X registers are available then onto
   the stack.  */

static void
pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
                    struct aarch64_call_info *info, struct type *type,
                    struct value *arg)
{
  int len = TYPE_LENGTH (type);
  int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;

  /* PCS C.13 - Pass in registers if we have enough spare.  */
  if (info->ngrn + nregs <= 8)
    {
      pass_in_x (gdbarch, regcache, info, type, arg);
      info->ngrn += nregs;
    }
  else
    {
      info->ngrn = 8;
      pass_on_stack (info, type, arg);
    }
}
/* Pass a value in a V register, or on the stack if insufficient are
   available.  */

static void
pass_in_v_or_stack (struct gdbarch *gdbarch,
                    struct regcache *regcache,
                    struct aarch64_call_info *info,
                    struct type *type,
                    struct value *arg)
{
  if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
                  value_contents (arg)))
    pass_on_stack (info, type, arg);
}
/* Implement the "push_dummy_call" gdbarch method.  */

static CORE_ADDR
aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                         struct regcache *regcache, CORE_ADDR bp_addr,
                         int nargs,
                         struct value **args, CORE_ADDR sp, int struct_return,
                         CORE_ADDR struct_addr)
{
  int argnum;
  struct aarch64_call_info info;
  struct type *func_type;
  struct type *return_type;
  int lang_struct_return;

  memset (&info, 0, sizeof (info));

  /* We need to know what the type of the called function is in order
     to determine the number of named/anonymous arguments for the
     actual argument placement, and the return type in order to handle
     return value correctly.

     The generic code above us views the decision of return in memory
     or return in registers as a two-stage process.  The language
     handler is consulted first and may decide to return in memory (e.g.
     class with copy constructor returned by value), this will cause
     the generic code to allocate space AND insert an initial leading
     argument.

     If the language code does not decide to pass in memory then the
     target code is consulted.

     If the language code decides to pass in memory we want to move
     the pointer inserted as the initial argument from the argument
     list and into X8, the conventional AArch64 struct return pointer
     register.

     This is slightly awkward, ideally the flag "lang_struct_return"
     would be passed to the target's implementation of push_dummy_call.
     Rather than change the target interface we call the language code
     directly ourselves.  */

  func_type = check_typedef (value_type (function));

  /* Dereference function pointer types.  */
  if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
    func_type = TYPE_TARGET_TYPE (func_type);

  gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
              || TYPE_CODE (func_type) == TYPE_CODE_METHOD);

  /* If language_pass_by_reference () returned true we will have been
     given an additional initial argument, a hidden pointer to the
     return slot in memory.  */
  return_type = TYPE_TARGET_TYPE (func_type);
  lang_struct_return = language_pass_by_reference (return_type);

  /* Set the return address.  For the AArch64, the return breakpoint
     is always at BP_ADDR.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);

  /* If we were given an initial argument for the return slot because
     lang_struct_return was true, lose it.  */
  if (lang_struct_return)
    {
      args++;
      nargs--;
    }

  /* The struct_return pointer occupies X8.  */
  if (struct_return || lang_struct_return)
    {
      if (aarch64_debug)
        {
          debug_printf ("struct return in %s = 0x%s\n",
                        gdbarch_register_name (gdbarch,
                                               AARCH64_STRUCT_RETURN_REGNUM),
                        paddress (gdbarch, struct_addr));
        }
      regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
                                      struct_addr);
    }
  for (argnum = 0; argnum < nargs; argnum++)
    {
      struct value *arg = args[argnum];
      struct type *arg_type;
      int len;

      arg_type = check_typedef (value_type (arg));
      len = TYPE_LENGTH (arg_type);

      switch (TYPE_CODE (arg_type))
        {
        case TYPE_CODE_INT:
        case TYPE_CODE_BOOL:
        case TYPE_CODE_CHAR:
        case TYPE_CODE_RANGE:
        case TYPE_CODE_ENUM:
          if (len < 4)
            {
              /* Promote to 32 bit integer.  */
              if (TYPE_UNSIGNED (arg_type))
                arg_type = builtin_type (gdbarch)->builtin_uint32;
              else
                arg_type = builtin_type (gdbarch)->builtin_int32;
              arg = value_cast (arg_type, arg);
            }
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_COMPLEX:
          if (info.nsrn <= 6)
            {
              const bfd_byte *buf = value_contents (arg);
              struct type *target_type =
                check_typedef (TYPE_TARGET_TYPE (arg_type));

              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type), buf);
              pass_in_v (gdbarch, regcache, &info,
                         TYPE_LENGTH (target_type),
                         buf + TYPE_LENGTH (target_type));
            }
          else
            {
              info.nsrn = 8;
              pass_on_stack (&info, arg_type, arg);
            }
          break;
        case TYPE_CODE_FLT:
          pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        case TYPE_CODE_STRUCT:
        case TYPE_CODE_ARRAY:
        case TYPE_CODE_UNION:
          if (is_hfa_or_hva (arg_type))
            {
              int elements = TYPE_NFIELDS (arg_type);

              /* Homogeneous Aggregates */
              if (info.nsrn + elements < 8)
                {
                  int i;

                  for (i = 0; i < elements; i++)
                    {
                      /* We know that we have sufficient registers
                         available therefore this will never fallback
                         to the stack.  */
                      struct value *field =
                        value_primitive_field (arg, 0, i, arg_type);
                      struct type *field_type =
                        check_typedef (value_type (field));

                      pass_in_v_or_stack (gdbarch, regcache, &info,
                                          field_type, field);
                    }
                }
              else
                {
                  info.nsrn = 8;
                  pass_on_stack (&info, arg_type, arg);
                }
            }
          else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
                   && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
            {
              /* Short vector types are passed in V registers.  */
              pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else if (len > 16)
            {
              /* PCS B.7 Aggregates larger than 16 bytes are passed by
                 invisible reference.  */

              /* Allocate aligned storage.  */
              sp = align_down (sp - len, 16);

              /* Write the real data into the stack.  */
              write_memory (sp, value_contents (arg), len);

              /* Construct the indirection.  */
              arg_type = lookup_pointer_type (arg_type);
              arg = value_from_pointer (arg_type, sp);
              pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
            }
          else
            /* PCS C.15 / C.18 multiple values pass.  */
            pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;

        default:
          pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
          break;
        }
    }

  /* Make sure stack retains 16 byte alignment.  */
  if (info.nsaa & 15)
    sp -= 16 - (info.nsaa & 15);

  while (!VEC_empty (stack_item_t, info.si))
    {
      stack_item_t *si = VEC_last (stack_item_t, info.si);

      sp -= si->len;
      if (si->data != NULL)
        write_memory (sp, si->data, si->len);
      VEC_pop (stack_item_t, info.si);
    }

  VEC_free (stack_item_t, info.si);

  /* Finally, update the SP register.  */
  regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);

  return sp;
}
/* Implement the "frame_align" gdbarch method.  */

static CORE_ADDR
aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  /* Align the stack to sixteen bytes.  */
  return sp & ~(CORE_ADDR) 15;
}
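/* For example, an incoming SP of 0x7ffffffff8e9 aligns down to
   0x7ffffffff8e0; the low four bits are simply cleared.  */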
/* Return the type for an AdvSIMD Q register.  */

static struct type *
aarch64_vnq_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnq_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint128;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int128;
      append_composite_type_field (t, "s", elem);

      tdep->vnq_type = t;
    }

  return tdep->vnq_type;
}

/* Return the type for an AdvSIMD D register.  */

static struct type *
aarch64_vnd_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnd_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_double;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint64;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int64;
      append_composite_type_field (t, "s", elem);

      tdep->vnd_type = t;
    }

  return tdep->vnd_type;
}

/* Return the type for an AdvSIMD S register.  */

static struct type *
aarch64_vns_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vns_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_float;
      append_composite_type_field (t, "f", elem);

      elem = builtin_type (gdbarch)->builtin_uint32;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int32;
      append_composite_type_field (t, "s", elem);

      tdep->vns_type = t;
    }

  return tdep->vns_type;
}

/* Return the type for an AdvSIMD H register.  */

static struct type *
aarch64_vnh_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnh_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint16;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int16;
      append_composite_type_field (t, "s", elem);

      tdep->vnh_type = t;
    }

  return tdep->vnh_type;
}

/* Return the type for an AdvSIMD B register.  */

static struct type *
aarch64_vnb_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnb_type == NULL)
    {
      struct type *t;
      struct type *elem;

      t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
                               TYPE_CODE_UNION);

      elem = builtin_type (gdbarch)->builtin_uint8;
      append_composite_type_field (t, "u", elem);

      elem = builtin_type (gdbarch)->builtin_int8;
      append_composite_type_field (t, "s", elem);

      tdep->vnb_type = t;
    }

  return tdep->vnb_type;
}

/* Return the type for an AdvSIMD V register.  */

static struct type *
aarch64_vnv_type (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (tdep->vnv_type == NULL)
    {
      struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
                                            TYPE_CODE_UNION);

      append_composite_type_field (t, "d", aarch64_vnd_type (gdbarch));
      append_composite_type_field (t, "s", aarch64_vns_type (gdbarch));
      append_composite_type_field (t, "h", aarch64_vnh_type (gdbarch));
      append_composite_type_field (t, "b", aarch64_vnb_type (gdbarch));
      append_composite_type_field (t, "q", aarch64_vnq_type (gdbarch));

      tdep->vnv_type = t;
    }

  return tdep->vnv_type;
}
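/* These union types are what the user sees when printing vector
   registers; e.g. "print $d0" displays a value such as
   "{f = 1.5, u = 4609434218613702656, s = ...}", with individual
   views selectable as "print $d0.f".  */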
/* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */

static int
aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
    return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;

  if (reg == AARCH64_DWARF_SP)
    return AARCH64_SP_REGNUM;

  if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
    return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;

  return -1;
}
/* Implement the "print_insn" gdbarch method.  */

static int
aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
{
  info->symbols = NULL;
  return default_print_insn (memaddr, info);
}
/* AArch64 BRK software debug mode instruction.
   Note that AArch64 code is always little-endian.
   1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
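/* Read as a little-endian 32-bit word, this byte sequence is
   0xd4200000, i.e. "brk #0".  */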
/* Extract from an array REGS containing the (raw) register state a
   function return value of type TYPE, and copy that, in virtual
   format, into VALBUF.  */

static void
aarch64_extract_return_value (struct type *type, struct regcache *regs,
                              gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      regs->cooked_read (AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, len);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_IS_REFERENCE (type)
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      /* If the type is a plain integer, then the access is
         straight-forward.  Otherwise we have to play around a bit
         more.  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      ULONGEST tmp;

      while (len > 0)
        {
          /* By using store_unsigned_integer we avoid having to do
             anything special for small big-endian values.  */
          regcache_cooked_read_unsigned (regs, regno++, &tmp);
          store_unsigned_integer (valbuf,
                                  (len > X_REGISTER_SIZE
                                   ? X_REGISTER_SIZE : len), byte_order, tmp);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
    {
      int regno = AARCH64_V0_REGNUM;
      bfd_byte buf[V_REGISTER_SIZE];
      struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
      int len = TYPE_LENGTH (target_type);

      regs->cooked_read (regno, buf);
      memcpy (valbuf, buf, len);
      valbuf += len;

      regs->cooked_read (regno + 1, buf);
      memcpy (valbuf, buf, len);
    }
  else if (is_hfa_or_hva (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          bfd_byte buf[V_REGISTER_SIZE];

          if (aarch64_debug)
            {
              debug_printf ("read HFA or HVA return value element %d from %s\n",
                            i + 1,
                            gdbarch_register_name (gdbarch, regno));
            }
          regs->cooked_read (regno, buf);

          memcpy (valbuf, buf, len);
          valbuf += len;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
           && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
    {
      /* Short vector is returned in V register.  */
      gdb_byte buf[V_REGISTER_SIZE];

      regs->cooked_read (AARCH64_V0_REGNUM, buf);
      memcpy (valbuf, buf, TYPE_LENGTH (type));
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte buf[X_REGISTER_SIZE];

      while (len > 0)
        {
          regs->cooked_read (regno++, buf);
          memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}
/* Will a function return an aggregate type in memory or in a
   register?  Return 0 if an aggregate type can be returned in a
   register, 1 if it must be returned in memory.  */

static int
aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
{
  type = check_typedef (type);

  if (is_hfa_or_hva (type))
    {
      /* v0-v7 are used to return values and one register is allocated
         for one member.  However, HFA or HVA has at most four members.  */
      return 0;
    }

  if (TYPE_LENGTH (type) > 16)
    {
      /* PCS B.6 Aggregates larger than 16 bytes are passed by
         invisible reference.  */

      return 1;
    }

  return 0;
}
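/* So, for instance, "struct { int64_t a, b; }" (16 bytes) comes back
   in X0/X1, while a 24-byte struct is returned through a
   caller-allocated buffer whose address was passed in X8 (see
   aarch64_push_dummy_call above).  */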
/* Write into appropriate registers a function return value of type
   TYPE, given in virtual format.  */

static void
aarch64_store_return_value (struct type *type, struct regcache *regs,
                            const gdb_byte *valbuf)
{
  struct gdbarch *gdbarch = regs->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  if (TYPE_CODE (type) == TYPE_CODE_FLT)
    {
      bfd_byte buf[V_REGISTER_SIZE];
      int len = TYPE_LENGTH (type);

      memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
      regs->cooked_write (AARCH64_V0_REGNUM, buf);
    }
  else if (TYPE_CODE (type) == TYPE_CODE_INT
           || TYPE_CODE (type) == TYPE_CODE_CHAR
           || TYPE_CODE (type) == TYPE_CODE_BOOL
           || TYPE_CODE (type) == TYPE_CODE_PTR
           || TYPE_IS_REFERENCE (type)
           || TYPE_CODE (type) == TYPE_CODE_ENUM)
    {
      if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
        {
          /* Values of one word or less are zero/sign-extended and
             returned in r0.  */
          bfd_byte tmpbuf[X_REGISTER_SIZE];
          LONGEST val = unpack_long (type, valbuf);

          store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
          regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
        }
      else
        {
          /* Integral values greater than one word are stored in
             consecutive registers starting with r0.  This will always
             be a multiple of the register size.  */
          int len = TYPE_LENGTH (type);
          int regno = AARCH64_X0_REGNUM;

          while (len > 0)
            {
              regs->cooked_write (regno++, valbuf);
              len -= X_REGISTER_SIZE;
              valbuf += X_REGISTER_SIZE;
            }
        }
    }
  else if (is_hfa_or_hva (type))
    {
      int elements = TYPE_NFIELDS (type);
      struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
      int len = TYPE_LENGTH (member_type);
      int i;

      for (i = 0; i < elements; i++)
        {
          int regno = AARCH64_V0_REGNUM + i;
          bfd_byte tmpbuf[V_REGISTER_SIZE];

          if (aarch64_debug)
            {
              debug_printf ("write HFA or HVA return value element %d to %s\n",
                            i + 1,
                            gdbarch_register_name (gdbarch, regno));
            }

          memcpy (tmpbuf, valbuf, len);
          regs->cooked_write (regno, tmpbuf);
          valbuf += len;
        }
    }
  else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
           && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
    {
      /* Short vector.  */
      gdb_byte buf[V_REGISTER_SIZE];

      memcpy (buf, valbuf, TYPE_LENGTH (type));
      regs->cooked_write (AARCH64_V0_REGNUM, buf);
    }
  else
    {
      /* For a structure or union the behaviour is as if the value had
         been stored to word-aligned memory and then loaded into
         registers with 64-bit load instruction(s).  */
      int len = TYPE_LENGTH (type);
      int regno = AARCH64_X0_REGNUM;
      bfd_byte tmpbuf[X_REGISTER_SIZE];

      while (len > 0)
        {
          memcpy (tmpbuf, valbuf,
                  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
          regs->cooked_write (regno++, tmpbuf);
          len -= X_REGISTER_SIZE;
          valbuf += X_REGISTER_SIZE;
        }
    }
}
/* Implement the "return_value" gdbarch method.  */

static enum return_value_convention
aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
                      struct type *valtype, struct regcache *regcache,
                      gdb_byte *readbuf, const gdb_byte *writebuf)
{
  if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
      || TYPE_CODE (valtype) == TYPE_CODE_UNION
      || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
    {
      if (aarch64_return_in_memory (gdbarch, valtype))
        {
          if (aarch64_debug)
            debug_printf ("return value in memory\n");
          return RETURN_VALUE_STRUCT_CONVENTION;
        }
    }

  if (writebuf)
    aarch64_store_return_value (valtype, regcache, writebuf);

  if (readbuf)
    aarch64_extract_return_value (valtype, regcache, readbuf);

  if (aarch64_debug)
    debug_printf ("return value in registers\n");

  return RETURN_VALUE_REGISTER_CONVENTION;
}
/* Implement the "get_longjmp_target" gdbarch method.  */

static int
aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  CORE_ADDR jb_addr;
  gdb_byte buf[X_REGISTER_SIZE];
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);

  if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
                          X_REGISTER_SIZE))
    return 0;

  *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
  return 1;
}
/* Implement the "gen_return_address" gdbarch method.  */

static void
aarch64_gen_return_address (struct gdbarch *gdbarch,
                            struct agent_expr *ax, struct axs_value *value,
                            CORE_ADDR scope)
{
  value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
  value->kind = axs_lvalue_register;
  value->u.reg = AARCH64_LR_REGNUM;
}
/* Return the pseudo register name corresponding to register regnum.  */

static const char *
aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  static const char *const q_name[] =
    {
      "q0", "q1", "q2", "q3",
      "q4", "q5", "q6", "q7",
      "q8", "q9", "q10", "q11",
      "q12", "q13", "q14", "q15",
      "q16", "q17", "q18", "q19",
      "q20", "q21", "q22", "q23",
      "q24", "q25", "q26", "q27",
      "q28", "q29", "q30", "q31",
    };

  static const char *const d_name[] =
    {
      "d0", "d1", "d2", "d3",
      "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11",
      "d12", "d13", "d14", "d15",
      "d16", "d17", "d18", "d19",
      "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27",
      "d28", "d29", "d30", "d31",
    };

  static const char *const s_name[] =
    {
      "s0", "s1", "s2", "s3",
      "s4", "s5", "s6", "s7",
      "s8", "s9", "s10", "s11",
      "s12", "s13", "s14", "s15",
      "s16", "s17", "s18", "s19",
      "s20", "s21", "s22", "s23",
      "s24", "s25", "s26", "s27",
      "s28", "s29", "s30", "s31",
    };

  static const char *const h_name[] =
    {
      "h0", "h1", "h2", "h3",
      "h4", "h5", "h6", "h7",
      "h8", "h9", "h10", "h11",
      "h12", "h13", "h14", "h15",
      "h16", "h17", "h18", "h19",
      "h20", "h21", "h22", "h23",
      "h24", "h25", "h26", "h27",
      "h28", "h29", "h30", "h31",
    };

  static const char *const b_name[] =
    {
      "b0", "b1", "b2", "b3",
      "b4", "b5", "b6", "b7",
      "b8", "b9", "b10", "b11",
      "b12", "b13", "b14", "b15",
      "b16", "b17", "b18", "b19",
      "b20", "b21", "b22", "b23",
      "b24", "b25", "b26", "b27",
      "b28", "b29", "b30", "b31",
    };

  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return q_name[regnum - AARCH64_Q0_REGNUM];

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return d_name[regnum - AARCH64_D0_REGNUM];

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return s_name[regnum - AARCH64_S0_REGNUM];

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return h_name[regnum - AARCH64_H0_REGNUM];

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return b_name[regnum - AARCH64_B0_REGNUM];

  if (tdep->has_sve ())
    {
      static const char *const sve_v_name[] =
        {
          "v0", "v1", "v2", "v3",
          "v4", "v5", "v6", "v7",
          "v8", "v9", "v10", "v11",
          "v12", "v13", "v14", "v15",
          "v16", "v17", "v18", "v19",
          "v20", "v21", "v22", "v23",
          "v24", "v25", "v26", "v27",
          "v28", "v29", "v30", "v31",
        };

      if (regnum >= AARCH64_SVE_V0_REGNUM
          && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
        return sve_v_name[regnum - AARCH64_SVE_V0_REGNUM];
    }

  internal_error (__FILE__, __LINE__,
                  _("aarch64_pseudo_register_name: bad register number %d"),
                  regnum);
}
/* Implement the "pseudo_register_type" tdesc_arch_data method.  */

static struct type *
aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return aarch64_vnq_type (gdbarch);

  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return aarch64_vnd_type (gdbarch);

  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return aarch64_vns_type (gdbarch);

  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return aarch64_vnh_type (gdbarch);

  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return aarch64_vnb_type (gdbarch);

  if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
      && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
    return aarch64_vnv_type (gdbarch);

  internal_error (__FILE__, __LINE__,
                  _("aarch64_pseudo_register_type: bad register number %d"),
                  regnum);
}
2278 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2280 static int
2281 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2282 struct reggroup *group)
2284 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2286 regnum -= gdbarch_num_regs (gdbarch);
2288 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2289 return group == all_reggroup || group == vector_reggroup;
2290 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2291 return (group == all_reggroup || group == vector_reggroup
2292 || group == float_reggroup);
2293 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2294 return (group == all_reggroup || group == vector_reggroup
2295 || group == float_reggroup);
2296 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2297 return group == all_reggroup || group == vector_reggroup;
2298 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2299 return group == all_reggroup || group == vector_reggroup;
2300 else if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2301 && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2302 return group == all_reggroup || group == vector_reggroup;
2304 return group == all_reggroup;
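
/* For illustration only (an editorial sketch, not part of GDB): the
   three tdesc methods above all decode one pseudo register numbering
   scheme, in which the Q/D/S/H/B/V bases defined at the top of this
   file partition the pseudo register space.  The helper name and the
   returned strings below are invented for this example.  */

static const char *
aarch64_pseudo_class_example (struct gdbarch *gdbarch, int regnum)
{
  /* Pseudo register numbers start after the raw registers.  */
  regnum -= gdbarch_num_regs (gdbarch);

  if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
    return "Q (128-bit scalar)";
  if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
    return "D (64-bit scalar)";
  if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
    return "S (32-bit scalar)";
  if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
    return "H (16-bit scalar)";
  if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
    return "B (8-bit scalar)";
  if (regnum >= AARCH64_SVE_V0_REGNUM
      && regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
    return "V (SVE pseudo V register)";
  return "not an AArch64 pseudo register";
}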
2307 /* Helper for aarch64_pseudo_read_value. */
2309 static struct value *
2310 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2311 readable_regcache *regcache, int regnum_offset,
2312 int regsize, struct value *result_value)
2314 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2316 /* Enough space for a full vector register. */
2317 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2318 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2320 if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2321 mark_value_bytes_unavailable (result_value, 0,
2322 TYPE_LENGTH (value_type (result_value)));
2323 else
2324 memcpy (value_contents_raw (result_value), reg_buf, regsize);
2326 return result_value;
2329 /* Implement the "pseudo_register_read_value" gdbarch method. */
2331 static struct value *
2332 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2333 int regnum)
2335 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2336 struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2338 VALUE_LVAL (result_value) = lval_register;
2339 VALUE_REGNUM (result_value) = regnum;
2341 regnum -= gdbarch_num_regs (gdbarch);
2343 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2344 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2345 regnum - AARCH64_Q0_REGNUM,
2346 Q_REGISTER_SIZE, result_value);
2348 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2349 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2350 regnum - AARCH64_D0_REGNUM,
2351 D_REGISTER_SIZE, result_value);
2353 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2354 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2355 regnum - AARCH64_S0_REGNUM,
2356 S_REGISTER_SIZE, result_value);
2358 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2359 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2360 regnum - AARCH64_H0_REGNUM,
2361 H_REGISTER_SIZE, result_value);
2363 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2364 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2365 regnum - AARCH64_B0_REGNUM,
2366 B_REGISTER_SIZE, result_value);
2368 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2369 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2370 return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2371 regnum - AARCH64_SVE_V0_REGNUM,
2372 V_REGISTER_SIZE, result_value);
2374 gdb_assert_not_reached ("regnum out of bound");
2377 /* Helper for aarch64_pseudo_write. */
2379 static void
2380 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2381 int regnum_offset, int regsize, const gdb_byte *buf)
2383 unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2385 /* Enough space for a full vector register. */
2386 gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2387 gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2389 /* Ensure the register buffer is zero, we want gdb writes of the
2390 various 'scalar' pseudo registers to behave like architectural
2391 writes: register width bytes are written, the remainder are set to
2392 zero.  */
2393 memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2395 memcpy (reg_buf, buf, regsize);
2396 regcache->raw_write (v_regnum, reg_buf);
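
/* Worked example (editorial sketch, never called): writing the pseudo
   "s0" through the helper above behaves like an architectural write,
   i.e. the low S_REGISTER_SIZE bytes of the raw v0 register receive
   the data and the remaining bytes read back as zero.  */

static void
aarch64_pseudo_write_s0_example (struct gdbarch *gdbarch,
				 struct regcache *regcache)
{
  /* 1.0f in little-endian byte order.  */
  const gdb_byte one_fp32[4] = { 0x00, 0x00, 0x80, 0x3f };

  /* Offset 0 selects s0; bytes 4..15 of the raw v0 become zero.  */
  aarch64_pseudo_write_1 (gdbarch, regcache, 0, S_REGISTER_SIZE, one_fp32);
}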
2399 /* Implement the "pseudo_register_write" gdbarch method. */
2401 static void
2402 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2403 int regnum, const gdb_byte *buf)
2405 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2406 regnum -= gdbarch_num_regs (gdbarch);
2408 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2409 return aarch64_pseudo_write_1 (gdbarch, regcache,
2410 regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2411 buf);
2413 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2414 return aarch64_pseudo_write_1 (gdbarch, regcache,
2415 regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2416 buf);
2418 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2419 return aarch64_pseudo_write_1 (gdbarch, regcache,
2420 regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2421 buf);
2423 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2424 return aarch64_pseudo_write_1 (gdbarch, regcache,
2425 regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2426 buf);
2428 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2429 return aarch64_pseudo_write_1 (gdbarch, regcache,
2430 regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2431 buf);
2433 if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2434 && regnum < AARCH64_SVE_V0_REGNUM + 32)
2435 return aarch64_pseudo_write_1 (gdbarch, regcache,
2436 regnum - AARCH64_SVE_V0_REGNUM,
2437 V_REGISTER_SIZE, buf);
2439 gdb_assert_not_reached ("regnum out of bound");
2442 /* Callback function for user_reg_add. */
2444 static struct value *
2445 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2447 const int *reg_p = (const int *) baton;
2449 return value_of_register (*reg_p, frame);
2453 /* Implement the "software_single_step" gdbarch method, needed to
2454 single step through atomic sequences on AArch64. */
2456 static std::vector<CORE_ADDR>
2457 aarch64_software_single_step (struct regcache *regcache)
2459 struct gdbarch *gdbarch = regcache->arch ();
2460 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2461 const int insn_size = 4;
2462 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2463 CORE_ADDR pc = regcache_read_pc (regcache);
2464 CORE_ADDR breaks[2] = { -1, -1 };
2465 CORE_ADDR loc = pc;
2466 CORE_ADDR closing_insn = 0;
2467 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2468 byte_order_for_code);
2469 int index;
2470 int insn_count;
2471 int bc_insn_count = 0; /* Conditional branch instruction count.  */
2472 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
2473 aarch64_inst inst;
2475 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2476 return {};
2478 /* Look for a Load Exclusive instruction which begins the sequence.  */
2479 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2480 return {};
2482 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2484 loc += insn_size;
2485 insn = read_memory_unsigned_integer (loc, insn_size,
2486 byte_order_for_code);
2488 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2489 return {};
2490 /* Check if the instruction is a conditional branch. */
2491 if (inst.opcode->iclass == condbranch)
2493 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2495 if (bc_insn_count >= 1)
2496 return {};
2498 /* It is, so we'll try to set a breakpoint at the destination.  */
2499 breaks[1] = loc + inst.operands[0].imm.value;
2501 bc_insn_count++;
2502 last_breakpoint++;
2505 /* Look for the Store Exclusive which closes the atomic sequence. */
2506 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2508 closing_insn = loc;
2509 break;
2513 /* We didn't find a closing Store Exclusive instruction, so fall back.  */
2514 if (!closing_insn)
2515 return {};
2517 /* Insert breakpoint after the end of the atomic sequence. */
2518 breaks[0] = loc + insn_size;
2520 /* Check for duplicated breakpoints, and also check that the second
2521 breakpoint is not within the atomic sequence. */
2522 if (last_breakpoint
2523 && (breaks[1] == breaks[0]
2524 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2525 last_breakpoint = 0;
2527 std::vector<CORE_ADDR> next_pcs;
2529 /* Insert the breakpoint at the end of the sequence, and one at the
2530 destination of the conditional branch, if it exists. */
2531 for (index = 0; index <= last_breakpoint; index++)
2532 next_pcs.push_back (breaks[index]);
2534 return next_pcs;
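
/* For example (illustrative addresses), a typical atomic increment
   loop looks like this:

     0x400610  ldaxr  w2, [x0]      <- PC: Load Exclusive opens the sequence
     0x400614  add    w2, w2, #1
     0x400618  stlxr  w3, w2, [x0]  <- closing Store Exclusive
     0x40061c  cbnz   w3, 0x400610  <- breaks[0] = closing insn + 4

   CBNZ is a compare-and-branch rather than a B.cond, so no second
   breakpoint is placed; the single breakpoint immediately after the
   Store Exclusive lets the whole exclusive sequence run without being
   interrupted, which is what keeps the exclusive monitor intact.  */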
2537 struct aarch64_displaced_step_closure : public displaced_step_closure
2538 {
2539 /* True if a conditional instruction, such as B.COND or TBZ, is being
2540 displaced stepped.  */
2542 bool cond = false;
2543 /* PC adjustment offset after displaced stepping. */
2544 int32_t pc_adjust = 0;
2545 };
2547 /* Data when visiting instructions for displaced stepping. */
2549 struct aarch64_displaced_step_data
2550 {
2551 struct aarch64_insn_data base;
2553 /* The address at which the instruction will be executed.  */
2554 CORE_ADDR new_addr;
2555 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2556 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2557 /* Number of instructions in INSN_BUF. */
2558 unsigned insn_count;
2559 /* Registers when doing displaced stepping. */
2560 struct regcache *regs;
2562 aarch64_displaced_step_closure *dsc;
2563 };
2565 /* Implementation of aarch64_insn_visitor method "b". */
2567 static void
2568 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2569 struct aarch64_insn_data *data)
2571 struct aarch64_displaced_step_data *dsd
2572 = (struct aarch64_displaced_step_data *) data;
2573 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2575 if (can_encode_int32 (new_offset, 28))
2577 /* Emit B rather than BL, because executing BL on a new address
2578 will get the wrong address into LR. In order to avoid this,
2579 we emit B, and update LR if the instruction is BL. */
2580 emit_b (dsd->insn_buf, 0, new_offset);
2581 dsd->insn_count++;
2583 else
2585 /* Write NOP.  */
2586 emit_nop (dsd->insn_buf);
2587 dsd->insn_count++;
2588 dsd->dsc->pc_adjust = offset;
2591 if (is_bl)
2593 /* Update LR.  */
2594 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2595 data->insn_addr + 4);
2599 /* Implementation of aarch64_insn_visitor method "b_cond". */
2601 static void
2602 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2603 struct aarch64_insn_data *data)
2605 struct aarch64_displaced_step_data *dsd
2606 = (struct aarch64_displaced_step_data *) data;
2608 /* GDB has to fix up the PC after displaced stepping this instruction
2609 differently, according to whether the condition is true or false.
2610 Instead of checking COND against the condition flags, we can use
2611 the following instruction sequence, and GDB can tell how to fix up
2612 the PC from the resulting PC value.
2614 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2620 emit_bcond (dsd->insn_buf, cond, 8);
2621 dsd->dsc->cond = 1;
2622 dsd->dsc->pc_adjust = offset;
2623 dsd->insn_count = 1;
2626 /* Dynamically allocate a new register.  If we know the register
2627 statically, we should make it a global as above instead of using this
2628 function.  */
2630 static struct aarch64_register
2631 aarch64_register (unsigned num, int is64)
2633 return (struct aarch64_register) { num, is64 };
2636 /* Implementation of aarch64_insn_visitor method "cb". */
2638 static void
2639 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2640 const unsigned rn, int is64,
2641 struct aarch64_insn_data *data)
2643 struct aarch64_displaced_step_data *dsd
2644 = (struct aarch64_displaced_step_data *) data;
2646 /* The offset is out of range for a compare and branch
2647 instruction. We can use the following instructions instead:
2649 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2654 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2655 dsd->insn_count = 1;
2656 dsd->dsc->cond = 1;
2657 dsd->dsc->pc_adjust = offset;
2660 /* Implementation of aarch64_insn_visitor method "tb". */
2662 static void
2663 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2664 const unsigned rt, unsigned bit,
2665 struct aarch64_insn_data *data)
2667 struct aarch64_displaced_step_data *dsd
2668 = (struct aarch64_displaced_step_data *) data;
2670 /* The offset is out of range for a test bit and branch
2671 instruction.  We can use the following instructions instead:
2673 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2679 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2680 dsd->insn_count = 1;
2681 dsd->dsc->cond = 1;
2682 dsd->dsc->pc_adjust = offset;
2685 /* Implementation of aarch64_insn_visitor method "adr". */
2687 static void
2688 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2689 const int is_adrp, struct aarch64_insn_data *data)
2691 struct aarch64_displaced_step_data *dsd
2692 = (struct aarch64_displaced_step_data *) data;
2693 /* We know exactly the address the ADR{P,} instruction will compute.
2694 We can just write it to the destination register. */
2695 CORE_ADDR address = data->insn_addr + offset;
2697 if (is_adrp)
2699 /* Clear the lower 12 bits of the offset to get the 4K page.  */
2700 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2701 address & ~0xfff);
2703 else
2704 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2705 address);
2707 dsd->dsc->pc_adjust = 4;
2708 emit_nop (dsd->insn_buf);
2709 dsd->insn_count = 1;
2712 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2714 static void
2715 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2716 const unsigned rt, const int is64,
2717 struct aarch64_insn_data *data)
2719 struct aarch64_displaced_step_data *dsd
2720 = (struct aarch64_displaced_step_data *) data;
2721 CORE_ADDR address = data->insn_addr + offset;
2722 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2724 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2725 address);
2727 if (is_sw)
2728 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2729 aarch64_register (rt, 1), zero);
2730 else
2731 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2732 aarch64_register (rt, 1), zero);
2734 dsd->dsc->pc_adjust = 4;
2737 /* Implementation of aarch64_insn_visitor method "others". */
2739 static void
2740 aarch64_displaced_step_others (const uint32_t insn,
2741 struct aarch64_insn_data *data)
2743 struct aarch64_displaced_step_data *dsd
2744 = (struct aarch64_displaced_step_data *) data;
2746 aarch64_emit_insn (dsd->insn_buf, insn);
2747 dsd->insn_count = 1;
2749 if ((insn & 0xfffffc1f) == 0xd65f0000)
2751 /* RET */
2752 dsd->dsc->pc_adjust = 0;
2754 else
2755 dsd->dsc->pc_adjust = 4;
2758 static const struct aarch64_insn_visitor visitor =
2759 {
2760 aarch64_displaced_step_b,
2761 aarch64_displaced_step_b_cond,
2762 aarch64_displaced_step_cb,
2763 aarch64_displaced_step_tb,
2764 aarch64_displaced_step_adr,
2765 aarch64_displaced_step_ldr_literal,
2766 aarch64_displaced_step_others,
2767 };
2769 /* Implement the "displaced_step_copy_insn" gdbarch method. */
2771 struct displaced_step_closure *
2772 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2773 CORE_ADDR from, CORE_ADDR to,
2774 struct regcache *regs)
2776 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2777 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2778 struct aarch64_displaced_step_data dsd;
2779 aarch64_inst inst;
2781 if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2782 return NULL;
2784 /* Look for a Load Exclusive instruction which begins the sequence.  */
2785 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2787 /* We can't displaced-step atomic sequences.  */
2788 return NULL;
2791 std::unique_ptr<aarch64_displaced_step_closure> dsc
2792 (new aarch64_displaced_step_closure);
2793 dsd.base.insn_addr = from;
2794 dsd.new_addr = to;
2795 dsd.regs = regs;
2796 dsd.dsc = dsc.get ();
2798 aarch64_relocate_instruction (insn, &visitor,
2799 (struct aarch64_insn_data *) &dsd);
2800 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2802 if (dsd.insn_count != 0)
2804 unsigned int i;
2806 /* Instruction can be relocated to scratch pad. Copy
2807 relocated instruction(s) there. */
2808 for (i = 0; i < dsd.insn_count; i++)
2810 if (debug_displaced)
2812 debug_printf ("displaced: writing insn ");
2813 debug_printf ("%.8x", dsd.insn_buf[i]);
2814 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2816 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2817 (ULONGEST) dsd.insn_buf[i]);
2820 else
2822 dsc = NULL;
2825 return dsc.release ();
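
/* A sketch of how the two methods fit together (illustrative
   pseudo-usage, not real GDB code; FROM is the original PC and TO the
   scratch pad chosen by the caller):

     closure = aarch64_displaced_step_copy_insn (gdbarch, from, to, regs);
     ...the target single-steps the relocated copy at TO...
     aarch64_displaced_step_fixup (gdbarch, closure, from, to, regs);

   copy_insn relocates the instruction, rewriting PC-relative forms as
   shown by the visitors above, and fixup repairs the PC afterwards
   using the recorded pc_adjust (and cond) state.  */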
2828 /* Implement the "displaced_step_fixup" gdbarch method. */
2830 void
2831 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2832 struct displaced_step_closure *dsc_,
2833 CORE_ADDR from, CORE_ADDR to,
2834 struct regcache *regs)
2836 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2838 if (dsc->cond)
2840 ULONGEST pc;
2842 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2843 if (pc - to == 8)
2845 /* Condition is true.  */
2847 else if (pc - to == 4)
2849 /* Condition is false.  */
2850 dsc->pc_adjust = 4;
2852 else
2853 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2856 if (dsc->pc_adjust != 0)
2858 if (debug_displaced)
2860 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2861 paddress (gdbarch, from), dsc->pc_adjust);
2863 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2864 from + dsc->pc_adjust);
2868 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
2870 static int
2871 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2872 struct displaced_step_closure *closure)
2874 return 1;
2877 /* Get the correct target description for the given VQ value.
2878 If VQ is zero then it is assumed SVE is not supported.
2879 (It is not possible to set VQ to zero on an SVE system). */
2881 struct target_desc *
2882 aarch64_read_description (uint64_t vq)
2884 if (vq > AARCH64_MAX_SVE_VQ)
2885 error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
2886 AARCH64_MAX_SVE_VQ);
2888 struct target_desc *tdesc = tdesc_aarch64_list[vq];
2890 if (tdesc == NULL)
2892 tdesc = aarch64_create_target_description (vq);
2893 tdesc_aarch64_list[vq] = tdesc;
2896 return tdesc;
2899 /* Return the VQ used when creating the target description TDESC. */
2901 static uint64_t
2902 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
2904 const struct tdesc_feature *feature_sve;
2906 if (!tdesc_has_registers (tdesc))
2907 return 0;
2909 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2911 if (feature_sve == nullptr)
2912 return 0;
2914 uint64_t vl = tdesc_register_size (feature_sve,
2915 aarch64_sve_register_names[0]);
2916 return sve_vq_from_vl (vl);
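
/* Worked example (editorial sketch, never called) of the VQ/VL
   relationship assumed above: VL is the Z register size in bytes, VQ
   counts 128-bit quadwords, so sve_vq_from_vl computes VL / 16.  A
   512-bit SVE implementation therefore has VL = 64 and VQ = 4, and
   repeated calls with the same VQ return the cached description.  */

static void
aarch64_vq_example (void)
{
  gdb_assert (sve_vq_from_vl (64) == 4);

  /* The first call creates the description; later calls return the
     cached tdesc_aarch64_list entry.  */
  gdb_assert (aarch64_read_description (4) == tdesc_aarch64_list[4]);
}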
2920 /* Initialize the current architecture based on INFO. If possible,
2921 re-use an architecture from ARCHES, which is a list of
2922 architectures already created during this debugging session.
2924 Called e.g. at program startup, when reading a core file, and when
2925 reading a binary file. */
2927 static struct gdbarch *
2928 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2930 struct gdbarch_tdep *tdep;
2931 struct gdbarch *gdbarch;
2932 struct gdbarch_list *best_arch;
2933 struct tdesc_arch_data *tdesc_data = NULL;
2934 const struct target_desc *tdesc = info.target_desc;
2935 int i;
2936 int valid_p = 1;
2937 const struct tdesc_feature *feature_core;
2938 const struct tdesc_feature *feature_fpu;
2939 const struct tdesc_feature *feature_sve;
2940 int num_regs = 0;
2941 int num_pseudo_regs = 0;
2943 /* Ensure we always have a target description. */
2944 if (!tdesc_has_registers (tdesc))
2945 tdesc = aarch64_read_description (0);
2948 feature_core = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2949 feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2950 feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
2952 if (feature_core == NULL)
2953 return NULL;
2955 tdesc_data = tdesc_data_alloc ();
2957 /* Validate the description provides the mandatory core R registers
2958 and allocate their numbers. */
2959 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2960 valid_p &= tdesc_numbered_register (feature_core, tdesc_data,
2961 AARCH64_X0_REGNUM + i,
2962 aarch64_r_register_names[i]);
2964 num_regs = AARCH64_X0_REGNUM + i;
2966 /* Add the V registers. */
2967 if (feature_fpu != NULL)
2969 if (feature_sve != NULL)
2970 error (_("Program contains both fpu and SVE features."));
2972 /* Validate the description provides the mandatory V registers
2973 and allocate their numbers. */
2974 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2975 valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data,
2976 AARCH64_V0_REGNUM + i,
2977 aarch64_v_register_names[i]);
2979 num_regs = AARCH64_V0_REGNUM + i;
2982 /* Add the SVE registers. */
2983 if (feature_sve != NULL)
2985 /* Validate the description provides the mandatory SVE registers
2986 and allocate their numbers. */
2987 for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
2988 valid_p &= tdesc_numbered_register (feature_sve, tdesc_data,
2989 AARCH64_SVE_Z0_REGNUM + i,
2990 aarch64_sve_register_names[i]);
2992 num_regs = AARCH64_SVE_Z0_REGNUM + i;
2993 num_pseudo_regs += 32; /* add the Vn register pseudos. */
2996 if (feature_fpu != NULL || feature_sve != NULL)
2998 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2999 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
3000 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
3001 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
3002 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
3005 if (!valid_p)
3007 tdesc_data_cleanup (tdesc_data);
3008 return NULL;
3011 /* AArch64 code is always little-endian. */
3012 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3014 /* If there is already a candidate, use it. */
3015 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
3016 best_arch != NULL;
3017 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3019 /* Found a match.  */
3020 break;
3023 if (best_arch != NULL)
3025 if (tdesc_data != NULL)
3026 tdesc_data_cleanup (tdesc_data);
3027 return best_arch->gdbarch;
3030 tdep = XCNEW (struct gdbarch_tdep);
3031 gdbarch = gdbarch_alloc (&info, tdep);
3033 /* This should be low enough for everything. */
3034 tdep->lowest_pc = 0x20;
3035 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
3036 tdep->jb_elt_size = 8;
3037 tdep->vq = aarch64_get_tdesc_vq (tdesc);
3039 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3040 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3042 /* Frame handling. */
3043 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
3044 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
3045 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
3047 /* Advance PC across function entry code. */
3048 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3050 /* The stack grows downward. */
3051 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3053 /* Breakpoint manipulation. */
3054 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3055 aarch64_breakpoint::kind_from_pc);
3056 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3057 aarch64_breakpoint::bp_from_kind);
3058 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3059 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3061 /* Information about registers, etc. */
3062 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3063 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3064 set_gdbarch_num_regs (gdbarch, num_regs);
3066 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3067 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3068 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3069 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3070 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3071 set_tdesc_pseudo_register_reggroup_p (gdbarch,
3072 aarch64_pseudo_register_reggroup_p);
3075 set_gdbarch_short_bit (gdbarch, 16);
3076 set_gdbarch_int_bit (gdbarch, 32);
3077 set_gdbarch_float_bit (gdbarch, 32);
3078 set_gdbarch_double_bit (gdbarch, 64);
3079 set_gdbarch_long_double_bit (gdbarch, 128);
3080 set_gdbarch_long_bit (gdbarch, 64);
3081 set_gdbarch_long_long_bit (gdbarch, 64);
3082 set_gdbarch_ptr_bit (gdbarch, 64);
3083 set_gdbarch_char_signed (gdbarch, 0);
3084 set_gdbarch_wchar_signed (gdbarch, 0);
3085 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3086 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3087 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3089 /* Internal <-> external register number maps. */
3090 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3092 /* Returning results. */
3093 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3096 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3098 /* Virtual tables. */
3099 set_gdbarch_vbit_in_delta (gdbarch, 1);
3101 /* Hook in the ABI-specific overrides, if they have been registered. */
3102 info.target_desc = tdesc;
3103 info.tdesc_data = tdesc_data;
3104 gdbarch_init_osabi (info, gdbarch);
3106 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3108 /* Add some default predicates. */
3109 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3110 dwarf2_append_unwinders (gdbarch);
3111 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3113 frame_base_set_default (gdbarch, &aarch64_normal_base);
3115 /* Now we have tuned the configuration, set a few final things,
3116 based on what the OS ABI has told us. */
3118 if (tdep->jb_pc >= 0)
3119 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3121 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3123 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3125 /* Add standard register aliases. */
3126 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3127 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3128 value_of_aarch64_user_reg,
3129 &aarch64_register_aliases[i].regnum);
3131 return gdbarch;
3134 static void
3135 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3137 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3139 if (tdep == NULL)
3140 return;
3142 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3143 paddress (gdbarch, tdep->lowest_pc));
3145 #if GDB_SELF_TEST
3146 namespace selftests
3149 static void aarch64_process_record_test (void);
3151 #endif
3153 void
3154 _initialize_aarch64_tdep (void)
3156 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3157 aarch64_dump_tdep);
3159 /* Debug this file's internals. */
3160 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3161 Set AArch64 debugging."), _("\
3162 Show AArch64 debugging."), _("\
3163 When on, AArch64 specific debugging is enabled."),
3164 NULL,
3165 show_aarch64_debug,
3166 &setdebuglist, &showdebuglist);
3168 #if GDB_SELF_TEST
3169 selftests::register_test ("aarch64-analyze-prologue",
3170 selftests::aarch64_analyze_prologue_test);
3171 selftests::register_test ("aarch64-process-record",
3172 selftests::aarch64_process_record_test);
3173 selftests::record_xml_tdesc ("aarch64.xml",
3174 aarch64_create_target_description (0));
3175 #endif
3178 /* AArch64 process record-replay related structures, defines, etc.  */
3180 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3183 unsigned int reg_len = LENGTH; \
3184 if (reg_len) \
3186 REGS = XNEWVEC (uint32_t, reg_len); \
3187 memcpy (&REGS[0], &RECORD_BUF[0], sizeof (uint32_t) * LENGTH); \
3192 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3195 unsigned int mem_len = LENGTH; \
3196 if (mem_len) \
3198 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3199 memcpy (&MEMS->len, &RECORD_BUF[0], \
3200 sizeof (struct aarch64_mem_r) * LENGTH); \
3205 /* AArch64 record/replay structures and enumerations. */
3207 struct aarch64_mem_r
3208 {
3209 uint64_t len; /* Record length. */
3210 uint64_t addr; /* Memory address.  */
3211 };
3213 enum aarch64_record_result
3214 {
3215 AARCH64_RECORD_SUCCESS,
3216 AARCH64_RECORD_UNSUPPORTED,
3217 AARCH64_RECORD_UNKNOWN
3218 };
3220 typedef struct insn_decode_record_t
3221 {
3222 struct gdbarch *gdbarch;
3223 struct regcache *regcache;
3224 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3225 uint32_t aarch64_insn; /* Insn to be recorded. */
3226 uint32_t mem_rec_count; /* Count of memory records. */
3227 uint32_t reg_rec_count; /* Count of register records. */
3228 uint32_t *aarch64_regs; /* Registers to be recorded. */
3229 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3230 } insn_decode_record;
3232 /* Record handler for data processing - register instructions. */
3234 static unsigned int
3235 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3237 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3238 uint32_t record_buf[4];
3240 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3241 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3242 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3244 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3246 uint8_t setflags;
3248 /* Logical (shifted register). */
3249 if (insn_bits24_27 == 0x0a)
3250 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3252 else if (insn_bits24_27 == 0x0b)
3253 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3254 else
3255 return AARCH64_RECORD_UNKNOWN;
3257 record_buf[0] = reg_rd;
3258 aarch64_insn_r->reg_rec_count = 1;
3259 if (setflags)
3260 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3262 else
3264 if (insn_bits24_27 == 0x0b)
3266 /* Data-processing (3 source). */
3267 record_buf[0] = reg_rd;
3268 aarch64_insn_r->reg_rec_count = 1;
3270 else if (insn_bits24_27 == 0x0a)
3272 if (insn_bits21_23 == 0x00)
3274 /* Add/subtract (with carry). */
3275 record_buf[0] = reg_rd;
3276 aarch64_insn_r->reg_rec_count = 1;
3277 if (bit (aarch64_insn_r->aarch64_insn, 29))
3279 record_buf[1] = AARCH64_CPSR_REGNUM;
3280 aarch64_insn_r->reg_rec_count = 2;
3283 else if (insn_bits21_23 == 0x02)
3285 /* Conditional compare (register) and conditional compare
3286 (immediate) instructions. */
3287 record_buf[0] = AARCH64_CPSR_REGNUM;
3288 aarch64_insn_r->reg_rec_count = 1;
3290 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3292 /* Conditional select.  */
3293 /* Data-processing (2 source). */
3294 /* Data-processing (1 source). */
3295 record_buf[0] = reg_rd;
3296 aarch64_insn_r->reg_rec_count = 1;
3299 return AARCH64_RECORD_UNKNOWN;
3303 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3305 return AARCH64_RECORD_SUCCESS;
3308 /* Record handler for data processing - immediate instructions. */
3310 static unsigned int
3311 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3313 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3314 uint32_t record_buf[4];
3316 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3317 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3318 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3320 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3321 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3322 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3324 record_buf[0] = reg_rd;
3325 aarch64_insn_r->reg_rec_count = 1;
3327 else if (insn_bits24_27 == 0x01)
3329 /* Add/Subtract (immediate). */
3330 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3331 record_buf[0] = reg_rd;
3332 aarch64_insn_r->reg_rec_count = 1;
3333 if (setflags)
3334 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3336 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3338 /* Logical (immediate). */
3339 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3340 record_buf[0] = reg_rd;
3341 aarch64_insn_r->reg_rec_count = 1;
3342 if (setflags)
3343 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3345 else
3346 return AARCH64_RECORD_UNKNOWN;
3348 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3350 return AARCH64_RECORD_SUCCESS;
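
/* Worked example (editorial sketch, never called): how the bit fields
   tested above map onto a concrete instruction.  "adds x0, x1, #4"
   encodes as 0xb1001020.  */

static void
aarch64_record_data_proc_imm_example (void)
{
  uint32_t insn = 0xb1001020;	/* adds x0, x1, #4 */

  gdb_assert (bits (insn, 24, 27) == 0x01);  /* Add/subtract (immediate).  */
  gdb_assert (bit (insn, 29) == 1);	     /* S bit: CPSR is recorded too.  */
  gdb_assert (bits (insn, 0, 4) == 0);	     /* Rd is x0, the data record.  */
}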
3353 /* Record handler for branch, exception generation and system instructions. */
3355 static unsigned int
3356 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3358 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3359 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3360 uint32_t record_buf[4];
3362 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3363 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3364 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3366 if (insn_bits28_31 == 0x0d)
3368 /* Exception generation instructions. */
3369 if (insn_bits24_27 == 0x04)
3371 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3372 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3373 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3375 ULONGEST svc_number;
3377 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3378 &svc_number);
3379 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3380 svc_number);
3382 else
3383 return AARCH64_RECORD_UNSUPPORTED;
3385 /* System instructions. */
3386 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3388 uint32_t reg_rt, reg_crn;
3390 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3391 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3393 /* Record rt in case of sysl and mrs instructions. */
3394 if (bit (aarch64_insn_r->aarch64_insn, 21))
3396 record_buf[0] = reg_rt;
3397 aarch64_insn_r->reg_rec_count = 1;
3399 /* Record cpsr for hint and msr(immediate) instructions. */
3400 else if (reg_crn == 0x02 || reg_crn == 0x04)
3402 record_buf[0] = AARCH64_CPSR_REGNUM;
3403 aarch64_insn_r->reg_rec_count = 1;
3406 /* Unconditional branch (register). */
3407 else if ((insn_bits24_27 & 0x0e) == 0x06)
3409 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3410 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3411 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3413 else
3414 return AARCH64_RECORD_UNKNOWN;
3416 /* Unconditional branch (immediate). */
3417 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3419 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3420 if (bit (aarch64_insn_r->aarch64_insn, 31))
3421 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3423 else
3424 /* Compare & branch (immediate), Test & branch (immediate) and
3425 Conditional branch (immediate). */
3426 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3428 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3430 return AARCH64_RECORD_SUCCESS;
3433 /* Record handler for advanced SIMD load and store instructions. */
3435 static unsigned int
3436 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3438 ULONGEST address;
3439 uint64_t addr_offset = 0;
3440 uint32_t record_buf[24];
3441 uint64_t record_buf_mem[24];
3442 uint32_t reg_rn, reg_rt;
3443 uint32_t reg_index = 0, mem_index = 0;
3444 uint8_t opcode_bits, size_bits;
3446 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3447 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3448 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3449 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3450 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3453 debug_printf ("Process record: Advanced SIMD load/store\n");
3455 /* Load/store single structure. */
3456 if (bit (aarch64_insn_r->aarch64_insn, 24))
3458 uint8_t sindex, scale, selem, esize, replicate = 0;
3459 scale = opcode_bits >> 2;
3460 selem = ((opcode_bits & 0x02) |
3461 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3465 if (size_bits & 0x01)
3466 return AARCH64_RECORD_UNKNOWN;
3469 if ((size_bits >> 1) & 0x01)
3470 return AARCH64_RECORD_UNKNOWN;
3471 if (size_bits & 0x01)
3473 if (!((opcode_bits >> 1) & 0x01))
3476 return AARCH64_RECORD_UNKNOWN;
3480 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3487 return AARCH64_RECORD_UNKNOWN;
3493 for (sindex = 0; sindex < selem; sindex++)
3495 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3496 reg_rt = (reg_rt + 1) % 32;
3498 else
3500 for (sindex = 0; sindex < selem; sindex++)
3502 if (bit (aarch64_insn_r->aarch64_insn, 22))
3503 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3504 else
3506 record_buf_mem[mem_index++] = esize / 8;
3507 record_buf_mem[mem_index++] = address + addr_offset;
3509 addr_offset = addr_offset + (esize / 8);
3510 reg_rt = (reg_rt + 1) % 32;
3514 /* Load/store multiple structure.  */
3515 else
3517 uint8_t selem, esize, rpt, elements;
3518 uint8_t eindex, rindex;
3520 esize = 8 << size_bits;
3521 if (bit (aarch64_insn_r->aarch64_insn, 30))
3522 elements = 128 / esize;
3524 elements = 64 / esize;
3526 switch (opcode_bits)
3528 /*LD/ST4 (4 Registers).  */
3529 case 0:
3530 rpt = 1;
3531 selem = 4;
3532 break;
3533 /*LD/ST1 (4 Registers).  */
3534 case 2:
3535 rpt = 4;
3536 selem = 1;
3537 break;
3538 /*LD/ST3 (3 Registers).  */
3539 case 4:
3540 rpt = 1;
3541 selem = 3;
3542 break;
3543 /*LD/ST1 (3 Registers).  */
3544 case 6:
3545 rpt = 3;
3546 selem = 1;
3547 break;
3548 /*LD/ST1 (1 Register).  */
3549 case 7:
3550 rpt = 1;
3551 selem = 1;
3552 break;
3553 /*LD/ST2 (2 Registers).  */
3554 case 8:
3555 rpt = 1;
3556 selem = 2;
3557 break;
3558 /*LD/ST1 (2 Registers).  */
3559 case 10:
3560 rpt = 2;
3561 selem = 1;
3562 break;
3563 default:
3564 return AARCH64_RECORD_UNSUPPORTED;
3567 for (rindex = 0; rindex < rpt; rindex++)
3568 for (eindex = 0; eindex < elements; eindex++)
3570 uint8_t reg_tt, sindex;
3571 reg_tt = (reg_rt + rindex) % 32;
3572 for (sindex = 0; sindex < selem; sindex++)
3574 if (bit (aarch64_insn_r->aarch64_insn, 22))
3575 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3576 else
3578 record_buf_mem[mem_index++] = esize / 8;
3579 record_buf_mem[mem_index++] = address + addr_offset;
3581 addr_offset = addr_offset + (esize / 8);
3582 reg_tt = (reg_tt + 1) % 32;
3587 if (bit (aarch64_insn_r->aarch64_insn, 23))
3588 record_buf[reg_index++] = reg_rn;
3590 aarch64_insn_r->reg_rec_count = reg_index;
3591 aarch64_insn_r->mem_rec_count = mem_index / 2;
3592 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3594 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3596 return AARCH64_RECORD_SUCCESS;
3599 /* Record handler for load and store instructions. */
3601 static unsigned int
3602 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3604 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3605 uint8_t insn_bit23, insn_bit21;
3606 uint8_t opc, size_bits, ld_flag, vector_flag;
3607 uint32_t reg_rn, reg_rt, reg_rt2;
3608 uint64_t datasize, offset;
3609 uint32_t record_buf[8];
3610 uint64_t record_buf_mem[8];
3611 ULONGEST address;
3613 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3614 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3615 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3616 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3617 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3618 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3619 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3620 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3621 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3622 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3623 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3625 /* Load/store exclusive. */
3626 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3629 debug_printf ("Process record: load/store exclusive\n");
3633 record_buf[0] = reg_rt;
3634 aarch64_insn_r->reg_rec_count = 1;
3635 if (insn_bit21)
3637 record_buf[1] = reg_rt2;
3638 aarch64_insn_r->reg_rec_count = 2;
3641 else
3643 if (insn_bit21)
3644 datasize = (8 << size_bits) * 2;
3645 else
3646 datasize = (8 << size_bits);
3647 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3648 &address);
3649 record_buf_mem[0] = datasize / 8;
3650 record_buf_mem[1] = address;
3651 aarch64_insn_r->mem_rec_count = 1;
3652 if (!insn_bit23)
3654 /* Save register rs.  */
3655 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3656 aarch64_insn_r->reg_rec_count = 1;
3660 /* Load register (literal) instructions decoding. */
3661 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3664 debug_printf ("Process record: load register (literal)\n");
3666 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3668 record_buf[0] = reg_rt;
3669 aarch64_insn_r->reg_rec_count = 1;
3671 /* All types of load/store pair instructions decoding. */
3672 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3675 debug_printf ("Process record: load/store pair\n");
3681 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3682 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3684 else
3686 record_buf[0] = reg_rt;
3687 record_buf[1] = reg_rt2;
3689 aarch64_insn_r->reg_rec_count = 2;
3694 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3696 size_bits = size_bits >> 1;
3697 datasize = 8 << (2 + size_bits);
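/* imm7 is a signed, scaled offset: the line below extracts its
   magnitude from the two's-complement encoding, and the sign bit
   (0x40) decides further down whether it is added to or subtracted
   from ADDRESS.  For example (editorial note) imm7_off 0x7f encodes
   -1, i.e. an offset of (1 << (2 + size_bits)) bytes downwards.  */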
3698 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3699 offset = offset << (2 + size_bits);
3700 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3701 &address);
3702 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3704 if (imm7_off & 0x40)
3705 address = address - offset;
3707 address = address + offset;
3710 record_buf_mem[0] = datasize / 8;
3711 record_buf_mem[1] = address;
3712 record_buf_mem[2] = datasize / 8;
3713 record_buf_mem[3] = address + (datasize / 8);
3714 aarch64_insn_r->mem_rec_count = 2;
3716 if (bit (aarch64_insn_r->aarch64_insn, 23))
3717 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3719 /* Load/store register (unsigned immediate) instructions. */
3720 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3722 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3723 if (!(opc >> 1))
3725 if (opc & 0x01)
3726 ld_flag = 0x01;
3727 else
3728 ld_flag = 0x0;
3730 else
3732 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3734 /* PRFM (immediate) */
3735 return AARCH64_RECORD_SUCCESS;
3737 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3739 /* LDRSW (immediate) */
3740 ld_flag = 0x1;
3742 else
3744 ld_flag = 0x01;
3752 if (record_debug)
3753 debug_printf ("Process record: load/store (unsigned immediate):"
3754 " size %x V %d opc %x\n", size_bits, vector_flag,
3758 if (!ld_flag)
3760 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3761 datasize = 8 << size_bits;
3762 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3764 offset = offset << size_bits;
3765 address = address + offset;
3767 record_buf_mem[0] = datasize >> 3;
3768 record_buf_mem[1] = address;
3769 aarch64_insn_r->mem_rec_count = 1;
3771 else
3773 if (vector_flag)
3774 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3775 else
3776 record_buf[0] = reg_rt;
3777 aarch64_insn_r->reg_rec_count = 1;
3780 /* Load/store register (register offset) instructions. */
3781 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3782 && insn_bits10_11 == 0x02 && insn_bit21)
3785 debug_printf ("Process record: load/store (register offset)\n");
3786 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3793 if (size_bits != 0x03)
3794 ld_flag = 0x01;
3795 else
3796 return AARCH64_RECORD_UNKNOWN;
3800 ULONGEST reg_rm_val;
3802 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3803 bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
3804 if (bit (aarch64_insn_r->aarch64_insn, 12))
3805 offset = reg_rm_val << size_bits;
3806 else
3807 offset = reg_rm_val;
3808 datasize = 8 << size_bits;
3809 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3810 &address);
3811 address = address + offset;
3812 record_buf_mem[0] = datasize >> 3;
3813 record_buf_mem[1] = address;
3814 aarch64_insn_r->mem_rec_count = 1;
3816 else
3818 if (vector_flag)
3819 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3820 else
3821 record_buf[0] = reg_rt;
3822 aarch64_insn_r->reg_rec_count = 1;
3825 /* Load/store register (immediate and unprivileged) instructions. */
3826 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3831 debug_printf ("Process record: load/store "
3832 "(immediate and unprivileged)\n");
3834 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3841 if (size_bits != 0x03)
3842 ld_flag = 0x01;
3843 else
3844 return AARCH64_RECORD_UNKNOWN;
3849 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3850 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3851 datasize = 8 << size_bits;
3852 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3853 &address);
3854 if (insn_bits10_11 != 0x01)
3856 if (imm9_off & 0x0100)
3857 address = address - offset;
3859 address = address + offset;
3861 record_buf_mem[0] = datasize >> 3;
3862 record_buf_mem[1] = address;
3863 aarch64_insn_r->mem_rec_count = 1;
3865 else
3867 if (vector_flag)
3868 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3869 else
3870 record_buf[0] = reg_rt;
3871 aarch64_insn_r->reg_rec_count = 1;
3873 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3874 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3876 /* Advanced SIMD load/store instructions. */
3877 else
3878 return aarch64_record_asimd_load_store (aarch64_insn_r);
3880 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3882 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3884 return AARCH64_RECORD_SUCCESS;
3887 /* Record handler for data processing SIMD and floating point instructions. */
3889 static unsigned int
3890 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3892 uint8_t insn_bit21, opcode, rmode, reg_rd;
3893 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3894 uint8_t insn_bits11_14;
3895 uint32_t record_buf[2];
3897 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3898 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3899 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3900 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3901 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3902 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3903 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3904 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3905 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3908 debug_printf ("Process record: data processing SIMD/FP: ");
3910 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3912 /* Floating point - fixed point conversion instructions. */
3916 debug_printf ("FP - fixed point conversion");
3918 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3919 record_buf[0] = reg_rd;
3920 else
3921 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3923 /* Floating point - conditional compare instructions. */
3924 else if (insn_bits10_11 == 0x01)
3927 debug_printf ("FP - conditional compare");
3929 record_buf[0] = AARCH64_CPSR_REGNUM;
3931 /* Floating point - data processing (2-source) and
3932 conditional select instructions. */
3933 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3936 debug_printf ("FP - DP (2-source)");
3938 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3940 else if (insn_bits10_11 == 0x00)
3942 /* Floating point - immediate instructions. */
3943 if ((insn_bits12_15 & 0x01) == 0x01
3944 || (insn_bits12_15 & 0x07) == 0x04)
3947 debug_printf ("FP - immediate");
3948 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3950 /* Floating point - compare instructions. */
3951 else if ((insn_bits12_15 & 0x03) == 0x02)
3954 debug_printf ("FP - immediate");
3955 record_buf[0] = AARCH64_CPSR_REGNUM;
3957 /* Floating point - integer conversions instructions. */
3958 else if (insn_bits12_15 == 0x00)
3960 /* Convert float to integer instruction. */
3961 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3964 debug_printf ("float to int conversion");
3966 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3968 /* Convert integer to float instruction. */
3969 else if ((opcode >> 1) == 0x01 && !rmode)
3972 debug_printf ("int to float conversion");
3974 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3976 /* Move float to integer instruction. */
3977 else if ((opcode >> 1) == 0x03)
3980 debug_printf ("move float to int");
3982 if (!(opcode & 0x01))
3983 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3985 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3987 else
3988 return AARCH64_RECORD_UNKNOWN;
3990 else
3991 return AARCH64_RECORD_UNKNOWN;
3993 else
3994 return AARCH64_RECORD_UNKNOWN;
3996 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3999 debug_printf ("SIMD copy");
4001 /* Advanced SIMD copy instructions. */
4002 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4003 && !bit (aarch64_insn_r->aarch64_insn, 15)
4004 && bit (aarch64_insn_r->aarch64_insn, 10))
4006 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4007 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4008 else
4009 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4011 else
4012 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4014 /* All remaining floating point or advanced SIMD instructions. */
4018 debug_printf ("all remain");
4020 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4024 debug_printf ("\n");
4026 aarch64_insn_r->reg_rec_count++;
4027 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
4028 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4030 return AARCH64_RECORD_SUCCESS;
4033 /* Decode the type of instruction and invoke its record handler.  */
4035 static unsigned int
4036 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4038 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4040 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4041 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4042 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4043 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4045 /* Data processing - immediate instructions. */
4046 if (!ins_bit26 && !ins_bit27 && ins_bit28)
4047 return aarch64_record_data_proc_imm (aarch64_insn_r);
4049 /* Branch, exception generation and system instructions. */
4050 if (ins_bit26 && !ins_bit27 && ins_bit28)
4051 return aarch64_record_branch_except_sys (aarch64_insn_r);
4053 /* Load and store instructions. */
4054 if (!ins_bit25 && ins_bit27)
4055 return aarch64_record_load_store (aarch64_insn_r);
4057 /* Data processing - register instructions. */
4058 if (ins_bit25 && !ins_bit26 && ins_bit27)
4059 return aarch64_record_data_proc_reg (aarch64_insn_r);
4061 /* Data processing - SIMD and floating point instructions. */
4062 if (ins_bit25 && ins_bit26 && ins_bit27)
4063 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4065 return AARCH64_RECORD_UNSUPPORTED;
4068 /* Cleans up local record registers and memory allocations. */
4070 static void
4071 deallocate_reg_mem (insn_decode_record *record)
4073 xfree (record->aarch64_regs);
4074 xfree (record->aarch64_mems);
4076 #if GDB_SELF_TEST
4078 namespace selftests {
4080 static void
4081 aarch64_process_record_test (void)
4083 struct gdbarch_info info;
4084 uint32_t ret;
4086 gdbarch_info_init (&info);
4087 info.bfd_arch_info = bfd_scan_arch ("aarch64");
4089 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4090 SELF_CHECK (gdbarch != NULL);
4092 insn_decode_record aarch64_record;
4094 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4095 aarch64_record.regcache = NULL;
4096 aarch64_record.this_addr = 0;
4097 aarch64_record.gdbarch = gdbarch;
4099 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4100 aarch64_record.aarch64_insn = 0xf9800020;
4101 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4102 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4103 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4104 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4106 deallocate_reg_mem (&aarch64_record);
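
  /* A further illustrative check (an editorial addition, not part of
     the original test): "adds x0, x1, #4" (0xb1001020) is a
     data-processing (immediate) instruction, so it must record the
     destination X0 plus CPSR and no memory.  */
  memset (&aarch64_record, 0, sizeof (insn_decode_record));
  aarch64_record.regcache = NULL;
  aarch64_record.this_addr = 0;
  aarch64_record.gdbarch = gdbarch;
  aarch64_record.aarch64_insn = 0xb1001020;
  ret = aarch64_record_decode_insn_handler (&aarch64_record);
  SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
  SELF_CHECK (aarch64_record.reg_rec_count == 2);
  SELF_CHECK (aarch64_record.mem_rec_count == 0);
  deallocate_reg_mem (&aarch64_record);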
4109 } // namespace selftests
4110 #endif /* GDB_SELF_TEST */
4112 /* Parse the current instruction and record the values of the registers and
4113 memory that will be changed by the current instruction to record_arch_list.
4114 Return -1 if something is wrong.  */
4116 int
4117 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4118 CORE_ADDR insn_addr)
4120 uint32_t rec_no = 0;
4121 uint8_t insn_size = 4;
4122 uint32_t ret = 0;
4123 gdb_byte buf[insn_size];
4124 insn_decode_record aarch64_record;
4126 memset (&buf[0], 0, insn_size);
4127 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4128 target_read_memory (insn_addr, &buf[0], insn_size);
4129 aarch64_record.aarch64_insn
4130 = (uint32_t) extract_unsigned_integer (&buf[0],
4132 gdbarch_byte_order (gdbarch));
4133 aarch64_record.regcache = regcache;
4134 aarch64_record.this_addr = insn_addr;
4135 aarch64_record.gdbarch = gdbarch;
4137 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4138 if (ret == AARCH64_RECORD_UNSUPPORTED)
4140 printf_unfiltered (_("Process record does not support instruction "
4141 "0x%0x at address %s.\n"),
4142 aarch64_record.aarch64_insn,
4143 paddress (gdbarch, insn_addr));
4144 ret = -1;
4147 if (ret == AARCH64_RECORD_SUCCESS)
4149 /* Record registers. */
4150 record_full_arch_list_add_reg (aarch64_record.regcache,
4151 AARCH64_PC_REGNUM);
4152 /* Always record register CPSR. */
4153 record_full_arch_list_add_reg (aarch64_record.regcache,
4154 AARCH64_CPSR_REGNUM);
4155 if (aarch64_record.aarch64_regs)
4156 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4157 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4158 aarch64_record.aarch64_regs[rec_no]))
4159 return -1;
4161 /* Record memories. */
4162 if (aarch64_record.aarch64_mems)
4163 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4164 if (record_full_arch_list_add_mem
4165 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4166 aarch64_record.aarch64_mems[rec_no].len))
4167 return -1;
4169 if (record_full_arch_list_add_end ())
4170 return -1;
4173 deallocate_reg_mem (&aarch64_record);
4175 return 0;