1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2018 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
31 #include "arch-utils.h"
33 #include "frame-unwind.h"
34 #include "frame-base.h"
35 #include "trad-frame.h"
37 #include "dwarf2-frame.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
48 #include "aarch64-tdep.h"
51 #include "elf/aarch64.h"
56 #include "record-full.h"
57 #include "arch/aarch64-insn.h"
59 #include "opcode/aarch64.h"
/* Bit-twiddling helpers for decoding AArch64 instruction words.

   submask (x)       -- mask covering bits [0, x] inclusive.
   bit (obj, st)     -- extract single bit ST of OBJ.
   bits (obj, st, fn)-- extract the bit field [ST, FN] inclusive of OBJ.

   Use a 64-bit unsigned constant for the shift so that submask is well
   defined for X up to 62 even on hosts where 'long' is 32 bits wide
   (there, "1L << 32" would be undefined behavior).  */
#define submask(x) (((uint64_t) 1 << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Pseudo register base numbers.  Each of the Q, D, S, H and B banks
   holds 32 registers, so each bank's base is 32 past the previous one.
   Use the literal 32 consistently for every bank (the D0 entry
   previously mixed in AARCH64_D_REGISTER_COUNT, which is the same
   value but obscured the uniform layout).  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + 32)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
/* The standard register names, and all the valid aliases for them.
   Each entry maps an alias string to the raw register number it
   refers to.  NOTE(review): the struct header and the regnum field
   declaration are not visible in this chunk of the file.  */
  const char *const name;
} aarch64_register_aliases[] =
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},
  /* 32-bit register names.  Each wN aliases the corresponding xN.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},
  /* specials: the AAPCS64 intra-procedure-call scratch registers.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
/* AArch64 prologue cache structure.  Filled in by the prologue
   analyzer and consumed by the frame unwinders below.
   NOTE(review): the declarations for the func, prev_pc, prev_sp,
   available_p, framesize and framereg members are not visible in this
   chunk, but all of them are referenced by the unwinder code.  */
struct aarch64_prologue_cache
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
     stub frame.  */
  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  /* Is the target available to read from?  */
  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  /* The register used to hold the frame pointer for this frame.  */
  /* Saved register offsets.  */
  struct trad_frame_saved_reg *saved_regs;
/* "show debug aarch64" callback: print the current state of the
   AArch64-specific debugging flag.  */
show_aarch64_debug (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
/* Abstract instruction reader.  Allows the prologue analyzer to fetch
   instruction words either from the live target or, in the selftests
   below, from a canned array.  */
class abstract_instruction_reader
  /* Read in one instruction of LEN bytes at MEMADDR, interpreting the
     bytes with BYTE_ORDER.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
                         enum bfd_endian byte_order) = 0;
/* Instruction reader from real target.  Reads code bytes through the
   target's memory interface.  */
class instruction_reader : public abstract_instruction_reader
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    return read_code_unsigned_integer (memaddr, len, byte_order);
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.

   Scans instruction words in [START, LIMIT) obtained through READER,
   tracking X and D register values symbolically (prologue-value
   machinery).  On success, fills CACHE (if non-NULL) with the frame
   register, frame size and saved-register stack offsets, and returns
   the address at which the analysis stopped.  */
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache,
                          abstract_instruction_reader& reader)
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  /* AArch64 instructions are always 4 bytes.  */
  for (; start < limit; start += 4)
      insn = reader.read (start, 4, byte_order_for_code);
      if (aarch64_decode_insn (insn, &inst, 1) != 0)
      /* add/sub immediate: tracks moves of SP into/out of other regs.  */
      if (inst.opcode->iclass == addsub_imm
          && (inst.opcode->op == OP_ADD
              || strcmp ("sub", inst.opcode->name) == 0))
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
          if (inst.opcode->op == OP_ADD)
              regs[rd] = pv_add_constant (regs[rn],
                                          inst.operands[2].imm.value);
              regs[rd] = pv_add_constant (regs[rn],
                                          -inst.operands[2].imm.value);
      else if (inst.opcode->iclass == pcreladdr
               && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          /* ADRP destroys any tracked value in the destination.  */
          regs[inst.operands[0].reg.regno] = pv_unknown ();
      else if (inst.opcode->iclass == branch_imm)
          /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == condbranch)
          /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == branch_reg)
          /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == compbranch)
          /* Stop analysis on branch.  */
      else if (inst.opcode->op == OP_MOVZ)
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          regs[inst.operands[0].reg.regno] = pv_unknown ();
      else if (inst.opcode->iclass == log_shift
               && strcmp (inst.opcode->name, "orr") == 0)
          /* ORR with zero shift is a register move (e.g. "mov x29, sp").  */
          unsigned rd = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].reg.regno;
          unsigned rm = inst.operands[2].reg.regno;
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
          if (inst.operands[2].shifter.amount == 0
              && rn == AARCH64_SP_REGNUM)
                debug_printf ("aarch64: prologue analysis gave up "
                              "addr=%s opcode=0x%x (orr x register)\n",
                              core_addr_to_string_nz (start), insn);
      else if (inst.opcode->op == OP_STUR)
          unsigned rt = inst.operands[0].reg.regno;
          unsigned rn = inst.operands[1].addr.base_regno;
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
          gdb_assert (!inst.operands[1].addr.offset.is_reg);
          stack.store (pv_add_constant (regs[rn],
                                        inst.operands[1].addr.offset.imm),
                       is64 ? 8 : 4, regs[rt]);
      else if ((inst.opcode->iclass == ldstpair_off
                || (inst.opcode->iclass == ldstpair_indexed
                    && inst.operands[2].addr.preind))
               && strcmp ("stp", inst.opcode->name) == 0)
          /* STP with addressing mode Pre-indexed and Base register.  */
          unsigned rn = inst.operands[2].addr.base_regno;
          int32_t imm = inst.operands[2].addr.offset.imm;
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
                      || inst.operands[1].type == AARCH64_OPND_Ft2);
          gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
          gdb_assert (!inst.operands[2].addr.offset.is_reg);
          /* If recording this store would invalidate the store area
             (perhaps because rn is not known) then we should abandon
             further prologue analysis.  */
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
          if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
          rt1 = inst.operands[0].reg.regno;
          rt2 = inst.operands[1].reg.regno;
          if (inst.operands[0].type == AARCH64_OPND_Ft)
              /* Only bottom 64-bit of each V register (D register) need
                 to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt1 += AARCH64_X_REGISTER_COUNT;
              rt2 += AARCH64_X_REGISTER_COUNT;
          stack.store (pv_add_constant (regs[rn], imm), 8,
          stack.store (pv_add_constant (regs[rn], imm + 8), 8,
          if (inst.operands[2].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
                || (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
                    && (inst.opcode->op == OP_STR_POS
                        || inst.opcode->op == OP_STRF_POS)))
               && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
               && strcmp ("str", inst.opcode->name) == 0)
          /* STR (immediate) */
          unsigned int rt = inst.operands[0].reg.regno;
          int32_t imm = inst.operands[1].addr.offset.imm;
          unsigned int rn = inst.operands[1].addr.base_regno;
            = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
          gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
                      || inst.operands[0].type == AARCH64_OPND_Ft);
          if (inst.operands[0].type == AARCH64_OPND_Ft)
              /* Only bottom 64-bit of each V register (D register) need
                 to be preserved.  */
              gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
              rt += AARCH64_X_REGISTER_COUNT;
          stack.store (pv_add_constant (regs[rn], imm),
                       is64 ? 8 : 4, regs[rt]);
          if (inst.operands[1].addr.writeback)
            regs[rn] = pv_add_constant (regs[rn], imm);
      else if (inst.opcode->iclass == testbranch)
          /* Stop analysis on branch.  */
          debug_printf ("aarch64: prologue analysis gave up addr=%s"
                        core_addr_to_string_nz (start), insn);
  /* Decide which register to report as the frame base.  */
  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
  /* Record the stack offsets of any saved X registers...  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      if (stack.find_reg (gdbarch, i, &offset))
        cache->saved_regs[i].addr = offset;
  /* ...and of any saved D registers, recorded against the D pseudo
     register numbers (which live past the raw registers).  */
  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      int regnum = gdbarch_num_regs (gdbarch);
      if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
        cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
/* Convenience overload: analyze a prologue reading instructions from
   the real target.  */
aarch64_analyze_prologue (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR limit,
                          struct aarch64_prologue_cache *cache)
  instruction_reader reader;
  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
495 namespace selftests {
497 /* Instruction reader from manually cooked instruction sequences. */
/* Instruction reader from manually cooked instruction sequences.
   Address N * 4 maps to element N of the supplied array.  */
class instruction_reader_test : public abstract_instruction_reader
  /* INSNS is a static array of SIZE pre-encoded instruction words.  */
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);
    return m_insns[memaddr / 4];
  const uint32_t *m_insns;
/* Self test for aarch64_analyze_prologue: feed hand-assembled prologue
   sequences through the analyzer and check the resulting cache.  */
aarch64_analyze_prologue_test (void)
  struct gdbarch_info info;
  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");
  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);
  /* Test the simple prologue in which frame pointer is used.  */
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
    static const uint32_t insns[] = {
      0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
      0x910003fd, /* mov     x29, sp */
      0x97ffffe6, /* bl      0x400580 */
    instruction_reader_test reader (insns);
    /* Analysis must stop before the bl (a branch).  */
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 2);
    SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
    SELF_CHECK (cache.framesize == 272);
    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
        if (i == AARCH64_FP_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -272);
        else if (i == AARCH64_LR_REGNUM)
          SELF_CHECK (cache.saved_regs[i].addr == -264);
          SELF_CHECK (cache.saved_regs[i].addr == -1);
    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
        int regnum = gdbarch_num_regs (gdbarch);
        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
  /* Test a prologue in which STR is used and frame pointer is not
     used.  */
    struct aarch64_prologue_cache cache;
    cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
    static const uint32_t insns[] = {
      0xf81d0ff3, /* str     x19, [sp, #-48]! */
      0xb9002fe0, /* str     w0, [sp, #44] */
      0xf90013e1, /* str     x1, [sp, #32]*/
      0xfd000fe0, /* str     d0, [sp, #24] */
      0xaa0203f3, /* mov     x19, x2 */
      0xf94013e0, /* ldr     x0, [sp, #32] */
    instruction_reader_test reader (insns);
    /* Analysis must stop at the ldr (not a recognized prologue insn).  */
    CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
    SELF_CHECK (end == 4 * 5);
    SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
    SELF_CHECK (cache.framesize == 48);
    for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
        SELF_CHECK (cache.saved_regs[i].addr == -16);
        SELF_CHECK (cache.saved_regs[i].addr == -48);
        SELF_CHECK (cache.saved_regs[i].addr == -1);
    for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
        int regnum = gdbarch_num_regs (gdbarch);
        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
        SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
615 } // namespace selftests
616 #endif /* GDB_SELF_TEST */
/* Implement the "skip_prologue" gdbarch method.  Return the address
   of the first instruction past the prologue of the function
   containing PC.  */
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
  CORE_ADDR func_addr, limit_pc;
  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
      CORE_ADDR post_prologue_pc
        = skip_prologue_using_sal (gdbarch, func_addr);
      if (post_prologue_pc != 0)
        return std::max (pc, post_prologue_pc);
  /* Can't determine prologue from the symbol table, need to examine
     instructions.  */
  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
     upper bound.  */
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
    limit_pc = pc + 128;	/* Magic.  */
  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */
aarch64_scan_prologue (struct frame_info *this_frame,
                       struct aarch64_prologue_cache *cache)
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  cache->prev_pc = prev_pc;
  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;
  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);
          /* No line info so use the current PC.  */
          prologue_end = prev_pc;
      else if (sal.end < prologue_end)
          /* The next line begins after the function end.  */
          prologue_end = sal.end;
      /* Never scan past the frame's current PC.  */
      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
      frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
      /* No symbol info: assume a conventional frame record with fp/lr
         saved at the frame base.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = 16;
      cache->saved_regs[29].addr = 0;
      cache->saved_regs[30].addr = 8;
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
                               struct aarch64_prologue_cache *cache)
  CORE_ADDR unwound_fp;
  aarch64_scan_prologue (this_frame, cache);
  if (cache->framereg == -1)
  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  cache->prev_sp = unwound_fp + cache->framesize;
  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;
  cache->func = get_frame_func (this_frame);
  cache->available_p = 1;
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  */
static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
  struct aarch64_prologue_cache *cache;
  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;
  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
      aarch64_make_prologue_cache_1 (this_frame, cache);
  CATCH (ex, RETURN_MASK_ERROR)
      /* An unavailable target leaves available_p clear; anything else
         is re-thrown.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
/* Implement the "stop_reason" frame_unwind method.  */
static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);
  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;
  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;
  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;
  return UNWIND_NO_REASON;
/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  Implements the
   "this_id" frame_unwind method.  */
aarch64_prologue_this_id (struct frame_info *this_frame,
                          void **this_cache, struct frame_id *this_id)
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);
  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
    *this_id = frame_id_build (cache->prev_sp, cache->func);
/* Implement the "prev_register" frame_unwind method.  */
static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
                                void **this_cache, int prev_regnum)
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);
  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
  /* Everything else comes from the saved-register table.  */
  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
/* AArch64 prologue unwinder.  Vtable wiring up the prologue-analysis
   based unwind methods defined above.  */
struct frame_unwind aarch64_prologue_unwind =
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  default_frame_sniffer
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
   *THIS_CACHE.  Used for stub frames (e.g. PLT entries), which have no
   prologue to analyze: prev SP/PC are read directly.  */
static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
  struct aarch64_prologue_cache *cache;
  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;
  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
      cache->prev_sp = get_frame_register_unsigned (this_frame,
      cache->prev_pc = get_frame_pc (this_frame);
      cache->available_p = 1;
  CATCH (ex, RETURN_MASK_ERROR)
      if (ex.error != NOT_AVAILABLE_ERROR)
        throw_exception (ex);
/* Implement the "stop_reason" frame_unwind method for stub frames.  */
static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);
  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;
  return UNWIND_NO_REASON;
/* Our frame ID for a stub frame is the current SP and LR.  Implements
   the "this_id" frame_unwind method for stub frames.  */
aarch64_stub_this_id (struct frame_info *this_frame,
                      void **this_cache, struct frame_id *this_id)
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);
  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
    *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
/* Implement the "sniffer" frame_unwind method.  Claim the frame when
   it is in a PLT stub or when its code is unreadable.  */
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_prologue_cache)
  CORE_ADDR addr_in_block;
  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub winder if the target memory is unreadable
         to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
/* AArch64 stub unwinder.  Shares prev_register with the prologue
   unwinder; differs in id/sniffer/stop-reason.  */
struct frame_unwind aarch64_stub_unwind =
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  aarch64_stub_unwind_sniffer
/* Return the frame base address of *THIS_FRAME: the reconstructed
   previous SP minus the frame size.  */
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);
  return cache->prev_sp - cache->framesize;
/* AArch64 default frame base information.  The same address serves as
   frame base, locals base and args base.  */
struct frame_base aarch64_normal_base =
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */
static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
  return frame_id_build (get_frame_register_unsigned (this_frame,
                         get_frame_pc (this_frame));
/* Implement the "unwind_pc" gdbarch method: the previous frame's PC is
   the unwound PC register.  */
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
/* Implement the "unwind_sp" gdbarch method.  */
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
/* Return the value of the REGNUM register in the previous frame of
   THIS_FRAME.  Only the PC is handled here (unwound as LR); any other
   register is an internal error.  */
static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
                              void **this_cache, int regnum)
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);
      internal_error (__FILE__, __LINE__,
                      _("Unexpected register %d"), regnum);
/* Implement the "init_reg" dwarf2_frame_ops method: PC is computed via
   aarch64_dwarf2_prev_register; SP is the CFA.  */
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
                               struct dwarf2_frame_state_reg *reg,
                               struct frame_info *this_frame)
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */
  /* Value to pass on stack.  It can be NULL if this item is for stack
     padding.  */
  const gdb_byte *data;
  /* Size in bytes of value to pass on stack.  */
DEF_VEC_O (stack_item_t);
/* Return the alignment (in bytes) of the given type T, following the
   AAPCS64 rules: scalars align to their size, vectors to their size
   capped at 16 bytes, aggregates to the largest member alignment.  */
aarch64_type_align (struct type *t)
  t = check_typedef (t);
  switch (TYPE_CODE (t))
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
    case TYPE_CODE_ENUM:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);
    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
          /* Use the natural alignment for vector types (the same for
             scalar type), but the maximum alignment is 128-bit.  */
          if (TYPE_LENGTH (t) > 16)
          return TYPE_LENGTH (t);
        /* Non-vector array: alignment of the element type.  */
        return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* Aggregate: take the maximum over all field alignments.  */
      for (n = 0; n < TYPE_NFIELDS (t); n++)
          falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */
is_hfa_or_hva (struct type *ty)
  switch (TYPE_CODE (ty))
    case TYPE_CODE_ARRAY:
        struct type *target_ty = TYPE_TARGET_TYPE (ty);
        /* A vector type is itself a candidate member, not an HFA/HVA.  */
        if (TYPE_VECTOR (ty))
        if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
            && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
                || (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
                    && TYPE_VECTOR (target_ty))))
    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
        /* HFA or HVA has at most four members.  */
        if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
            struct type *member0_type;
            member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
            if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
                || (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
                    && TYPE_VECTOR (member0_type)))
                /* All members must share the first member's type and size.  */
                for (i = 0; i < TYPE_NFIELDS (ty); i++)
                    struct type *member1_type;
                    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
                    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
                        || (TYPE_LENGTH (member0_type)
                            != TYPE_LENGTH (member1_type)))
/* AArch64 function call information structure.  Tracks the argument
   marshalling state while setting up an inferior call, mirroring the
   AAPCS64 NGRN/NSRN/NSAA allocation counters.  */
struct aarch64_call_info
  /* the current argument number.  */
  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  /* Stack item vector.  */
  VEC(stack_item_t) *si;
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
           struct aarch64_call_info *info, struct type *type,
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);
      /* Copy at most one register's worth per iteration.  */
      int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
      CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
      /* Adjust sub-word struct/union args when big-endian.  */
      if (byte_order == BFD_ENDIAN_BIG
          && partial_len < X_REGISTER_SIZE
          && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
        regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
          debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
                        gdbarch_register_name (gdbarch, regnum),
                        phex (regval, X_REGISTER_SIZE));
      regcache_cooked_write_unsigned (regcache, regnum, regval);
/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */
pass_in_v (struct gdbarch *gdbarch,
           struct regcache *regcache,
           struct aarch64_call_info *info,
           int len, const bfd_byte *buf)
  int regnum = AARCH64_V0_REGNUM + info->nsrn;
  gdb_byte reg[V_REGISTER_SIZE];
  memset (reg, 0, sizeof (reg));
  /* PCS C.1, the argument is allocated to the least significant
     bits of V register.  */
  memcpy (reg, buf, len);
  regcache_cooked_write (regcache, regnum, reg);
      debug_printf ("arg %d in %s\n", info->argnum,
                    gdbarch_register_name (gdbarch, regnum));
/* Marshall an argument onto the stack, recording it (and any needed
   alignment padding) in INFO->si.  */
pass_on_stack (struct aarch64_call_info *info, struct type *type,
  const bfd_byte *buf = value_contents (arg);
  int len = TYPE_LENGTH (type);
  align = aarch64_type_align (type);
  /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
     Natural alignment of the argument's type.  */
  align = align_up (align, 8);
  /* The AArch64 PCS requires at most doubleword alignment.  */
      debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
  VEC_safe_push (stack_item_t, info->si, &item);
  if (info->nsaa & (align - 1))
      /* Push stack alignment padding.  */
      int pad = align - (info->nsaa & (align - 1));
      VEC_safe_push (stack_item_t, info->si, &item);
1332 /* Marshall an argument into a sequence of one or more consecutive X
1333 registers or, if insufficient X registers are available then onto
1337 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1338 struct aarch64_call_info *info, struct type *type,
1341 int len = TYPE_LENGTH (type);
1342 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1344 /* PCS C.13 - Pass in registers if we have enough spare */
1345 if (info->ngrn + nregs <= 8)
1347 pass_in_x (gdbarch, regcache, info, type, arg);
1348 info->ngrn += nregs;
1353 pass_on_stack (info, type, arg);
1357 /* Pass a value in a V register, or on the stack if insufficient are
1361 pass_in_v_or_stack (struct gdbarch *gdbarch,
1362 struct regcache *regcache,
1363 struct aarch64_call_info *info,
1367 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
1368 value_contents (arg)))
1369 pass_on_stack (info, type, arg);
1372 /* Implement the "push_dummy_call" gdbarch method. */
1375 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1376 struct regcache *regcache, CORE_ADDR bp_addr,
1378 struct value **args, CORE_ADDR sp, int struct_return,
1379 CORE_ADDR struct_addr)
1382 struct aarch64_call_info info;
1383 struct type *func_type;
1384 struct type *return_type;
1385 int lang_struct_return;
1387 memset (&info, 0, sizeof (info));
1389 /* We need to know what the type of the called function is in order
1390 to determine the number of named/anonymous arguments for the
1391 actual argument placement, and the return type in order to handle
1392 return value correctly.
1394 The generic code above us views the decision of return in memory
1395 or return in registers as a two stage processes. The language
1396 handler is consulted first and may decide to return in memory (eg
1397 class with copy constructor returned by value), this will cause
1398 the generic code to allocate space AND insert an initial leading
1401 If the language code does not decide to pass in memory then the
1402 target code is consulted.
1404 If the language code decides to pass in memory we want to move
1405 the pointer inserted as the initial argument from the argument
1406 list and into X8, the conventional AArch64 struct return pointer
1409 This is slightly awkward, ideally the flag "lang_struct_return"
1410 would be passed to the targets implementation of push_dummy_call.
1411 Rather that change the target interface we call the language code
1412 directly ourselves. */
1414 func_type = check_typedef (value_type (function));
1416 /* Dereference function pointer types. */
1417 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1418 func_type = TYPE_TARGET_TYPE (func_type);
1420 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1421 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1423 /* If language_pass_by_reference () returned true we will have been
1424 given an additional initial argument, a hidden pointer to the
1425 return slot in memory. */
1426 return_type = TYPE_TARGET_TYPE (func_type);
1427 lang_struct_return = language_pass_by_reference (return_type);
1429 /* Set the return address. For the AArch64, the return breakpoint
1430 is always at BP_ADDR. */
1431 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1433 /* If we were given an initial argument for the return slot because
1434 lang_struct_return was true, lose it. */
1435 if (lang_struct_return)
1441 /* The struct_return pointer occupies X8. */
1442 if (struct_return || lang_struct_return)
1446 debug_printf ("struct return in %s = 0x%s\n",
1447 gdbarch_register_name (gdbarch,
1448 AARCH64_STRUCT_RETURN_REGNUM),
1449 paddress (gdbarch, struct_addr));
1451 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1455 for (argnum = 0; argnum < nargs; argnum++)
1457 struct value *arg = args[argnum];
1458 struct type *arg_type;
1461 arg_type = check_typedef (value_type (arg));
1462 len = TYPE_LENGTH (arg_type);
1464 switch (TYPE_CODE (arg_type))
1467 case TYPE_CODE_BOOL:
1468 case TYPE_CODE_CHAR:
1469 case TYPE_CODE_RANGE:
1470 case TYPE_CODE_ENUM:
1473 /* Promote to 32 bit integer. */
1474 if (TYPE_UNSIGNED (arg_type))
1475 arg_type = builtin_type (gdbarch)->builtin_uint32;
1477 arg_type = builtin_type (gdbarch)->builtin_int32;
1478 arg = value_cast (arg_type, arg);
1480 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1483 case TYPE_CODE_COMPLEX:
1486 const bfd_byte *buf = value_contents (arg);
1487 struct type *target_type =
1488 check_typedef (TYPE_TARGET_TYPE (arg_type));
1490 pass_in_v (gdbarch, regcache, &info,
1491 TYPE_LENGTH (target_type), buf);
1492 pass_in_v (gdbarch, regcache, &info,
1493 TYPE_LENGTH (target_type),
1494 buf + TYPE_LENGTH (target_type));
1499 pass_on_stack (&info, arg_type, arg);
1503 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1506 case TYPE_CODE_STRUCT:
1507 case TYPE_CODE_ARRAY:
1508 case TYPE_CODE_UNION:
1509 if (is_hfa_or_hva (arg_type))
1511 int elements = TYPE_NFIELDS (arg_type);
1513 /* Homogeneous Aggregates */
1514 if (info.nsrn + elements < 8)
1518 for (i = 0; i < elements; i++)
1520 /* We know that we have sufficient registers
1521 available therefore this will never fallback
1523 struct value *field =
1524 value_primitive_field (arg, 0, i, arg_type);
1525 struct type *field_type =
1526 check_typedef (value_type (field));
1528 pass_in_v_or_stack (gdbarch, regcache, &info,
1535 pass_on_stack (&info, arg_type, arg);
1538 else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
1539 && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
1541 /* Short vector types are passed in V registers. */
1542 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1546 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1547 invisible reference. */
1549 /* Allocate aligned storage. */
1550 sp = align_down (sp - len, 16);
1552 /* Write the real data into the stack. */
1553 write_memory (sp, value_contents (arg), len);
1555 /* Construct the indirection. */
1556 arg_type = lookup_pointer_type (arg_type);
1557 arg = value_from_pointer (arg_type, sp);
1558 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1561 /* PCS C.15 / C.18 multiple values pass. */
1562 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1566 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1571 /* Make sure stack retains 16 byte alignment. */
1573 sp -= 16 - (info.nsaa & 15);
1575 while (!VEC_empty (stack_item_t, info.si))
1577 stack_item_t *si = VEC_last (stack_item_t, info.si);
1580 if (si->data != NULL)
1581 write_memory (sp, si->data, si->len);
1582 VEC_pop (stack_item_t, info.si);
1585 VEC_free (stack_item_t, info.si);
1587 /* Finally, update the SP register. */
1588 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1593 /* Implement the "frame_align" gdbarch method. */
1596 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1598 /* Align the stack to sixteen bytes. */
1599 return sp & ~(CORE_ADDR) 15;
1602 /* Return the type for an AdvSISD Q register. */
1604 static struct type *
1605 aarch64_vnq_type (struct gdbarch *gdbarch)
1607 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1609 if (tdep->vnq_type == NULL)
1614 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1617 elem = builtin_type (gdbarch)->builtin_uint128;
1618 append_composite_type_field (t, "u", elem);
1620 elem = builtin_type (gdbarch)->builtin_int128;
1621 append_composite_type_field (t, "s", elem);
1626 return tdep->vnq_type;
1629 /* Return the type for an AdvSISD D register. */
1631 static struct type *
1632 aarch64_vnd_type (struct gdbarch *gdbarch)
1634 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1636 if (tdep->vnd_type == NULL)
1641 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1644 elem = builtin_type (gdbarch)->builtin_double;
1645 append_composite_type_field (t, "f", elem);
1647 elem = builtin_type (gdbarch)->builtin_uint64;
1648 append_composite_type_field (t, "u", elem);
1650 elem = builtin_type (gdbarch)->builtin_int64;
1651 append_composite_type_field (t, "s", elem);
1656 return tdep->vnd_type;
1659 /* Return the type for an AdvSISD S register. */
1661 static struct type *
1662 aarch64_vns_type (struct gdbarch *gdbarch)
1664 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1666 if (tdep->vns_type == NULL)
1671 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1674 elem = builtin_type (gdbarch)->builtin_float;
1675 append_composite_type_field (t, "f", elem);
1677 elem = builtin_type (gdbarch)->builtin_uint32;
1678 append_composite_type_field (t, "u", elem);
1680 elem = builtin_type (gdbarch)->builtin_int32;
1681 append_composite_type_field (t, "s", elem);
1686 return tdep->vns_type;
1689 /* Return the type for an AdvSISD H register. */
1691 static struct type *
1692 aarch64_vnh_type (struct gdbarch *gdbarch)
1694 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1696 if (tdep->vnh_type == NULL)
1701 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1704 elem = builtin_type (gdbarch)->builtin_uint16;
1705 append_composite_type_field (t, "u", elem);
1707 elem = builtin_type (gdbarch)->builtin_int16;
1708 append_composite_type_field (t, "s", elem);
1713 return tdep->vnh_type;
1716 /* Return the type for an AdvSISD B register. */
1718 static struct type *
1719 aarch64_vnb_type (struct gdbarch *gdbarch)
1721 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1723 if (tdep->vnb_type == NULL)
1728 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1731 elem = builtin_type (gdbarch)->builtin_uint8;
1732 append_composite_type_field (t, "u", elem);
1734 elem = builtin_type (gdbarch)->builtin_int8;
1735 append_composite_type_field (t, "s", elem);
1740 return tdep->vnb_type;
1743 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1746 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1748 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1749 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1751 if (reg == AARCH64_DWARF_SP)
1752 return AARCH64_SP_REGNUM;
1754 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1755 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1761 /* Implement the "print_insn" gdbarch method. */
1764 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1766 info->symbols = NULL;
1767 return default_print_insn (memaddr, info);
1770 /* AArch64 BRK software debug mode instruction.
1771 Note that AArch64 code is always little-endian.
1772 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000. */
1773 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
1775 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
1777 /* Extract from an array REGS containing the (raw) register state a
1778 function return value of type TYPE, and copy that, in virtual
1779 format, into VALBUF. */
1782 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1785 struct gdbarch *gdbarch = regs->arch ();
1786 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1788 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1790 bfd_byte buf[V_REGISTER_SIZE];
1791 int len = TYPE_LENGTH (type);
1793 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1794 memcpy (valbuf, buf, len);
1796 else if (TYPE_CODE (type) == TYPE_CODE_INT
1797 || TYPE_CODE (type) == TYPE_CODE_CHAR
1798 || TYPE_CODE (type) == TYPE_CODE_BOOL
1799 || TYPE_CODE (type) == TYPE_CODE_PTR
1800 || TYPE_IS_REFERENCE (type)
1801 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1803 /* If the the type is a plain integer, then the access is
1804 straight-forward. Otherwise we have to play around a bit
1806 int len = TYPE_LENGTH (type);
1807 int regno = AARCH64_X0_REGNUM;
1812 /* By using store_unsigned_integer we avoid having to do
1813 anything special for small big-endian values. */
1814 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1815 store_unsigned_integer (valbuf,
1816 (len > X_REGISTER_SIZE
1817 ? X_REGISTER_SIZE : len), byte_order, tmp);
1818 len -= X_REGISTER_SIZE;
1819 valbuf += X_REGISTER_SIZE;
1822 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1824 int regno = AARCH64_V0_REGNUM;
1825 bfd_byte buf[V_REGISTER_SIZE];
1826 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1827 int len = TYPE_LENGTH (target_type);
1829 regcache_cooked_read (regs, regno, buf);
1830 memcpy (valbuf, buf, len);
1832 regcache_cooked_read (regs, regno + 1, buf);
1833 memcpy (valbuf, buf, len);
1836 else if (is_hfa_or_hva (type))
1838 int elements = TYPE_NFIELDS (type);
1839 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1840 int len = TYPE_LENGTH (member_type);
1843 for (i = 0; i < elements; i++)
1845 int regno = AARCH64_V0_REGNUM + i;
1846 bfd_byte buf[V_REGISTER_SIZE];
1850 debug_printf ("read HFA or HVA return value element %d from %s\n",
1852 gdbarch_register_name (gdbarch, regno));
1854 regcache_cooked_read (regs, regno, buf);
1856 memcpy (valbuf, buf, len);
1860 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1861 && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
1863 /* Short vector is returned in V register. */
1864 gdb_byte buf[V_REGISTER_SIZE];
1866 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1867 memcpy (valbuf, buf, TYPE_LENGTH (type));
1871 /* For a structure or union the behaviour is as if the value had
1872 been stored to word-aligned memory and then loaded into
1873 registers with 64-bit load instruction(s). */
1874 int len = TYPE_LENGTH (type);
1875 int regno = AARCH64_X0_REGNUM;
1876 bfd_byte buf[X_REGISTER_SIZE];
1880 regcache_cooked_read (regs, regno++, buf);
1881 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1882 len -= X_REGISTER_SIZE;
1883 valbuf += X_REGISTER_SIZE;
1889 /* Will a function return an aggregate type in memory or in a
1890 register? Return 0 if an aggregate type can be returned in a
1891 register, 1 if it must be returned in memory. */
1894 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1896 type = check_typedef (type);
1898 if (is_hfa_or_hva (type))
1900 /* v0-v7 are used to return values and one register is allocated
1901 for one member. However, HFA or HVA has at most four members. */
1905 if (TYPE_LENGTH (type) > 16)
1907 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1908 invisible reference. */
1916 /* Write into appropriate registers a function return value of type
1917 TYPE, given in virtual format. */
1920 aarch64_store_return_value (struct type *type, struct regcache *regs,
1921 const gdb_byte *valbuf)
1923 struct gdbarch *gdbarch = regs->arch ();
1924 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1926 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1928 bfd_byte buf[V_REGISTER_SIZE];
1929 int len = TYPE_LENGTH (type);
1931 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1932 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1934 else if (TYPE_CODE (type) == TYPE_CODE_INT
1935 || TYPE_CODE (type) == TYPE_CODE_CHAR
1936 || TYPE_CODE (type) == TYPE_CODE_BOOL
1937 || TYPE_CODE (type) == TYPE_CODE_PTR
1938 || TYPE_IS_REFERENCE (type)
1939 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1941 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1943 /* Values of one word or less are zero/sign-extended and
1945 bfd_byte tmpbuf[X_REGISTER_SIZE];
1946 LONGEST val = unpack_long (type, valbuf);
1948 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
1949 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
1953 /* Integral values greater than one word are stored in
1954 consecutive registers starting with r0. This will always
1955 be a multiple of the regiser size. */
1956 int len = TYPE_LENGTH (type);
1957 int regno = AARCH64_X0_REGNUM;
1961 regcache_cooked_write (regs, regno++, valbuf);
1962 len -= X_REGISTER_SIZE;
1963 valbuf += X_REGISTER_SIZE;
1967 else if (is_hfa_or_hva (type))
1969 int elements = TYPE_NFIELDS (type);
1970 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1971 int len = TYPE_LENGTH (member_type);
1974 for (i = 0; i < elements; i++)
1976 int regno = AARCH64_V0_REGNUM + i;
1977 bfd_byte tmpbuf[V_REGISTER_SIZE];
1981 debug_printf ("write HFA or HVA return value element %d to %s\n",
1983 gdbarch_register_name (gdbarch, regno));
1986 memcpy (tmpbuf, valbuf, len);
1987 regcache_cooked_write (regs, regno, tmpbuf);
1991 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1992 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
1995 gdb_byte buf[V_REGISTER_SIZE];
1997 memcpy (buf, valbuf, TYPE_LENGTH (type));
1998 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
2002 /* For a structure or union the behaviour is as if the value had
2003 been stored to word-aligned memory and then loaded into
2004 registers with 64-bit load instruction(s). */
2005 int len = TYPE_LENGTH (type);
2006 int regno = AARCH64_X0_REGNUM;
2007 bfd_byte tmpbuf[X_REGISTER_SIZE];
2011 memcpy (tmpbuf, valbuf,
2012 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2013 regcache_cooked_write (regs, regno++, tmpbuf);
2014 len -= X_REGISTER_SIZE;
2015 valbuf += X_REGISTER_SIZE;
2020 /* Implement the "return_value" gdbarch method. */
2022 static enum return_value_convention
2023 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2024 struct type *valtype, struct regcache *regcache,
2025 gdb_byte *readbuf, const gdb_byte *writebuf)
2028 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2029 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2030 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2032 if (aarch64_return_in_memory (gdbarch, valtype))
2035 debug_printf ("return value in memory\n");
2036 return RETURN_VALUE_STRUCT_CONVENTION;
2041 aarch64_store_return_value (valtype, regcache, writebuf);
2044 aarch64_extract_return_value (valtype, regcache, readbuf);
2047 debug_printf ("return value in registers\n");
2049 return RETURN_VALUE_REGISTER_CONVENTION;
2052 /* Implement the "get_longjmp_target" gdbarch method. */
2055 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2058 gdb_byte buf[X_REGISTER_SIZE];
2059 struct gdbarch *gdbarch = get_frame_arch (frame);
2060 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2061 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2063 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2065 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2069 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2073 /* Implement the "gen_return_address" gdbarch method. */
2076 aarch64_gen_return_address (struct gdbarch *gdbarch,
2077 struct agent_expr *ax, struct axs_value *value,
2080 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2081 value->kind = axs_lvalue_register;
2082 value->u.reg = AARCH64_LR_REGNUM;
2086 /* Return the pseudo register name corresponding to register regnum. */
2089 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2091 static const char *const q_name[] =
2093 "q0", "q1", "q2", "q3",
2094 "q4", "q5", "q6", "q7",
2095 "q8", "q9", "q10", "q11",
2096 "q12", "q13", "q14", "q15",
2097 "q16", "q17", "q18", "q19",
2098 "q20", "q21", "q22", "q23",
2099 "q24", "q25", "q26", "q27",
2100 "q28", "q29", "q30", "q31",
2103 static const char *const d_name[] =
2105 "d0", "d1", "d2", "d3",
2106 "d4", "d5", "d6", "d7",
2107 "d8", "d9", "d10", "d11",
2108 "d12", "d13", "d14", "d15",
2109 "d16", "d17", "d18", "d19",
2110 "d20", "d21", "d22", "d23",
2111 "d24", "d25", "d26", "d27",
2112 "d28", "d29", "d30", "d31",
2115 static const char *const s_name[] =
2117 "s0", "s1", "s2", "s3",
2118 "s4", "s5", "s6", "s7",
2119 "s8", "s9", "s10", "s11",
2120 "s12", "s13", "s14", "s15",
2121 "s16", "s17", "s18", "s19",
2122 "s20", "s21", "s22", "s23",
2123 "s24", "s25", "s26", "s27",
2124 "s28", "s29", "s30", "s31",
2127 static const char *const h_name[] =
2129 "h0", "h1", "h2", "h3",
2130 "h4", "h5", "h6", "h7",
2131 "h8", "h9", "h10", "h11",
2132 "h12", "h13", "h14", "h15",
2133 "h16", "h17", "h18", "h19",
2134 "h20", "h21", "h22", "h23",
2135 "h24", "h25", "h26", "h27",
2136 "h28", "h29", "h30", "h31",
2139 static const char *const b_name[] =
2141 "b0", "b1", "b2", "b3",
2142 "b4", "b5", "b6", "b7",
2143 "b8", "b9", "b10", "b11",
2144 "b12", "b13", "b14", "b15",
2145 "b16", "b17", "b18", "b19",
2146 "b20", "b21", "b22", "b23",
2147 "b24", "b25", "b26", "b27",
2148 "b28", "b29", "b30", "b31",
2151 regnum -= gdbarch_num_regs (gdbarch);
2153 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2154 return q_name[regnum - AARCH64_Q0_REGNUM];
2156 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2157 return d_name[regnum - AARCH64_D0_REGNUM];
2159 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2160 return s_name[regnum - AARCH64_S0_REGNUM];
2162 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2163 return h_name[regnum - AARCH64_H0_REGNUM];
2165 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2166 return b_name[regnum - AARCH64_B0_REGNUM];
2168 internal_error (__FILE__, __LINE__,
2169 _("aarch64_pseudo_register_name: bad register number %d"),
2173 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2175 static struct type *
2176 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2178 regnum -= gdbarch_num_regs (gdbarch);
2180 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2181 return aarch64_vnq_type (gdbarch);
2183 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2184 return aarch64_vnd_type (gdbarch);
2186 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2187 return aarch64_vns_type (gdbarch);
2189 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2190 return aarch64_vnh_type (gdbarch);
2192 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2193 return aarch64_vnb_type (gdbarch);
2195 internal_error (__FILE__, __LINE__,
2196 _("aarch64_pseudo_register_type: bad register number %d"),
2200 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2203 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2204 struct reggroup *group)
2206 regnum -= gdbarch_num_regs (gdbarch);
2208 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2209 return group == all_reggroup || group == vector_reggroup;
2210 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2211 return (group == all_reggroup || group == vector_reggroup
2212 || group == float_reggroup);
2213 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2214 return (group == all_reggroup || group == vector_reggroup
2215 || group == float_reggroup);
2216 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2217 return group == all_reggroup || group == vector_reggroup;
2218 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2219 return group == all_reggroup || group == vector_reggroup;
2221 return group == all_reggroup;
2224 /* Implement the "pseudo_register_read_value" gdbarch method. */
2226 static struct value *
2227 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2228 struct regcache *regcache,
2231 gdb_byte reg_buf[V_REGISTER_SIZE];
2232 struct value *result_value;
2235 result_value = allocate_value (register_type (gdbarch, regnum));
2236 VALUE_LVAL (result_value) = lval_register;
2237 VALUE_REGNUM (result_value) = regnum;
2238 buf = value_contents_raw (result_value);
2240 regnum -= gdbarch_num_regs (gdbarch);
2242 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2244 enum register_status status;
2247 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2248 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2249 if (status != REG_VALID)
2250 mark_value_bytes_unavailable (result_value, 0,
2251 TYPE_LENGTH (value_type (result_value)));
2253 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2254 return result_value;
2257 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2259 enum register_status status;
2262 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2263 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2264 if (status != REG_VALID)
2265 mark_value_bytes_unavailable (result_value, 0,
2266 TYPE_LENGTH (value_type (result_value)));
2268 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2269 return result_value;
2272 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2274 enum register_status status;
2277 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2278 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2279 if (status != REG_VALID)
2280 mark_value_bytes_unavailable (result_value, 0,
2281 TYPE_LENGTH (value_type (result_value)));
2283 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2284 return result_value;
2287 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2289 enum register_status status;
2292 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2293 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2294 if (status != REG_VALID)
2295 mark_value_bytes_unavailable (result_value, 0,
2296 TYPE_LENGTH (value_type (result_value)));
2298 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2299 return result_value;
2302 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2304 enum register_status status;
2307 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2308 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2309 if (status != REG_VALID)
2310 mark_value_bytes_unavailable (result_value, 0,
2311 TYPE_LENGTH (value_type (result_value)));
2313 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2314 return result_value;
2317 gdb_assert_not_reached ("regnum out of bound");
2320 /* Implement the "pseudo_register_write" gdbarch method. */
2323 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2324 int regnum, const gdb_byte *buf)
2326 gdb_byte reg_buf[V_REGISTER_SIZE];
2328 /* Ensure the register buffer is zero, we want gdb writes of the
2329 various 'scalar' pseudo registers to behavior like architectural
2330 writes, register width bytes are written the remainder are set to
2332 memset (reg_buf, 0, sizeof (reg_buf));
2334 regnum -= gdbarch_num_regs (gdbarch);
2336 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2338 /* pseudo Q registers */
2341 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2342 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2343 regcache_raw_write (regcache, v_regnum, reg_buf);
2347 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2349 /* pseudo D registers */
2352 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2353 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2354 regcache_raw_write (regcache, v_regnum, reg_buf);
2358 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2362 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2363 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2364 regcache_raw_write (regcache, v_regnum, reg_buf);
2368 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2370 /* pseudo H registers */
2373 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2374 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2375 regcache_raw_write (regcache, v_regnum, reg_buf);
2379 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2381 /* pseudo B registers */
2384 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2385 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2386 regcache_raw_write (regcache, v_regnum, reg_buf);
2390 gdb_assert_not_reached ("regnum out of bound");
2393 /* Callback function for user_reg_add. */
2395 static struct value *
2396 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2398 const int *reg_p = (const int *) baton;
2400 return value_of_register (*reg_p, frame);
2404 /* Implement the "software_single_step" gdbarch method, needed to
2405 single step through atomic sequences on AArch64. */
2407 static std::vector<CORE_ADDR>
2408 aarch64_software_single_step (struct regcache *regcache)
2410 struct gdbarch *gdbarch = regcache->arch ();
2411 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2412 const int insn_size = 4;
2413 const int atomic_sequence_length = 16; /* Instruction sequence length. */
2414 CORE_ADDR pc = regcache_read_pc (regcache);
2415 CORE_ADDR breaks[2] = { -1, -1 };
2417 CORE_ADDR closing_insn = 0;
2418 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2419 byte_order_for_code);
2422 int bc_insn_count = 0; /* Conditional branch instruction count. */
2423 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed). */
2426 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2429 /* Look for a Load Exclusive instruction which begins the sequence. */
2430 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2433 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2436 insn = read_memory_unsigned_integer (loc, insn_size,
2437 byte_order_for_code);
2439 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2441 /* Check if the instruction is a conditional branch. */
2442 if (inst.opcode->iclass == condbranch)
2444 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2446 if (bc_insn_count >= 1)
2449 /* It is, so we'll try to set a breakpoint at the destination. */
2450 breaks[1] = loc + inst.operands[0].imm.value;
2456 /* Look for the Store Exclusive which closes the atomic sequence. */
2457 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2464 /* We didn't find a closing Store Exclusive instruction, fall back. */
2468 /* Insert breakpoint after the end of the atomic sequence. */
2469 breaks[0] = loc + insn_size;
2471 /* Check for duplicated breakpoints, and also check that the second
2472 breakpoint is not within the atomic sequence. */
2474 && (breaks[1] == breaks[0]
2475 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2476 last_breakpoint = 0;
2478 std::vector<CORE_ADDR> next_pcs;
2480 /* Insert the breakpoint at the end of the sequence, and one at the
2481 destination of the conditional branch, if it exists. */
2482 for (index = 0; index <= last_breakpoint; index++)
2483 next_pcs.push_back (breaks[index]);
2488 struct aarch64_displaced_step_closure : public displaced_step_closure
2490 /* It is true when condition instruction, such as B.CON, TBZ, etc,
2491 is being displaced stepping. */
2494 /* PC adjustment offset after displaced stepping. */
2495 int32_t pc_adjust = 0;
2498 /* Data when visiting instructions for displaced stepping. */
2500 struct aarch64_displaced_step_data
2502 struct aarch64_insn_data base;
2504 /* The address where the instruction will be executed at. */
2506 /* Buffer of instructions to be copied to NEW_ADDR to execute. */
2507 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2508 /* Number of instructions in INSN_BUF. */
2509 unsigned insn_count;
2510 /* Registers when doing displaced stepping. */
2511 struct regcache *regs;
2513 aarch64_displaced_step_closure *dsc;
2516 /* Implementation of aarch64_insn_visitor method "b". */
/* Relocate a B/BL instruction.  If the original target is reachable
   from the scratch pad (offset encodable in 28 bits) emit a direct B;
   otherwise emit a NOP and record OFFSET in pc_adjust so the fixup
   phase redirects the PC.  For BL, LR is written here with the return
   address of the ORIGINAL instruction (insn_addr + 4), since the
   emitted B would otherwise leave a wrong LR. */
2519 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2520 struct aarch64_insn_data *data)
2522 struct aarch64_displaced_step_data *dsd
2523 = (struct aarch64_displaced_step_data *) data;
2524 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2526 if (can_encode_int32 (new_offset, 28))
2528 /* Emit B rather than BL, because executing BL on a new address
2529 will get the wrong address into LR. In order to avoid this,
2530 we emit B, and update LR if the instruction is BL. */
2531 emit_b (dsd->insn_buf, 0, new_offset);
2537 emit_nop (dsd->insn_buf);
2539 dsd->dsc->pc_adjust = offset;
2545 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2546 data->insn_addr + 4);
2550 /* Implementation of aarch64_insn_visitor method "b_cond". */
2553 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2554 struct aarch64_insn_data *data)
2556 struct aarch64_displaced_step_data *dsd
2557 = (struct aarch64_displaced_step_data *) data;
2559 /* GDB has to fix up PC after displaced stepping this instruction
2560 differently depending on whether the condition is true or false.
2561 Instead of checking COND against conditional flags, we can use
2562 the following instructions, and GDB can tell how to fix up PC
2563 according to the PC value.
2565 B.COND TAKEN ; If cond is true, then jump to TAKEN.
/* The resulting PC (relative to the scratch pad) tells the fixup
   phase which path the condition took. */
2571 emit_bcond (dsd->insn_buf, cond, 8);
2573 dsd->dsc->pc_adjust = offset;
2574 dsd->insn_count = 1;
2577 /* Dynamically allocate a new register. If we know the register
2578 statically, we should make it a global as above instead of using this
/* Helper: build an aarch64_register operand value (NUM, IS64) on the fly. */
2581 static struct aarch64_register
2582 aarch64_register (unsigned num, int is64)
2584 return (struct aarch64_register) { num, is64 };
2587 /* Implementation of aarch64_insn_visitor method "cb". */
/* Relocate a CBZ/CBNZ.  A short scratch-pad-local CB{N}Z is emitted;
   the fixup phase uses the final PC plus pc_adjust (the original
   branch OFFSET) to resolve the taken/not-taken paths. */
2590 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2591 const unsigned rn, int is64,
2592 struct aarch64_insn_data *data)
2594 struct aarch64_displaced_step_data *dsd
2595 = (struct aarch64_displaced_step_data *) data;
2597 /* The offset is out of range for a compare and branch
2598 instruction. We can use the following instructions instead:
2600 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2605 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2606 dsd->insn_count = 1;
2608 dsd->dsc->pc_adjust = offset;
2611 /* Implementation of aarch64_insn_visitor method "tb". */
/* Relocate a TBZ/TBNZ, same scheme as the "cb" visitor above.
   Note RT is always treated as a 64-bit register here. */
2614 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2615 const unsigned rt, unsigned bit,
2616 struct aarch64_insn_data *data)
2618 struct aarch64_displaced_step_data *dsd
2619 = (struct aarch64_displaced_step_data *) data;
2621 /* The offset is out of range for a test bit and branch
2622 instruction.  We can use the following instructions instead:
2624 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2630 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2631 dsd->insn_count = 1;
2633 dsd->dsc->pc_adjust = offset;
2636 /* Implementation of aarch64_insn_visitor method "adr". */
/* ADR/ADRP is PC-relative, so instead of executing a copy we compute
   the result directly, write it to RD, and replace the instruction
   with a NOP; pc_adjust = 4 simply steps past the original insn. */
2639 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2640 const int is_adrp, struct aarch64_insn_data *data)
2642 struct aarch64_displaced_step_data *dsd
2643 = (struct aarch64_displaced_step_data *) data;
2644 /* We know exactly the address the ADR{P,} instruction will compute.
2645 We can just write it to the destination register. */
2646 CORE_ADDR address = data->insn_addr + offset;
2650 /* Clear the lower 12 bits of the offset to get the 4K page. */
2651 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2655 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2658 dsd->dsc->pc_adjust = 4;
2659 emit_nop (dsd->insn_buf);
2660 dsd->insn_count = 1;
2663 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
/* Literal loads are PC-relative.  We materialize the literal address
   into RT via the regcache, then emit LDR{,SW} RT, [RT, #0] so the
   copy executed in the scratch pad loads through that address. */
2666 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2667 const unsigned rt, const int is64,
2668 struct aarch64_insn_data *data)
2670 struct aarch64_displaced_step_data *dsd
2671 = (struct aarch64_displaced_step_data *) data;
2672 CORE_ADDR address = data->insn_addr + offset;
2673 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
2675 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
2679 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2680 aarch64_register (rt, 1), zero);
2682 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2683 aarch64_register (rt, 1), zero);
2685 dsd->dsc->pc_adjust = 4;
2688 /* Implementation of aarch64_insn_visitor method "others". */
/* Non-PC-relative instructions can be copied verbatim. */
2691 aarch64_displaced_step_others (const uint32_t insn,
2692 struct aarch64_insn_data *data)
2694 struct aarch64_displaced_step_data *dsd
2695 = (struct aarch64_displaced_step_data *) data;
2697 aarch64_emit_insn (dsd->insn_buf, insn);
2698 dsd->insn_count = 1;
/* Mask keeps all bits except the Rn field (bits 5-9): this matches
   RET <Xn> (0xd65f0000 | Rn << 5).  RET sets the PC itself, so no
   post-step adjustment is wanted. */
2700 if ((insn & 0xfffffc1f) == 0xd65f0000)
2703 dsd->dsc->pc_adjust = 0;
2706 dsd->dsc->pc_adjust = 4;
/* Visitor table consumed by aarch64_relocate_instruction; entry order
   must match the member order of struct aarch64_insn_visitor. */
2709 static const struct aarch64_insn_visitor visitor =
2711 aarch64_displaced_step_b,
2712 aarch64_displaced_step_b_cond,
2713 aarch64_displaced_step_cb,
2714 aarch64_displaced_step_tb,
2715 aarch64_displaced_step_adr,
2716 aarch64_displaced_step_ldr_literal,
2717 aarch64_displaced_step_others,
2720 /* Implement the "displaced_step_copy_insn" gdbarch method. */
/* Read the insn at FROM, relocate it via the visitor table into
   dsd.insn_buf, and write the result to the scratch pad at TO.
   Returns the closure consumed later by the fixup method. */
2722 struct displaced_step_closure *
2723 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2724 CORE_ADDR from, CORE_ADDR to,
2725 struct regcache *regs)
2727 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2728 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2729 struct aarch64_displaced_step_data dsd;
2732 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2735 /* Look for a Load Exclusive instruction which begins the sequence. */
2736 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2738 /* We can't displaced step atomic sequences. */
2742 std::unique_ptr<aarch64_displaced_step_closure> dsc
2743 (new aarch64_displaced_step_closure);
2744 dsd.base.insn_addr = from;
2747 dsd.dsc = dsc.get ();
2749 aarch64_relocate_instruction (insn, &visitor,
2750 (struct aarch64_insn_data *) &dsd);
2751 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2753 if (dsd.insn_count != 0)
2757 /* Instruction can be relocated to scratch pad. Copy
2758 relocated instruction(s) there. */
2759 for (i = 0; i < dsd.insn_count; i++)
2761 if (debug_displaced)
2763 debug_printf ("displaced: writing insn ");
2764 debug_printf ("%.8x", dsd.insn_buf[i]);
2765 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2767 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2768 (ULONGEST) dsd.insn_buf[i]);
2776 return dsc.release ();
2779 /* Implement the "displaced_step_fixup" gdbarch method. */
/* After the copied insn executed at TO, inspect the resulting PC:
   its offset from the scratch pad tells which path a conditional
   took; pc_adjust (set by the visitors) then yields the final PC
   relative to FROM. */
2782 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2783 struct displaced_step_closure *dsc_,
2784 CORE_ADDR from, CORE_ADDR to,
2785 struct regcache *regs)
2787 aarch64_displaced_step_closure *dsc = (aarch64_displaced_step_closure *) dsc_;
2793 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
2796 /* Condition is true. */
2798 else if (pc - to == 4)
2800 /* Condition is false. */
2804 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2807 if (dsc->pc_adjust != 0)
2809 if (debug_displaced)
2811 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2812 paddress (gdbarch, from), dsc->pc_adjust);
2814 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2815 from + dsc->pc_adjust);
2819 /* Implement the "displaced_step_hw_singlestep" gdbarch method. */
/* NOTE(review): body is not visible in this chunk; presumably a
   trivial predicate -- confirm against the full file. */
2822 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2823 struct displaced_step_closure *closure)
2828 /* Get the correct target description. */
/* The description is created lazily on first call and cached in a
   function-static pointer, so repeated calls return the same object. */
2831 aarch64_read_description ()
2833 static target_desc *aarch64_tdesc = NULL;
2834 target_desc **tdesc = &aarch64_tdesc;
2837 *tdesc = aarch64_create_target_description ();
2842 /* Initialize the current architecture based on INFO. If possible,
2843 re-use an architecture from ARCHES, which is a list of
2844 architectures already created during this debugging session.
2846 Called e.g. at program startup, when reading a core file, and when
2847 reading a binary file. */
2849 static struct gdbarch *
2850 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2852 struct gdbarch_tdep *tdep;
2853 struct gdbarch *gdbarch;
2854 struct gdbarch_list *best_arch;
2855 struct tdesc_arch_data *tdesc_data = NULL;
2856 const struct target_desc *tdesc = info.target_desc;
2859 const struct tdesc_feature *feature;
2861 int num_pseudo_regs = 0;
2863 /* Ensure we always have a target descriptor. */
2864 if (!tdesc_has_registers (tdesc))
2865 tdesc = aarch64_read_description ();
2869 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
2871 if (feature == NULL)
2874 tdesc_data = tdesc_data_alloc ();
2876 /* Validate the descriptor provides the mandatory core R registers
2877 and allocate their numbers. */
2878 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2880 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2881 aarch64_r_register_names[i]);
2883 num_regs = AARCH64_X0_REGNUM + i;
2885 /* Look for the V registers. */
2886 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2889 /* Validate the descriptor provides the mandatory V registers
2890 and allocate their numbers. */
2891 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2893 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2894 aarch64_v_register_names[i]);
2896 num_regs = AARCH64_V0_REGNUM + i;
2898 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2899 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2900 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2901 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2902 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
2907 tdesc_data_cleanup (tdesc_data);
2911 /* AArch64 code is always little-endian. */
2912 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2914 /* If there is already a candidate, use it. */
2915 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2917 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2919 /* Found a match. */
/* Reusing an existing gdbarch: release the freshly allocated tdesc
   data before returning it. */
2923 if (best_arch != NULL)
2925 if (tdesc_data != NULL)
2926 tdesc_data_cleanup (tdesc_data);
2927 return best_arch->gdbarch;
2930 tdep = XCNEW (struct gdbarch_tdep);
2931 gdbarch = gdbarch_alloc (&info, tdep);
2933 /* This should be low enough for everything. */
2934 tdep->lowest_pc = 0x20;
2935 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2936 tdep->jb_elt_size = 8;
2938 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2939 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2941 /* Frame handling. */
2942 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2943 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2944 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2946 /* Advance PC across function entry code. */
2947 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2949 /* The stack grows downward. */
2950 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2952 /* Breakpoint manipulation. */
2953 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
2954 aarch64_breakpoint::kind_from_pc);
2955 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
2956 aarch64_breakpoint::bp_from_kind);
2957 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2958 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2960 /* Information about registers, etc. */
2961 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2962 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2963 set_gdbarch_num_regs (gdbarch, num_regs);
2965 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2966 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2967 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2968 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2969 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2970 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2971 aarch64_pseudo_register_reggroup_p);
2973 /* The top byte of an address is known as the "tag" and is
2974 ignored by the kernel, the hardware, etc. and can be regarded
2975 as additional data associated with the address. */
2976 set_gdbarch_significant_addr_bit (gdbarch, 56);
/* Fundamental type sizes for the LP64 AArch64 ABI. */
2979 set_gdbarch_short_bit (gdbarch, 16);
2980 set_gdbarch_int_bit (gdbarch, 32);
2981 set_gdbarch_float_bit (gdbarch, 32);
2982 set_gdbarch_double_bit (gdbarch, 64);
2983 set_gdbarch_long_double_bit (gdbarch, 128);
2984 set_gdbarch_long_bit (gdbarch, 64);
2985 set_gdbarch_long_long_bit (gdbarch, 64);
2986 set_gdbarch_ptr_bit (gdbarch, 64);
2987 set_gdbarch_char_signed (gdbarch, 0);
2988 set_gdbarch_wchar_signed (gdbarch, 0);
2989 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2990 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2991 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2993 /* Internal <-> external register number maps. */
2994 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2996 /* Returning results. */
2997 set_gdbarch_return_value (gdbarch, aarch64_return_value);
3000 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3002 /* Virtual tables. */
3003 set_gdbarch_vbit_in_delta (gdbarch, 1);
3005 /* Hook in the ABI-specific overrides, if they have been registered. */
3006 info.target_desc = tdesc;
3007 info.tdesc_data = tdesc_data;
3008 gdbarch_init_osabi (info, gdbarch);
3010 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3012 /* Add some default predicates. */
3013 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3014 dwarf2_append_unwinders (gdbarch);
3015 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3017 frame_base_set_default (gdbarch, &aarch64_normal_base);
3019 /* Now we have tuned the configuration, set a few final things,
3020 based on what the OS ABI has told us. */
3022 if (tdep->jb_pc >= 0)
3023 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3025 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3027 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3029 /* Add standard register aliases. */
3030 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3031 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3032 value_of_aarch64_user_reg,
3033 &aarch64_register_aliases[i].regnum);
/* Dump the per-architecture (tdep) state to FILE for "maint print arch". */
3039 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3041 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3046 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3047 paddress (gdbarch, tdep->lowest_pc));
3053 static void aarch64_process_record_test (void);
/* Module initializer: register the gdbarch, the "set/show debug
   aarch64" maintenance command, and the unit-test hooks. */
3058 _initialize_aarch64_tdep (void)
3060 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3063 /* Debug this file's internals. */
3064 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3065 Set AArch64 debugging."), _("\
3066 Show AArch64 debugging."), _("\
3067 When on, AArch64 specific debugging is enabled."),
3070 &setdebuglist, &showdebuglist);
3073 selftests::register_test ("aarch64-analyze-prologue",
3074 selftests::aarch64_analyze_prologue_test);
3075 selftests::register_test ("aarch64-process-record",
3076 selftests::aarch64_process_record_test);
3077 selftests::record_xml_tdesc ("aarch64.xml",
3078 aarch64_create_target_description ());
3082 /* AArch64 process record-replay related structures, defines etc. */
/* Allocate and fill the register-record array (REGS) from RECORD_BUF.
   Fixed here: "&REGS[0]" had been corrupted to the mojibake
   sequence "(R)S[0]" by an HTML-entity decoding pass ("&reg" -> the
   registered-sign character). */
3084 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3087 unsigned int reg_len = LENGTH; \
3090 REGS = XNEWVEC (uint32_t, reg_len); \
3091 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
/* Allocate and fill the memory-record array (MEMS) from RECORD_BUF. */
3096 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3099 unsigned int mem_len = LENGTH; \
3102 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3103 memcpy(&MEMS->len, &RECORD_BUF[0], \
3104 sizeof(struct aarch64_mem_r) * LENGTH); \
3109 /* AArch64 record/replay structures and enumerations. */
/* One recorded memory access: LEN bytes at ADDR. */
3111 struct aarch64_mem_r
3113 uint64_t len; /* Record length. */
3114 uint64_t addr; /* Memory address. */
/* Result codes returned by the per-class record handlers below. */
3117 enum aarch64_record_result
3119 AARCH64_RECORD_SUCCESS,
3120 AARCH64_RECORD_UNSUPPORTED,
3121 AARCH64_RECORD_UNKNOWN
/* Working state threaded through the record handlers: the insn being
   decoded plus the register/memory records it produces. */
3124 typedef struct insn_decode_record_t
3126 struct gdbarch *gdbarch;
3127 struct regcache *regcache;
3128 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3129 uint32_t aarch64_insn; /* Insn to be recorded. */
3130 uint32_t mem_rec_count; /* Count of memory records. */
3131 uint32_t reg_rec_count; /* Count of register records. */
3132 uint32_t *aarch64_regs; /* Registers to be recorded. */
3133 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3134 } insn_decode_record;
3136 /* Record handler for data processing - register instructions. */
/* Records the destination register, plus CPSR when the instruction
   sets the condition flags. */
3139 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3141 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3142 uint32_t record_buf[4];
3144 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3145 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3146 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3148 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3152 /* Logical (shifted register). */
3153 if (insn_bits24_27 == 0x0a)
3154 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3156 else if (insn_bits24_27 == 0x0b)
3157 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3159 return AARCH64_RECORD_UNKNOWN;
3161 record_buf[0] = reg_rd;
3162 aarch64_insn_r->reg_rec_count = 1;
3164 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3168 if (insn_bits24_27 == 0x0b)
3170 /* Data-processing (3 source). */
3171 record_buf[0] = reg_rd;
3172 aarch64_insn_r->reg_rec_count = 1;
3174 else if (insn_bits24_27 == 0x0a)
3176 if (insn_bits21_23 == 0x00)
3178 /* Add/subtract (with carry). */
3179 record_buf[0] = reg_rd;
3180 aarch64_insn_r->reg_rec_count = 1;
3181 if (bit (aarch64_insn_r->aarch64_insn, 29))
3183 record_buf[1] = AARCH64_CPSR_REGNUM;
3184 aarch64_insn_r->reg_rec_count = 2;
3187 else if (insn_bits21_23 == 0x02)
3189 /* Conditional compare (register) and conditional compare
3190 (immediate) instructions. */
3191 record_buf[0] = AARCH64_CPSR_REGNUM;
3192 aarch64_insn_r->reg_rec_count = 1;
3194 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3196 /* Conditional select. */
3197 /* Data-processing (2 source). */
3198 /* Data-processing (1 source). */
3199 record_buf[0] = reg_rd;
3200 aarch64_insn_r->reg_rec_count = 1;
3203 return AARCH64_RECORD_UNKNOWN;
3207 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3209 return AARCH64_RECORD_SUCCESS;
3212 /* Record handler for data processing - immediate instructions. */
/* Records the destination register, plus CPSR for flag-setting
   Add/Subtract (immediate) and Logical (immediate) forms. */
3215 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3217 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3218 uint32_t record_buf[4];
3220 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3221 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3222 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3224 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3225 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3226 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3228 record_buf[0] = reg_rd;
3229 aarch64_insn_r->reg_rec_count = 1;
3231 else if (insn_bits24_27 == 0x01)
3233 /* Add/Subtract (immediate). */
3234 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3235 record_buf[0] = reg_rd;
3236 aarch64_insn_r->reg_rec_count = 1;
3238 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3240 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3242 /* Logical (immediate). */
3243 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3244 record_buf[0] = reg_rd;
3245 aarch64_insn_r->reg_rec_count = 1;
3247 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3250 return AARCH64_RECORD_UNKNOWN;
3252 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3254 return AARCH64_RECORD_SUCCESS;
3257 /* Record handler for branch, exception generation and system instructions. */
/* Branches record PC (and LR for the linking forms); SVC defers to
   the OS-ABI syscall recorder with the syscall number read from x8. */
3260 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3262 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3263 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3264 uint32_t record_buf[4];
3266 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3267 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3268 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3270 if (insn_bits28_31 == 0x0d)
3272 /* Exception generation instructions. */
3273 if (insn_bits24_27 == 0x04)
3275 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3276 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3277 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3279 ULONGEST svc_number;
3281 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3283 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3287 return AARCH64_RECORD_UNSUPPORTED;
3289 /* System instructions. */
3290 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3292 uint32_t reg_rt, reg_crn;
3294 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3295 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3297 /* Record rt in case of sysl and mrs instructions. */
3298 if (bit (aarch64_insn_r->aarch64_insn, 21))
3300 record_buf[0] = reg_rt;
3301 aarch64_insn_r->reg_rec_count = 1;
3303 /* Record cpsr for hint and msr(immediate) instructions. */
3304 else if (reg_crn == 0x02 || reg_crn == 0x04)
3306 record_buf[0] = AARCH64_CPSR_REGNUM;
3307 aarch64_insn_r->reg_rec_count = 1;
3310 /* Unconditional branch (register). */
3311 else if((insn_bits24_27 & 0x0e) == 0x06)
3313 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3314 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3315 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3318 return AARCH64_RECORD_UNKNOWN;
3320 /* Unconditional branch (immediate). */
3321 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3323 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3324 if (bit (aarch64_insn_r->aarch64_insn, 31))
3325 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3328 /* Compare & branch (immediate), Test & branch (immediate) and
3329 Conditional branch (immediate). */
3330 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3332 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3334 return AARCH64_RECORD_SUCCESS;
3337 /* Record handler for advanced SIMD load and store instructions. */
/* Bit 22 distinguishes load (record V registers) from store (record
   memory); bit 23 marks the post-index writeback forms, for which the
   base register Rn is additionally recorded.  record_buf_mem holds
   (length, address) pairs, hence mem_rec_count = mem_index / 2. */
3340 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3343 uint64_t addr_offset = 0;
3344 uint32_t record_buf[24];
3345 uint64_t record_buf_mem[24];
3346 uint32_t reg_rn, reg_rt;
3347 uint32_t reg_index = 0, mem_index = 0;
3348 uint8_t opcode_bits, size_bits;
3350 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3351 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3352 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3353 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3354 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3357 debug_printf ("Process record: Advanced SIMD load/store\n");
3359 /* Load/store single structure. */
3360 if (bit (aarch64_insn_r->aarch64_insn, 24))
3362 uint8_t sindex, scale, selem, esize, replicate = 0;
3363 scale = opcode_bits >> 2;
3364 selem = ((opcode_bits & 0x02) |
3365 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3369 if (size_bits & 0x01)
3370 return AARCH64_RECORD_UNKNOWN;
3373 if ((size_bits >> 1) & 0x01)
3374 return AARCH64_RECORD_UNKNOWN;
3375 if (size_bits & 0x01)
3377 if (!((opcode_bits >> 1) & 0x01))
3380 return AARCH64_RECORD_UNKNOWN;
3384 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3391 return AARCH64_RECORD_UNKNOWN;
3397 for (sindex = 0; sindex < selem; sindex++)
3399 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3400 reg_rt = (reg_rt + 1) % 32;
3404 for (sindex = 0; sindex < selem; sindex++)
3406 if (bit (aarch64_insn_r->aarch64_insn, 22))
3407 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3410 record_buf_mem[mem_index++] = esize / 8;
3411 record_buf_mem[mem_index++] = address + addr_offset;
3413 addr_offset = addr_offset + (esize / 8);
3414 reg_rt = (reg_rt + 1) % 32;
3418 /* Load/store multiple structure. */
3421 uint8_t selem, esize, rpt, elements;
3422 uint8_t eindex, rindex;
3424 esize = 8 << size_bits;
3425 if (bit (aarch64_insn_r->aarch64_insn, 30))
3426 elements = 128 / esize;
3428 elements = 64 / esize;
3430 switch (opcode_bits)
3432 /*LD/ST4 (4 Registers). */
3437 /*LD/ST1 (4 Registers). */
3442 /*LD/ST3 (3 Registers). */
3447 /*LD/ST1 (3 Registers). */
3452 /*LD/ST1 (1 Register). */
3457 /*LD/ST2 (2 Registers). */
3462 /*LD/ST1 (2 Registers). */
3468 return AARCH64_RECORD_UNSUPPORTED;
3471 for (rindex = 0; rindex < rpt; rindex++)
3472 for (eindex = 0; eindex < elements; eindex++)
3474 uint8_t reg_tt, sindex;
3475 reg_tt = (reg_rt + rindex) % 32;
3476 for (sindex = 0; sindex < selem; sindex++)
3478 if (bit (aarch64_insn_r->aarch64_insn, 22))
3479 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3482 record_buf_mem[mem_index++] = esize / 8;
3483 record_buf_mem[mem_index++] = address + addr_offset;
3485 addr_offset = addr_offset + (esize / 8);
3486 reg_tt = (reg_tt + 1) % 32;
3491 if (bit (aarch64_insn_r->aarch64_insn, 23))
3492 record_buf[reg_index++] = reg_rn;
3494 aarch64_insn_r->reg_rec_count = reg_index;
3495 aarch64_insn_r->mem_rec_count = mem_index / 2;
3496 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3498 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3500 return AARCH64_RECORD_SUCCESS;
3503 /* Record handler for load and store instructions. */
3506 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3508 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3509 uint8_t insn_bit23, insn_bit21;
3510 uint8_t opc, size_bits, ld_flag, vector_flag;
3511 uint32_t reg_rn, reg_rt, reg_rt2;
3512 uint64_t datasize, offset;
3513 uint32_t record_buf[8];
3514 uint64_t record_buf_mem[8];
3517 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3518 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3519 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3520 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3521 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3522 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3523 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3524 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3525 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3526 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3527 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3529 /* Load/store exclusive. */
3530 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3533 debug_printf ("Process record: load/store exclusive\n");
3537 record_buf[0] = reg_rt;
3538 aarch64_insn_r->reg_rec_count = 1;
3541 record_buf[1] = reg_rt2;
3542 aarch64_insn_r->reg_rec_count = 2;
3548 datasize = (8 << size_bits) * 2;
3550 datasize = (8 << size_bits);
3551 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3553 record_buf_mem[0] = datasize / 8;
3554 record_buf_mem[1] = address;
3555 aarch64_insn_r->mem_rec_count = 1;
3558 /* Save register rs. */
3559 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3560 aarch64_insn_r->reg_rec_count = 1;
3564 /* Load register (literal) instructions decoding. */
3565 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3568 debug_printf ("Process record: load register (literal)\n");
3570 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3572 record_buf[0] = reg_rt;
3573 aarch64_insn_r->reg_rec_count = 1;
3575 /* All types of load/store pair instructions decoding. */
3576 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3579 debug_printf ("Process record: load/store pair\n");
3585 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3586 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3590 record_buf[0] = reg_rt;
3591 record_buf[1] = reg_rt2;
3593 aarch64_insn_r->reg_rec_count = 2;
3598 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3600 size_bits = size_bits >> 1;
3601 datasize = 8 << (2 + size_bits);
3602 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3603 offset = offset << (2 + size_bits);
3604 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3606 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3608 if (imm7_off & 0x40)
3609 address = address - offset;
3611 address = address + offset;
3614 record_buf_mem[0] = datasize / 8;
3615 record_buf_mem[1] = address;
3616 record_buf_mem[2] = datasize / 8;
3617 record_buf_mem[3] = address + (datasize / 8);
3618 aarch64_insn_r->mem_rec_count = 2;
3620 if (bit (aarch64_insn_r->aarch64_insn, 23))
3621 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3623 /* Load/store register (unsigned immediate) instructions. */
3624 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3626 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3636 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3638 /* PRFM (immediate) */
3639 return AARCH64_RECORD_SUCCESS;
3641 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3643 /* LDRSW (immediate) */
3657 debug_printf ("Process record: load/store (unsigned immediate):"
3658 " size %x V %d opc %x\n", size_bits, vector_flag,
3664 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3665 datasize = 8 << size_bits;
3666 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3668 offset = offset << size_bits;
3669 address = address + offset;
3671 record_buf_mem[0] = datasize >> 3;
3672 record_buf_mem[1] = address;
3673 aarch64_insn_r->mem_rec_count = 1;
3678 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3680 record_buf[0] = reg_rt;
3681 aarch64_insn_r->reg_rec_count = 1;
3684 /* Load/store register (register offset) instructions. */
3685 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3686 && insn_bits10_11 == 0x02 && insn_bit21)
3689 debug_printf ("Process record: load/store (register offset)\n");
3690 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3697 if (size_bits != 0x03)
3700 return AARCH64_RECORD_UNKNOWN;
3704 ULONGEST reg_rm_val;
3706 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3707 bits (aarch64_insn_r->aarch64_insn, 16, 20), ®_rm_val);
3708 if (bit (aarch64_insn_r->aarch64_insn, 12))
3709 offset = reg_rm_val << size_bits;
3711 offset = reg_rm_val;
3712 datasize = 8 << size_bits;
3713 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3715 address = address + offset;
3716 record_buf_mem[0] = datasize >> 3;
3717 record_buf_mem[1] = address;
3718 aarch64_insn_r->mem_rec_count = 1;
3723 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3725 record_buf[0] = reg_rt;
3726 aarch64_insn_r->reg_rec_count = 1;
3729 /* Load/store register (immediate and unprivileged) instructions. */
3730 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3735 debug_printf ("Process record: load/store "
3736 "(immediate and unprivileged)\n");
3738 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3745 if (size_bits != 0x03)
3748 return AARCH64_RECORD_UNKNOWN;
3753 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3754 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3755 datasize = 8 << size_bits;
3756 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3758 if (insn_bits10_11 != 0x01)
3760 if (imm9_off & 0x0100)
3761 address = address - offset;
3763 address = address + offset;
3765 record_buf_mem[0] = datasize >> 3;
3766 record_buf_mem[1] = address;
3767 aarch64_insn_r->mem_rec_count = 1;
3772 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3774 record_buf[0] = reg_rt;
3775 aarch64_insn_r->reg_rec_count = 1;
3777 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3778 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3780 /* Advanced SIMD load/store instructions. */
3782 return aarch64_record_asimd_load_store (aarch64_insn_r);
3784 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3786 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3788 return AARCH64_RECORD_SUCCESS;
3791 /* Record handler for data processing SIMD and floating point instructions. */
/* NOTE(review): this listing is elided — the leading numbers on each line are
   the original file's own line numbers, and some original lines (the return
   type, braces, `else` keywords) are missing from this view.  The code below
   is left byte-identical; only comments were added.

   Decodes the SIMD/FP instruction group and records the single destination
   register that the instruction will modify: an X register, a V (SIMD/FP)
   register, or CPSR for the compare forms.  Exactly one register is recorded
   (see the gdb_assert before REG_ALLOC below).  Returns
   AARCH64_RECORD_SUCCESS on a recognized encoding, AARCH64_RECORD_UNKNOWN
   otherwise.  */
3794 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3796 uint8_t insn_bit21, opcode, rmode, reg_rd;
3797 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3798 uint8_t insn_bits11_14;
3799 uint32_t record_buf[2];
/* Pull out the fixed fields used by every sub-decode below.  */
3801 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3802 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3803 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3804 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3805 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3806 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3807 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3808 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3809 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3812 debug_printf ("Process record: data processing SIMD/FP: ");
/* Scalar floating point group.  */
3814 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3816 /* Floating point - fixed point conversion instructions. */
3820 debug_printf ("FP - fixed point conversion");
/* FCVTZ* to a general register writes Xd; the other direction
   writes a SIMD/FP register.  */
3822 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3823 record_buf[0] = reg_rd;
3825 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3827 /* Floating point - conditional compare instructions. */
3828 else if (insn_bits10_11 == 0x01)
3831 debug_printf ("FP - conditional compare");
/* Compares only set the condition flags.  */
3833 record_buf[0] = AARCH64_CPSR_REGNUM;
3835 /* Floating point - data processing (2-source) and
3836 conditional select instructions. */
3837 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3840 debug_printf ("FP - DP (2-source)");
3842 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3844 else if (insn_bits10_11 == 0x00)
3846 /* Floating point - immediate instructions. */
3847 if ((insn_bits12_15 & 0x01) == 0x01
3848 || (insn_bits12_15 & 0x07) == 0x04)
3851 debug_printf ("FP - immediate");
3852 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3854 /* Floating point - compare instructions. */
3855 else if ((insn_bits12_15 & 0x03) == 0x02)
/* NOTE(review): this debug string still says "FP - immediate" although
   this is the compare branch — looks like a copy/paste from the branch
   above.  Affects debug output only; TODO confirm against upstream.  */
3858 debug_printf ("FP - immediate");
3859 record_buf[0] = AARCH64_CPSR_REGNUM;
3861 /* Floating point - integer conversions instructions. */
3862 else if (insn_bits12_15 == 0x00)
3864 /* Convert float to integer instruction. */
3865 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3868 debug_printf ("float to int conversion");
3870 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3872 /* Convert integer to float instruction. */
3873 else if ((opcode >> 1) == 0x01 && !rmode)
3876 debug_printf ("int to float conversion");
3878 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3880 /* Move float to integer instruction. */
3881 else if ((opcode >> 1) == 0x03)
3884 debug_printf ("move float to int");
/* FMOV direction is selected by opcode bit 0: to general register
   (Xd) when clear, to SIMD/FP register (Vd) when set.  */
3886 if (!(opcode & 0x01))
3887 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3889 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3892 return AARCH64_RECORD_UNKNOWN;
3895 return AARCH64_RECORD_UNKNOWN;
3898 return AARCH64_RECORD_UNKNOWN;
/* Advanced SIMD group.  */
3900 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3903 debug_printf ("SIMD copy");
3905 /* Advanced SIMD copy instructions. */
3906 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3907 && !bit (aarch64_insn_r->aarch64_insn, 15)
3908 && bit (aarch64_insn_r->aarch64_insn, 10))
/* SMOV/UMOV extract to a general register; all other copies write
   a SIMD/FP register.  */
3910 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3911 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3913 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3916 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3918 /* All remaining floating point or advanced SIMD instructions. */
3922 debug_printf ("all remain");
3924 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3928 debug_printf ("\n");
/* Every path above filled record_buf[0] with exactly one destination
   register; publish it to the insn record.  */
3930 aarch64_insn_r->reg_rec_count++;
3931 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3932 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3934 return AARCH64_RECORD_SUCCESS;
3937 /* Decodes insns type and invokes its record handler. */
3940 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3942 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3944 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3945 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3946 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3947 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3949 /* Data processing - immediate instructions. */
3950 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3951 return aarch64_record_data_proc_imm (aarch64_insn_r);
3953 /* Branch, exception generation and system instructions. */
3954 if (ins_bit26 && !ins_bit27 && ins_bit28)
3955 return aarch64_record_branch_except_sys (aarch64_insn_r);
3957 /* Load and store instructions. */
3958 if (!ins_bit25 && ins_bit27)
3959 return aarch64_record_load_store (aarch64_insn_r);
3961 /* Data processing - register instructions. */
3962 if (ins_bit25 && !ins_bit26 && ins_bit27)
3963 return aarch64_record_data_proc_reg (aarch64_insn_r);
3965 /* Data processing - SIMD and floating point instructions. */
3966 if (ins_bit25 && ins_bit26 && ins_bit27)
3967 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3969 return AARCH64_RECORD_UNSUPPORTED;
3972 /* Cleans up local record registers and memory allocations. */
3975 deallocate_reg_mem (insn_decode_record *record)
3977 xfree (record->aarch64_regs);
3978 xfree (record->aarch64_mems);
3982 namespace selftests {
/* Unit-test the record/replay instruction decoder: a PRFM (prefetch) is a
   hint with no architectural side effects, so recording it must succeed
   while recording zero registers and zero memory locations.

   NOTE(review): this listing is elided — the leading numbers on each line
   are the original file's own line numbers, and some lines (e.g. `static
   void`, `{`, the declaration of `ret`, the enclosing `#if GDB_SELF_TEST`)
   are missing from this view.  Code is left byte-identical.  */
3985 aarch64_process_record_test (void)
3987 struct gdbarch_info info;
/* Build a bare aarch64 gdbarch to decode against — no target needed.  */
3990 gdbarch_info_init (&info);
3991 info.bfd_arch_info = bfd_scan_arch ("aarch64");
3993 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
3994 SELF_CHECK (gdbarch != NULL);
3996 insn_decode_record aarch64_record;
/* Zero-init so reg_rec_count/mem_rec_count start at 0 and the pointer
   members are NULL for deallocate_reg_mem below.  */
3998 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3999 aarch64_record.regcache = NULL;
4000 aarch64_record.this_addr = 0;
4001 aarch64_record.gdbarch = gdbarch;
4003 /* 20 00 80 f9 prfm pldl1keep, [x1] */
4004 aarch64_record.aarch64_insn = 0xf9800020;
4005 ret = aarch64_record_decode_insn_handler (&aarch64_record);
/* Prefetch decodes successfully but changes no machine state.  */
4006 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4007 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4008 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4010 deallocate_reg_mem (&aarch64_record);
4013 } // namespace selftests
4014 #endif /* GDB_SELF_TEST */
4016 /* Parse the current instruction and record the values of the registers and
4017 memory that will be changed in current instruction to record_arch_list
4018 return -1 if something is wrong. */
4021 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4022 CORE_ADDR insn_addr)
4024 uint32_t rec_no = 0;
4025 uint8_t insn_size = 4;
4027 gdb_byte buf[insn_size];
4028 insn_decode_record aarch64_record;
4030 memset (&buf[0], 0, insn_size);
4031 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4032 target_read_memory (insn_addr, &buf[0], insn_size);
4033 aarch64_record.aarch64_insn
4034 = (uint32_t) extract_unsigned_integer (&buf[0],
4036 gdbarch_byte_order (gdbarch));
4037 aarch64_record.regcache = regcache;
4038 aarch64_record.this_addr = insn_addr;
4039 aarch64_record.gdbarch = gdbarch;
4041 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4042 if (ret == AARCH64_RECORD_UNSUPPORTED)
4044 printf_unfiltered (_("Process record does not support instruction "
4045 "0x%0x at address %s.\n"),
4046 aarch64_record.aarch64_insn,
4047 paddress (gdbarch, insn_addr));
4053 /* Record registers. */
4054 record_full_arch_list_add_reg (aarch64_record.regcache,
4056 /* Always record register CPSR. */
4057 record_full_arch_list_add_reg (aarch64_record.regcache,
4058 AARCH64_CPSR_REGNUM);
4059 if (aarch64_record.aarch64_regs)
4060 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4061 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4062 aarch64_record.aarch64_regs[rec_no]))
4065 /* Record memories. */
4066 if (aarch64_record.aarch64_mems)
4067 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4068 if (record_full_arch_list_add_mem
4069 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4070 aarch64_record.aarch64_mems[rec_no].len))
4073 if (record_full_arch_list_add_end ())
4077 deallocate_reg_mem (&aarch64_record);