1 /* Common target dependent code for GDB on AArch64 systems.
3 Copyright (C) 2009-2017 Free Software Foundation, Inc.
4 Contributed by ARM Ltd.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
29 #include "reggroups.h"
32 #include "arch-utils.h"
34 #include "frame-unwind.h"
35 #include "frame-base.h"
36 #include "trad-frame.h"
38 #include "dwarf2-frame.h"
40 #include "prologue-value.h"
41 #include "target-descriptions.h"
42 #include "user-regs.h"
49 #include "aarch64-tdep.h"
52 #include "elf/aarch64.h"
57 #include "record-full.h"
59 #include "features/aarch64.c"
61 #include "arch/aarch64-insn.h"
63 #include "opcode/aarch64.h"
/* Bitfield extraction helpers used by the instruction decoder.

   submask (X)        -- mask covering bits [0, X] inclusive.
   bit (OBJ, ST)      -- bit ST of OBJ.
   bits (OBJ, ST, FN) -- bits [ST, FN] inclusive of OBJ.

   Use an unsigned 64-bit constant for the shift: the old '1L' form
   invoked undefined behavior for X >= 31 on hosts where 'long' is
   32 bits (e.g. LLP64 Windows), since 1L << 32 overflows the type.  */
#define submask(x) ((1ULL << ((x) + 1)) - 1)
#define bit(obj,st) (((obj) >> (st)) & 1)
#define bits(obj,st,fn) (((obj) >> (st)) & submask ((fn) - (st)))
/* Pseudo register base numbers.  These are offsets within the pseudo
   register space; each of the Q/D/S/H/B banks below holds one entry per
   V register (the D bank is sized by AARCH64_D_REGISTER_COUNT, the rest
   use the literal 32).  */
#define AARCH64_Q0_REGNUM 0
#define AARCH64_D0_REGNUM (AARCH64_Q0_REGNUM + AARCH64_D_REGISTER_COUNT)
#define AARCH64_S0_REGNUM (AARCH64_D0_REGNUM + 32)
#define AARCH64_H0_REGNUM (AARCH64_S0_REGNUM + 32)
#define AARCH64_B0_REGNUM (AARCH64_H0_REGNUM + 32)
/* The standard register names, and all the valid aliases for them.
   Each entry maps an alias string onto the raw register number it
   refers to; the "w" names alias the full X registers (GDB presents
   the 32-bit view via the same raw register).  */
  const char *const name;
} aarch64_register_aliases[] =
  /* 64-bit register names.  */
  {"fp", AARCH64_FP_REGNUM},
  {"lr", AARCH64_LR_REGNUM},
  {"sp", AARCH64_SP_REGNUM},
  /* 32-bit register names.  Each wN maps to the raw xN register.  */
  {"w0", AARCH64_X0_REGNUM + 0},
  {"w1", AARCH64_X0_REGNUM + 1},
  {"w2", AARCH64_X0_REGNUM + 2},
  {"w3", AARCH64_X0_REGNUM + 3},
  {"w4", AARCH64_X0_REGNUM + 4},
  {"w5", AARCH64_X0_REGNUM + 5},
  {"w6", AARCH64_X0_REGNUM + 6},
  {"w7", AARCH64_X0_REGNUM + 7},
  {"w8", AARCH64_X0_REGNUM + 8},
  {"w9", AARCH64_X0_REGNUM + 9},
  {"w10", AARCH64_X0_REGNUM + 10},
  {"w11", AARCH64_X0_REGNUM + 11},
  {"w12", AARCH64_X0_REGNUM + 12},
  {"w13", AARCH64_X0_REGNUM + 13},
  {"w14", AARCH64_X0_REGNUM + 14},
  {"w15", AARCH64_X0_REGNUM + 15},
  {"w16", AARCH64_X0_REGNUM + 16},
  {"w17", AARCH64_X0_REGNUM + 17},
  {"w18", AARCH64_X0_REGNUM + 18},
  {"w19", AARCH64_X0_REGNUM + 19},
  {"w20", AARCH64_X0_REGNUM + 20},
  {"w21", AARCH64_X0_REGNUM + 21},
  {"w22", AARCH64_X0_REGNUM + 22},
  {"w23", AARCH64_X0_REGNUM + 23},
  {"w24", AARCH64_X0_REGNUM + 24},
  {"w25", AARCH64_X0_REGNUM + 25},
  {"w26", AARCH64_X0_REGNUM + 26},
  {"w27", AARCH64_X0_REGNUM + 27},
  {"w28", AARCH64_X0_REGNUM + 28},
  {"w29", AARCH64_X0_REGNUM + 29},
  {"w30", AARCH64_X0_REGNUM + 30},
  /* Intra-procedure-call scratch register aliases for x16/x17.  */
  {"ip0", AARCH64_X0_REGNUM + 16},
  {"ip1", AARCH64_X0_REGNUM + 17}
/* The required core 'R' registers.  */
static const char *const aarch64_r_register_names[] =
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_X0_REGNUM!  */
  "x0", "x1", "x2", "x3",
  "x4", "x5", "x6", "x7",
  "x8", "x9", "x10", "x11",
  "x12", "x13", "x14", "x15",
  "x16", "x17", "x18", "x19",
  "x20", "x21", "x22", "x23",
  "x24", "x25", "x26", "x27",
  "x28", "x29", "x30", "sp",
/* The FP/SIMD 'V' registers.  */
static const char *const aarch64_v_register_names[] =
  /* These registers must appear in consecutive RAW register number
     order and they must begin with AARCH64_V0_REGNUM!  */
  "v0", "v1", "v2", "v3",
  "v4", "v5", "v6", "v7",
  "v8", "v9", "v10", "v11",
  "v12", "v13", "v14", "v15",
  "v16", "v17", "v18", "v19",
  "v20", "v21", "v22", "v23",
  "v24", "v25", "v26", "v27",
  "v28", "v29", "v30", "v31",
/* AArch64 prologue cache structure.  Filled in by the prologue
   analyzer and consulted by the prologue-based frame unwinder.  */
struct aarch64_prologue_cache
  /* The program counter at the start of the function.  It is used to
     identify this frame as a prologue frame.  */
  /* The program counter at the time this frame was created; i.e. where
     this function was called from.  It is used to identify this frame as a
  /* The stack pointer at the time this frame was created; i.e. the
     caller's stack pointer when this function was called.  It is used
     to identify this frame.  */
  /* Is the target available to read from?  */
  /* The frame base for this frame is just prev_sp - frame size.
     FRAMESIZE is the distance from the frame pointer to the
     initial stack pointer.  */
  /* The register used to hold the frame pointer for this frame.  */
  /* Saved register offsets.  Offsets relative to prev_sp until
     converted to absolute addresses by the cache builder.  */
  struct trad_frame_saved_reg *saved_regs;
/* Print the current setting of the "debug aarch64" variable to FILE.
   Callback for the associated "show" command.  */
show_aarch64_debug (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
  fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
/* Abstract instruction reader.  Lets the prologue analyzer fetch
   instructions either from the live target or (in selftests) from a
   canned array.  */
class abstract_instruction_reader
  /* Read in one instruction of LEN bytes at MEMADDR, using BYTE_ORDER.  */
  virtual ULONGEST read (CORE_ADDR memaddr, int len,
			 enum bfd_endian byte_order) = 0;
/* Instruction reader from real target.  */
class instruction_reader : public abstract_instruction_reader
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    return read_code_unsigned_integer (memaddr, len, byte_order);
/* Analyze a prologue, looking for a recognizable stack frame
   and frame pointer.  Scan until we encounter a store that could
   clobber the stack frame unexpectedly, or an unknown instruction.
   Instructions are fetched through READER; results (frame register,
   frame size, saved register offsets) are recorded in CACHE.  */
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache,
			  abstract_instruction_reader& reader)
  enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
  /* Track X registers and D registers in prologue.  */
  pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
  struct pv_area *stack;
  struct cleanup *back_to;
  /* Each register starts as "its value at function entry".  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
    regs[i] = pv_register (i, 0);
  stack = make_pv_area (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
  back_to = make_cleanup_free_pv_area (stack);
  /* AArch64 instructions are always 4 bytes wide.  */
  for (; start < limit; start += 4)
      insn = reader.read (start, 4, byte_order_for_code);
      if (aarch64_decode_insn (insn, &inst, 1) != 0)
      /* ADD/SUB (immediate): track adjustments of SP/FP.  */
      if (inst.opcode->iclass == addsub_imm
	  && (inst.opcode->op == OP_ADD
	      || strcmp ("sub", inst.opcode->name) == 0))
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
	  if (inst.opcode->op == OP_ADD)
	      regs[rd] = pv_add_constant (regs[rn],
					  inst.operands[2].imm.value);
	      regs[rd] = pv_add_constant (regs[rn],
					  -inst.operands[2].imm.value);
      else if (inst.opcode->iclass == pcreladdr
	       && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
	  /* ADRP: destination becomes an unknown value.  */
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  regs[inst.operands[0].reg.regno] = pv_unknown ();
      else if (inst.opcode->iclass == branch_imm)
	  /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == condbranch)
	  /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == branch_reg)
	  /* Stop analysis on branch.  */
      else if (inst.opcode->iclass == compbranch)
	  /* Stop analysis on branch.  */
      else if (inst.opcode->op == OP_MOVZ)
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  regs[inst.operands[0].reg.regno] = pv_unknown ();
      else if (inst.opcode->iclass == log_shift
	       && strcmp (inst.opcode->name, "orr") == 0)
	  /* ORR (register): "mov rd, rn" when the shift amount is 0.  */
	  unsigned rd = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].reg.regno;
	  unsigned rm = inst.operands[2].reg.regno;
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
	  if (inst.operands[2].shifter.amount == 0
	      && rn == AARCH64_SP_REGNUM)
	      debug_printf ("aarch64: prologue analysis gave up "
			    "addr=%s opcode=0x%x (orr x register)\n",
			    core_addr_to_string_nz (start), insn);
      else if (inst.opcode->op == OP_STUR)
	  /* STUR: record the store into the tracked stack area.  */
	  unsigned rt = inst.operands[0].reg.regno;
	  unsigned rn = inst.operands[1].addr.base_regno;
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
	  gdb_assert (!inst.operands[1].addr.offset.is_reg);
	  pv_area_store (stack, pv_add_constant (regs[rn],
						 inst.operands[1].addr.offset.imm),
			 is64 ? 8 : 4, regs[rt]);
      else if ((inst.opcode->iclass == ldstpair_off
		|| (inst.opcode->iclass == ldstpair_indexed
		    && inst.operands[2].addr.preind))
	       && strcmp ("stp", inst.opcode->name) == 0)
	  /* STP with addressing mode Pre-indexed and Base register.  */
	  unsigned rn = inst.operands[2].addr.base_regno;
	  int32_t imm = inst.operands[2].addr.offset.imm;
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
		      || inst.operands[1].type == AARCH64_OPND_Ft2);
	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
	  gdb_assert (!inst.operands[2].addr.offset.is_reg);
	  /* If recording this store would invalidate the store area
	     (perhaps because rn is not known) then we should abandon
	     further prologue analysis.  */
	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm)))
	  if (pv_area_store_would_trash (stack,
					 pv_add_constant (regs[rn], imm + 8)))
	  rt1 = inst.operands[0].reg.regno;
	  rt2 = inst.operands[1].reg.regno;
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	      /* Only bottom 64-bit of each V register (D register) need
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt1 += AARCH64_X_REGISTER_COUNT;
	      rt2 += AARCH64_X_REGISTER_COUNT;
	  pv_area_store (stack, pv_add_constant (regs[rn], imm), 8,
	  pv_area_store (stack, pv_add_constant (regs[rn], imm + 8), 8,
	  if (inst.operands[2].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);
      else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
		    && (inst.opcode->op == OP_STR_POS
			|| inst.opcode->op == OP_STRF_POS)))
	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
	       && strcmp ("str", inst.opcode->name) == 0)
	  /* STR (immediate) */
	  unsigned int rt = inst.operands[0].reg.regno;
	  int32_t imm = inst.operands[1].addr.offset.imm;
	  unsigned int rn = inst.operands[1].addr.base_regno;
	    = (aarch64_get_qualifier_esize (inst.operands[0].qualifier) == 8);
	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
		      || inst.operands[0].type == AARCH64_OPND_Ft);
	  if (inst.operands[0].type == AARCH64_OPND_Ft)
	      /* Only bottom 64-bit of each V register (D register) need
	      gdb_assert (inst.operands[0].qualifier == AARCH64_OPND_QLF_S_D);
	      rt += AARCH64_X_REGISTER_COUNT;
	  pv_area_store (stack, pv_add_constant (regs[rn], imm),
			 is64 ? 8 : 4, regs[rt]);
	  if (inst.operands[1].addr.writeback)
	    regs[rn] = pv_add_constant (regs[rn], imm);
      else if (inst.opcode->iclass == testbranch)
	  /* Stop analysis on branch.  */
	  debug_printf ("aarch64: prologue analysis gave up addr=%s"
			core_addr_to_string_nz (start), insn);
  do_cleanups (back_to);
  /* Decide which register (if any) is acting as the frame base.  */
  if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
      /* Frame pointer is fp.  Frame size is constant.  */
      cache->framereg = AARCH64_FP_REGNUM;
      cache->framesize = -regs[AARCH64_FP_REGNUM].k;
  else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
      /* Try the stack pointer.  */
      cache->framesize = -regs[AARCH64_SP_REGNUM].k;
      cache->framereg = AARCH64_SP_REGNUM;
      /* We're just out of luck.  We don't know where the frame is.  */
      cache->framereg = -1;
      cache->framesize = 0;
  /* Record where each X register was saved, as an offset.  */
  for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      if (pv_area_find_reg (stack, gdbarch, i, &offset))
	cache->saved_regs[i].addr = offset;
  /* Likewise for the D registers, which live past the raw registers
     in the pseudo register space.  */
  for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
      int regnum = gdbarch_num_regs (gdbarch);
      if (pv_area_find_reg (stack, gdbarch, i + AARCH64_X_REGISTER_COUNT,
	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].addr = offset;
  do_cleanups (back_to);
/* Convenience overload: analyze the prologue reading instructions
   from the real target memory.  */
aarch64_analyze_prologue (struct gdbarch *gdbarch,
			  CORE_ADDR start, CORE_ADDR limit,
			  struct aarch64_prologue_cache *cache)
  instruction_reader reader;
  return aarch64_analyze_prologue (gdbarch, start, limit, cache,
namespace selftests {
/* Instruction reader from manually cooked instruction sequences.
   Serves canned 32-bit opcodes indexed by address / 4.  */
class instruction_reader_test : public abstract_instruction_reader
  template<size_t SIZE>
  explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
    : m_insns (insns), m_insns_size (SIZE)
  ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
    /* Only aligned 4-byte reads within the canned array are valid.  */
    SELF_CHECK (len == 4);
    SELF_CHECK (memaddr % 4 == 0);
    SELF_CHECK (memaddr / 4 < m_insns_size);
    return m_insns[memaddr / 4];
  const uint32_t *m_insns;
/* Unit test for aarch64_analyze_prologue, driven by canned
   instruction sequences instead of a live target.  */
aarch64_analyze_prologue_test (void)
  struct gdbarch_info info;
  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_scan_arch ("aarch64");
  struct gdbarch *gdbarch = gdbarch_find_by_info (info);
  SELF_CHECK (gdbarch != NULL);
  /* Test the simple prologue in which frame pointer is used.  */
  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
  static const uint32_t insns[] = {
    0xa9af7bfd, /* stp x29, x30, [sp,#-272]! */
    0x910003fd, /* mov x29, sp */
    0x97ffffe6, /* bl 0x400580 */
  instruction_reader_test reader (insns);
  CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
  /* Analysis must stop at the BL, i.e. after two instructions.  */
  SELF_CHECK (end == 4 * 2);
  SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
  SELF_CHECK (cache.framesize == 272);
  for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
    if (i == AARCH64_FP_REGNUM)
      SELF_CHECK (cache.saved_regs[i].addr == -272);
    else if (i == AARCH64_LR_REGNUM)
      SELF_CHECK (cache.saved_regs[i].addr == -264);
      SELF_CHECK (cache.saved_regs[i].addr == -1);
  for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    int regnum = gdbarch_num_regs (gdbarch);
    SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
  /* Test a prologue in which STR is used and frame pointer is not
  struct aarch64_prologue_cache cache;
  cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
  static const uint32_t insns[] = {
    0xf81d0ff3, /* str x19, [sp, #-48]! */
    0xb9002fe0, /* str w0, [sp, #44] */
    0xf90013e1, /* str x1, [sp, #32]*/
    0xfd000fe0, /* str d0, [sp, #24] */
    0xaa0203f3, /* mov x19, x2 */
    0xf94013e0, /* ldr x0, [sp, #32] */
  instruction_reader_test reader (insns);
  CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
  SELF_CHECK (end == 4 * 5);
  SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
  SELF_CHECK (cache.framesize == 48);
  for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
      SELF_CHECK (cache.saved_regs[i].addr == -16);
      SELF_CHECK (cache.saved_regs[i].addr == -48);
      SELF_CHECK (cache.saved_regs[i].addr == -1);
  for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
    int regnum = gdbarch_num_regs (gdbarch);
      SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
      SELF_CHECK (cache.saved_regs[i + regnum + AARCH64_D0_REGNUM].addr
} // namespace selftests
#endif /* GDB_SELF_TEST */
/* Implement the "skip_prologue" gdbarch method.  Return the address
   of the first instruction past the prologue of the function
   containing PC.  */
aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
  CORE_ADDR func_addr, limit_pc;
  /* See if we can determine the end of the prologue via the symbol
     table.  If so, then return either PC, or the PC after the
     prologue, whichever is greater.  */
  if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
      CORE_ADDR post_prologue_pc
	= skip_prologue_using_sal (gdbarch, func_addr);
      if (post_prologue_pc != 0)
	return std::max (pc, post_prologue_pc);
  /* Can't determine prologue from the symbol table, need to examine
  /* Find an upper limit on the function prologue using the debug
     information.  If the debug information could not be used to
     provide that bound, then use an arbitrary large number as the
  limit_pc = skip_prologue_using_sal (gdbarch, pc);
  limit_pc = pc + 128;	/* Magic.  */
  /* Try disassembling prologue.  */
  return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
/* Scan the function prologue for THIS_FRAME and populate the prologue
   cache CACHE.  */
aarch64_scan_prologue (struct frame_info *this_frame,
		       struct aarch64_prologue_cache *cache)
  CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
  CORE_ADDR prologue_start;
  CORE_ADDR prologue_end;
  CORE_ADDR prev_pc = get_frame_pc (this_frame);
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  cache->prev_pc = prev_pc;
  /* Assume we do not find a frame.  */
  cache->framereg = -1;
  cache->framesize = 0;
  if (find_pc_partial_function (block_addr, NULL, &prologue_start,
      struct symtab_and_line sal = find_pc_line (prologue_start, 0);
	  /* No line info so use the current PC.  */
	  prologue_end = prev_pc;
      else if (sal.end < prologue_end)
	  /* The next line begins after the function end.  */
	  prologue_end = sal.end;
      /* Never scan past the frame's current PC.  */
      prologue_end = std::min (prologue_end, prev_pc);
      aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
  /* Fallback: assume a standard fp-based frame with x29/x30 saved
     at the frame base.  */
  frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
  cache->framereg = AARCH64_FP_REGNUM;
  cache->framesize = 16;
  cache->saved_regs[29].addr = 0;
  cache->saved_regs[30].addr = 8;
/* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
   function may throw an exception if the inferior's registers or memory is
   not available.  */
aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
			       struct aarch64_prologue_cache *cache)
  CORE_ADDR unwound_fp;
  aarch64_scan_prologue (this_frame, cache);
  /* No identifiable frame register -- nothing more we can compute.  */
  if (cache->framereg == -1)
  unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
  cache->prev_sp = unwound_fp + cache->framesize;
  /* Calculate actual addresses of saved registers using offsets
     determined by aarch64_analyze_prologue.  */
  for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
    if (trad_frame_addr_p (cache->saved_regs, reg))
      cache->saved_regs[reg].addr += cache->prev_sp;
  cache->func = get_frame_func (this_frame);
  cache->available_p = 1;
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
static struct aarch64_prologue_cache *
aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
  struct aarch64_prologue_cache *cache;
  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;
  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  aarch64_make_prologue_cache_1 (this_frame, cache);
  CATCH (ex, RETURN_MASK_ERROR)
      /* Leave the cache marked unavailable; rethrow anything that is
	 not a NOT_AVAILABLE_ERROR.  */
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
/* Implement the "stop_reason" frame_unwind method.  */
static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);
  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;
  /* Halt the backtrace at "_start".  */
  if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
    return UNWIND_OUTERMOST;
  /* We've hit a wall, stop.  */
  if (cache->prev_sp == 0)
    return UNWIND_OUTERMOST;
  return UNWIND_NO_REASON;
/* Our frame ID for a normal frame is the current function's starting
   PC and the caller's SP when we were called.  */
aarch64_prologue_this_id (struct frame_info *this_frame,
			  void **this_cache, struct frame_id *this_id)
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);
  if (!cache->available_p)
    *this_id = frame_id_build_unavailable_stack (cache->func);
  *this_id = frame_id_build (cache->prev_sp, cache->func);
/* Implement the "prev_register" frame_unwind method.  */
static struct value *
aarch64_prologue_prev_register (struct frame_info *this_frame,
				void **this_cache, int prev_regnum)
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);
  /* If we are asked to unwind the PC, then we need to return the LR
     instead.  The prologue may save PC, but it will point into this
     frame's prologue, not the next frame's resume location.  */
  if (prev_regnum == AARCH64_PC_REGNUM)
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, prev_regnum, lr);
  /* SP is generally not saved to the stack, but this frame is
     identified by the next frame's stack pointer at the time of the
     call.  The value was already reconstructed into PREV_SP.  */
  if (prev_regnum == AARCH64_SP_REGNUM)
    return frame_unwind_got_constant (this_frame, prev_regnum,
  /* Everything else: look it up in the saved-register table.  */
  return trad_frame_get_prev_register (this_frame, cache->saved_regs,
/* AArch64 prologue unwinder.  */
struct frame_unwind aarch64_prologue_unwind =
  aarch64_prologue_frame_unwind_stop_reason,
  aarch64_prologue_this_id,
  aarch64_prologue_prev_register,
  default_frame_sniffer
/* Allocate and fill in *THIS_CACHE with information about the prologue of
   *THIS_FRAME.  Do not do this if *THIS_CACHE was already allocated.
   Return a pointer to the current aarch64_prologue_cache in
static struct aarch64_prologue_cache *
aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
  struct aarch64_prologue_cache *cache;
  if (*this_cache != NULL)
    return (struct aarch64_prologue_cache *) *this_cache;
  cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  /* A stub frame is identified by the current SP and PC directly.  */
  cache->prev_sp = get_frame_register_unsigned (this_frame,
  cache->prev_pc = get_frame_pc (this_frame);
  cache->available_p = 1;
  CATCH (ex, RETURN_MASK_ERROR)
      if (ex.error != NOT_AVAILABLE_ERROR)
	throw_exception (ex);
/* Implement the "stop_reason" frame_unwind method.  */
static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);
  if (!cache->available_p)
    return UNWIND_UNAVAILABLE;
  return UNWIND_NO_REASON;
/* Our frame ID for a stub frame is the current SP and LR.  */
aarch64_stub_this_id (struct frame_info *this_frame,
		      void **this_cache, struct frame_id *this_id)
  struct aarch64_prologue_cache *cache
    = aarch64_make_stub_cache (this_frame, this_cache);
  if (cache->available_p)
    *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
  *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
/* Implement the "sniffer" frame_unwind method.  Accept PLT stubs and
   frames whose code memory cannot be read.  */
aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_prologue_cache)
  CORE_ADDR addr_in_block;
  addr_in_block = get_frame_address_in_block (this_frame);
  if (in_plt_section (addr_in_block)
      /* We also use the stub winder if the target memory is unreadable
	 to avoid having the prologue unwinder trying to read it.  */
      || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
/* AArch64 stub unwinder.  Shares prev_register with the prologue
   unwinder; only the sniffer and ID differ.  */
struct frame_unwind aarch64_stub_unwind =
  aarch64_stub_frame_unwind_stop_reason,
  aarch64_stub_this_id,
  aarch64_prologue_prev_register,
  aarch64_stub_unwind_sniffer
/* Return the frame base address of *THIS_FRAME.  */
aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
  struct aarch64_prologue_cache *cache
    = aarch64_make_prologue_cache (this_frame, this_cache);
  return cache->prev_sp - cache->framesize;
/* AArch64 default frame base information.  The same address serves as
   frame base, locals base and args base.  */
struct frame_base aarch64_normal_base =
  &aarch64_prologue_unwind,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base,
  aarch64_normal_frame_base
/* Assuming THIS_FRAME is a dummy, return the frame ID of that
   dummy frame.  The frame ID's base needs to match the TOS value
   saved by save_dummy_frame_tos () and returned from
   aarch64_push_dummy_call, and the PC needs to match the dummy
   frame's breakpoint.  */
static struct frame_id
aarch64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
  return frame_id_build (get_frame_register_unsigned (this_frame,
			 get_frame_pc (this_frame));
/* Implement the "unwind_pc" gdbarch method.  */
aarch64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *this_frame)
  /* Unwind the raw PC register from the next frame.  */
    = frame_unwind_register_unsigned (this_frame, AARCH64_PC_REGNUM);
/* Implement the "unwind_sp" gdbarch method.  */
aarch64_unwind_sp (struct gdbarch *gdbarch, struct frame_info *this_frame)
  return frame_unwind_register_unsigned (this_frame, AARCH64_SP_REGNUM);
/* Return the value of the REGNUM register in the previous frame of
   For the PC, return the LR value; other registers are unexpected here.  */
static struct value *
aarch64_dwarf2_prev_register (struct frame_info *this_frame,
			      void **this_cache, int regnum)
    case AARCH64_PC_REGNUM:
      lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
      return frame_unwind_got_constant (this_frame, regnum, lr);
      internal_error (__FILE__, __LINE__,
		      _("Unexpected register %d"), regnum);
/* Implement the "init_reg" dwarf2_frame_ops method.  PC unwinds via a
   function (to LR); SP unwinds to the CFA.  */
aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
			       struct dwarf2_frame_state_reg *reg,
			       struct frame_info *this_frame)
    case AARCH64_PC_REGNUM:
      reg->how = DWARF2_FRAME_REG_FN;
      reg->loc.fn = aarch64_dwarf2_prev_register;
    case AARCH64_SP_REGNUM:
      reg->how = DWARF2_FRAME_REG_CFA;
/* When arguments must be pushed onto the stack, they go on in reverse
   order.  The code below implements a FILO (stack) to do this.  */
  /* Value to pass on stack.  It can be NULL if this item is for stack
  const gdb_byte *data;
  /* Size in bytes of value to pass on stack.  */
DEF_VEC_O (stack_item_t);
/* Return the alignment (in bytes) of the given type T, following the
   AAPCS64 rules: scalars align to their size, vectors cap at 16 bytes,
   complex/array types use their element alignment, aggregates use the
   maximum alignment of their fields.  */
aarch64_type_align (struct type *t)
  t = check_typedef (t);
  switch (TYPE_CODE (t))
      /* Should never happen.  */
      internal_error (__FILE__, __LINE__, _("unknown type alignment"));
    case TYPE_CODE_ENUM:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_BITSTRING:
    case TYPE_CODE_RVALUE_REF:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
      return TYPE_LENGTH (t);
    case TYPE_CODE_ARRAY:
      if (TYPE_VECTOR (t))
	  /* Use the natural alignment for vector types (the same for
	     scalar type), but the maximum alignment is 128-bit.  */
	  if (TYPE_LENGTH (t) > 16)
	  return TYPE_LENGTH (t);
	return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_COMPLEX:
      return aarch64_type_align (TYPE_TARGET_TYPE (t));
    case TYPE_CODE_STRUCT:
    case TYPE_CODE_UNION:
      /* Aggregate alignment is the maximum field alignment.  */
      for (n = 0; n < TYPE_NFIELDS (t); n++)
	  falign = aarch64_type_align (TYPE_FIELD_TYPE (t, n));
/* Return 1 if *TY is a homogeneous floating-point aggregate or
   homogeneous short-vector aggregate as defined in the AAPCS64 ABI
   document; otherwise return 0.  */
is_hfa_or_hva (struct type *ty)
  switch (TYPE_CODE (ty))
    case TYPE_CODE_ARRAY:
	struct type *target_ty = TYPE_TARGET_TYPE (ty);
	/* A vector type itself is not an HFA/HVA aggregate.  */
	if (TYPE_VECTOR (ty))
	if (TYPE_LENGTH (ty) <= 4 /* HFA or HVA has at most 4 members.  */
	    && (TYPE_CODE (target_ty) == TYPE_CODE_FLT /* HFA */
		|| (TYPE_CODE (target_ty) == TYPE_CODE_ARRAY /* HVA */
		    && TYPE_VECTOR (target_ty))))
    case TYPE_CODE_UNION:
    case TYPE_CODE_STRUCT:
	/* HFA or HVA has at most four members.  */
	if (TYPE_NFIELDS (ty) > 0 && TYPE_NFIELDS (ty) <= 4)
	    struct type *member0_type;
	    member0_type = check_typedef (TYPE_FIELD_TYPE (ty, 0));
	    if (TYPE_CODE (member0_type) == TYPE_CODE_FLT
		|| (TYPE_CODE (member0_type) == TYPE_CODE_ARRAY
		    && TYPE_VECTOR (member0_type)))
		/* All members must share the first member's type and size.  */
		for (i = 0; i < TYPE_NFIELDS (ty); i++)
		    struct type *member1_type;
		    member1_type = check_typedef (TYPE_FIELD_TYPE (ty, i));
		    if (TYPE_CODE (member0_type) != TYPE_CODE (member1_type)
			|| (TYPE_LENGTH (member0_type)
			    != TYPE_LENGTH (member1_type)))
/* AArch64 function call information structure.  Tracks the argument
   marshalling state while building up a dummy call.  */
struct aarch64_call_info
  /* the current argument number.  */
  /* The next general purpose register number, equivalent to NGRN as
     described in the AArch64 Procedure Call Standard.  */
  /* The next SIMD and floating point register number, equivalent to
     NSRN as described in the AArch64 Procedure Call Standard.  */
  /* The next stacked argument address, equivalent to NSAA as
     described in the AArch64 Procedure Call Standard.  */
  /* Stack item vector.  */
  VEC(stack_item_t) *si;
/* Pass a value in a sequence of consecutive X registers.  The caller
   is responsible for ensuring sufficient registers are available.  */
pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
	   struct aarch64_call_info *info, struct type *type,
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int len = TYPE_LENGTH (type);
  enum type_code typecode = TYPE_CODE (type);
  int regnum = AARCH64_X0_REGNUM + info->ngrn;
  const bfd_byte *buf = value_contents (arg);
  /* Copy the value into registers X_REGISTER_SIZE bytes at a time.  */
  int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
  CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
  /* Adjust sub-word struct/union args when big-endian.  */
  if (byte_order == BFD_ENDIAN_BIG
      && partial_len < X_REGISTER_SIZE
      && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
    regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
  debug_printf ("arg %d in %s = 0x%s\n", info->argnum,
		gdbarch_register_name (gdbarch, regnum),
		phex (regval, X_REGISTER_SIZE));
  regcache_cooked_write_unsigned (regcache, regnum, regval);
/* Attempt to marshall a value in a V register.  Return 1 if
   successful, or 0 if insufficient registers are available.  This
   function, unlike the equivalent pass_in_x() function does not
   handle arguments spread across multiple registers.  */
pass_in_v (struct gdbarch *gdbarch,
	   struct regcache *regcache,
	   struct aarch64_call_info *info,
	   int len, const bfd_byte *buf)
  int regnum = AARCH64_V0_REGNUM + info->nsrn;
  gdb_byte reg[V_REGISTER_SIZE];
  memset (reg, 0, sizeof (reg));
  /* PCS C.1, the argument is allocated to the least significant
     bits of V register.  */
  memcpy (reg, buf, len);
  regcache_cooked_write (regcache, regnum, reg);
  debug_printf ("arg %d in %s\n", info->argnum,
		gdbarch_register_name (gdbarch, regnum));
1298 /* Marshall an argument onto the stack.  */
/* The argument's bytes (plus any alignment padding needed afterwards)
   are appended to the stack-item vector INFO->si; the caller writes
   the queued items to target memory later.  */
1301 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1304 const bfd_byte *buf = value_contents (arg);
1305 int len = TYPE_LENGTH (type);
1311 align = aarch64_type_align (type);
1313 /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1314 natural alignment of the argument's type.  */
1315 align = align_up (align, 8);
1317 /* The AArch64 PCS requires at most doubleword alignment.  */
1323 debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
/* Queue the argument bytes themselves.  */
1329 VEC_safe_push (stack_item_t, info->si, &item);
/* If the next stacked argument offset (NSAA) is now misaligned,
   queue a padding item (NULL data) to restore alignment.  */
1332 if (info->nsaa & (align - 1))
1334 /* Push stack alignment padding.  */
1335 int pad = align - (info->nsaa & (align - 1));
1340 VEC_safe_push (stack_item_t, info->si, &item);
1345 /* Marshall an argument into a sequence of one or more consecutive X
1346    registers or, if insufficient X registers are available, then onto
1350 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1351 struct aarch64_call_info *info, struct type *type,
1354 int len = TYPE_LENGTH (type);
/* Number of X registers the value needs, rounding up.  */
1355 int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1357 /* PCS C.13 - Pass in registers if we have enough spare */
1358 if (info->ngrn + nregs <= 8)
1360 pass_in_x (gdbarch, regcache, info, type, arg);
1361 info->ngrn += nregs;
/* Not enough X registers left: the whole argument goes on the
   stack instead.  */
1366 pass_on_stack (info, type, arg);
1370 /* Pass a value in a V register, or on the stack if insufficient are
1371    available.  */
1374 pass_in_v_or_stack (struct gdbarch *gdbarch,
1375 struct regcache *regcache,
1376 struct aarch64_call_info *info,
/* pass_in_v returns 0 when no V register is free; in that case the
   argument is marshalled onto the stack instead.  */
1380 if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (type),
1381 value_contents (arg)))
1382 pass_on_stack (info, type, arg);
1385 /* Implement the "push_dummy_call" gdbarch method.  */
/* Marshals ARGS into registers/stack per the AArch64 PCS, sets LR to
   BP_ADDR, optionally loads the struct-return pointer into X8, writes
   queued stack items below SP, and finally updates the SP register.  */
1388 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1389 struct regcache *regcache, CORE_ADDR bp_addr,
1391 struct value **args, CORE_ADDR sp, int struct_return,
1392 CORE_ADDR struct_addr)
1395 struct aarch64_call_info info;
1396 struct type *func_type;
1397 struct type *return_type;
1398 int lang_struct_return;
1400 memset (&info, 0, sizeof (info));
1402 /* We need to know what the type of the called function is in order
1403 to determine the number of named/anonymous arguments for the
1404 actual argument placement, and the return type in order to handle
1405 return value correctly.
1407 The generic code above us views the decision of return in memory
1408 or return in registers as a two stage processes.  The language
1409 handler is consulted first and may decide to return in memory (eg
1410 class with copy constructor returned by value), this will cause
1411 the generic code to allocate space AND insert an initial leading
1414 If the language code does not decide to pass in memory then the
1415 target code is consulted.
1417 If the language code decides to pass in memory we want to move
1418 the pointer inserted as the initial argument from the argument
1419 list and into X8, the conventional AArch64 struct return pointer
1422 This is slightly awkward, ideally the flag "lang_struct_return"
1423 would be passed to the targets implementation of push_dummy_call.
1424 Rather than change the target interface we call the language code
1425 directly ourselves.  */
1427 func_type = check_typedef (value_type (function));
1429 /* Dereference function pointer types.  */
1430 if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
1431 func_type = TYPE_TARGET_TYPE (func_type);
1433 gdb_assert (TYPE_CODE (func_type) == TYPE_CODE_FUNC
1434 || TYPE_CODE (func_type) == TYPE_CODE_METHOD);
1436 /* If language_pass_by_reference () returned true we will have been
1437 given an additional initial argument, a hidden pointer to the
1438 return slot in memory.  */
1439 return_type = TYPE_TARGET_TYPE (func_type);
1440 lang_struct_return = language_pass_by_reference (return_type);
1442 /* Set the return address.  For the AArch64, the return breakpoint
1443 is always at BP_ADDR.  */
1444 regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1446 /* If we were given an initial argument for the return slot because
1447 lang_struct_return was true, lose it.  */
1448 if (lang_struct_return)
1454 /* The struct_return pointer occupies X8.  */
1455 if (struct_return || lang_struct_return)
1459 debug_printf ("struct return in %s = 0x%s\n",
1460 gdbarch_register_name (gdbarch,
1461 AARCH64_STRUCT_RETURN_REGNUM),
1462 paddress (gdbarch, struct_addr));
1464 regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
/* Marshall each argument by its type class.  */
1468 for (argnum = 0; argnum < nargs; argnum++)
1470 struct value *arg = args[argnum];
1471 struct type *arg_type;
1474 arg_type = check_typedef (value_type (arg));
1475 len = TYPE_LENGTH (arg_type);
1477 switch (TYPE_CODE (arg_type))
1480 case TYPE_CODE_BOOL:
1481 case TYPE_CODE_CHAR:
1482 case TYPE_CODE_RANGE:
1483 case TYPE_CODE_ENUM:
1486 /* Promote to 32 bit integer.  */
1487 if (TYPE_UNSIGNED (arg_type))
1488 arg_type = builtin_type (gdbarch)->builtin_uint32;
1490 arg_type = builtin_type (gdbarch)->builtin_int32;
1491 arg = value_cast (arg_type, arg);
1493 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1496 case TYPE_CODE_COMPLEX:
/* Complex values go in a pair of consecutive V registers: real
   part first, then imaginary part.  */
1499 const bfd_byte *buf = value_contents (arg);
1500 struct type *target_type =
1501 check_typedef (TYPE_TARGET_TYPE (arg_type));
1503 pass_in_v (gdbarch, regcache, &info,
1504 TYPE_LENGTH (target_type), buf);
1505 pass_in_v (gdbarch, regcache, &info,
1506 TYPE_LENGTH (target_type),
1507 buf + TYPE_LENGTH (target_type));
1512 pass_on_stack (&info, arg_type, arg);
1516 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1519 case TYPE_CODE_STRUCT:
1520 case TYPE_CODE_ARRAY:
1521 case TYPE_CODE_UNION:
1522 if (is_hfa_or_hva (arg_type))
1524 int elements = TYPE_NFIELDS (arg_type);
1526 /* Homogeneous Aggregates */
/* NOTE(review): the PCS allows V registers v0-v7, i.e. NSRN up
   to 8; "< 8" looks like an off-by-one — "<= 8" would also
   accept an HFA that exactly fills the remaining V registers.
   TODO: confirm against AAPCS64 rule C.2/C.3.  */
1527 if (info.nsrn + elements < 8)
1531 for (i = 0; i < elements; i++)
1533 /* We know that we have sufficient registers
1534 available therefore this will never fallback
1536 struct value *field =
1537 value_primitive_field (arg, 0, i, arg_type);
1538 struct type *field_type =
1539 check_typedef (value_type (field));
1541 pass_in_v_or_stack (gdbarch, regcache, &info,
1548 pass_on_stack (&info, arg_type, arg);
1551 else if (TYPE_CODE (arg_type) == TYPE_CODE_ARRAY
1552 && TYPE_VECTOR (arg_type) && (len == 16 || len == 8))
1554 /* Short vector types are passed in V registers.  */
1555 pass_in_v_or_stack (gdbarch, regcache, &info, arg_type, arg);
1559 /* PCS B.7 Aggregates larger than 16 bytes are passed by
1560 invisible reference.  */
1562 /* Allocate aligned storage.  */
1563 sp = align_down (sp - len, 16);
1565 /* Write the real data into the stack.  */
1566 write_memory (sp, value_contents (arg), len);
1568 /* Construct the indirection.  */
1569 arg_type = lookup_pointer_type (arg_type);
1570 arg = value_from_pointer (arg_type, sp);
1571 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1574 /* PCS C.15 / C.18 multiple values pass.  */
1575 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1579 pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1584 /* Make sure stack retains 16 byte alignment.  */
1586 sp -= 16 - (info.nsaa & 15);
/* Pop queued stack items (pushed in argument order, written here in
   reverse); NULL data marks alignment padding that is skipped.  */
1588 while (!VEC_empty (stack_item_t, info.si))
1590 stack_item_t *si = VEC_last (stack_item_t, info.si);
1593 if (si->data != NULL)
1594 write_memory (sp, si->data, si->len);
1595 VEC_pop (stack_item_t, info.si);
1598 VEC_free (stack_item_t, info.si);
1600 /* Finally, update the SP register.  */
1601 regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1606 /* Implement the "frame_align" gdbarch method. */
1609 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1611 /* Align the stack to sixteen bytes. */
1612 return sp & ~(CORE_ADDR) 15;
1615 /* Return the type for an AdvSISD Q register. */
1617 static struct type *
1618 aarch64_vnq_type (struct gdbarch *gdbarch)
1620 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1622 if (tdep->vnq_type == NULL)
1627 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1630 elem = builtin_type (gdbarch)->builtin_uint128;
1631 append_composite_type_field (t, "u", elem);
1633 elem = builtin_type (gdbarch)->builtin_int128;
1634 append_composite_type_field (t, "s", elem);
1639 return tdep->vnq_type;
1642 /* Return the type for an AdvSISD D register. */
1644 static struct type *
1645 aarch64_vnd_type (struct gdbarch *gdbarch)
1647 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1649 if (tdep->vnd_type == NULL)
1654 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1657 elem = builtin_type (gdbarch)->builtin_double;
1658 append_composite_type_field (t, "f", elem);
1660 elem = builtin_type (gdbarch)->builtin_uint64;
1661 append_composite_type_field (t, "u", elem);
1663 elem = builtin_type (gdbarch)->builtin_int64;
1664 append_composite_type_field (t, "s", elem);
1669 return tdep->vnd_type;
1672 /* Return the type for an AdvSISD S register. */
1674 static struct type *
1675 aarch64_vns_type (struct gdbarch *gdbarch)
1677 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1679 if (tdep->vns_type == NULL)
1684 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
1687 elem = builtin_type (gdbarch)->builtin_float;
1688 append_composite_type_field (t, "f", elem);
1690 elem = builtin_type (gdbarch)->builtin_uint32;
1691 append_composite_type_field (t, "u", elem);
1693 elem = builtin_type (gdbarch)->builtin_int32;
1694 append_composite_type_field (t, "s", elem);
1699 return tdep->vns_type;
1702 /* Return the type for an AdvSISD H register. */
1704 static struct type *
1705 aarch64_vnh_type (struct gdbarch *gdbarch)
1707 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1709 if (tdep->vnh_type == NULL)
1714 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
1717 elem = builtin_type (gdbarch)->builtin_uint16;
1718 append_composite_type_field (t, "u", elem);
1720 elem = builtin_type (gdbarch)->builtin_int16;
1721 append_composite_type_field (t, "s", elem);
1726 return tdep->vnh_type;
1729 /* Return the type for an AdvSISD B register. */
1731 static struct type *
1732 aarch64_vnb_type (struct gdbarch *gdbarch)
1734 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1736 if (tdep->vnb_type == NULL)
1741 t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
1744 elem = builtin_type (gdbarch)->builtin_uint8;
1745 append_composite_type_field (t, "u", elem);
1747 elem = builtin_type (gdbarch)->builtin_int8;
1748 append_composite_type_field (t, "s", elem);
1753 return tdep->vnb_type;
1756 /* Implement the "dwarf2_reg_to_regnum" gdbarch method. */
1759 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
1761 if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
1762 return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
1764 if (reg == AARCH64_DWARF_SP)
1765 return AARCH64_SP_REGNUM;
1767 if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
1768 return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
1774 /* Implement the "print_insn" gdbarch method. */
1777 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
1779 info->symbols = NULL;
1780 return print_insn_aarch64 (memaddr, info);
1783 /* AArch64 BRK software debug mode instruction.
1784 Note that AArch64 code is always little-endian.
1785 1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
1786 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
/* Generate the standard breakpoint-kind/sw-breakpoint helpers from the
   fixed-size BRK pattern above.  */
1788 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
1790 /* Extract from an array REGS containing the (raw) register state a
1791 function return value of type TYPE, and copy that, in virtual
1792 format, into VALBUF.  */
1795 aarch64_extract_return_value (struct type *type, struct regcache *regs,
1798 struct gdbarch *gdbarch = get_regcache_arch (regs);
1799 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
/* Scalar floats come back in v0.  */
1801 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1803 bfd_byte buf[V_REGISTER_SIZE];
1804 int len = TYPE_LENGTH (type);
1806 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1807 memcpy (valbuf, buf, len);
1809 else if (TYPE_CODE (type) == TYPE_CODE_INT
1810 || TYPE_CODE (type) == TYPE_CODE_CHAR
1811 || TYPE_CODE (type) == TYPE_CODE_BOOL
1812 || TYPE_CODE (type) == TYPE_CODE_PTR
1813 || TYPE_IS_REFERENCE (type)
1814 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1816 /* If the type is a plain integer, then the access is
1817 straight-forward.  Otherwise we have to play around a bit
1819 int len = TYPE_LENGTH (type);
1820 int regno = AARCH64_X0_REGNUM;
1825 /* By using store_unsigned_integer we avoid having to do
1826 anything special for small big-endian values.  */
1827 regcache_cooked_read_unsigned (regs, regno++, &tmp);
1828 store_unsigned_integer (valbuf,
1829 (len > X_REGISTER_SIZE
1830 ? X_REGISTER_SIZE : len), byte_order, tmp);
1831 len -= X_REGISTER_SIZE;
1832 valbuf += X_REGISTER_SIZE;
/* Complex values: real part in v0, imaginary part in v1.  */
1835 else if (TYPE_CODE (type) == TYPE_CODE_COMPLEX)
1837 int regno = AARCH64_V0_REGNUM;
1838 bfd_byte buf[V_REGISTER_SIZE];
1839 struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1840 int len = TYPE_LENGTH (target_type);
1842 regcache_cooked_read (regs, regno, buf);
1843 memcpy (valbuf, buf, len);
1845 regcache_cooked_read (regs, regno + 1, buf);
1846 memcpy (valbuf, buf, len);
/* HFA/HVA: one member per V register starting at v0.  */
1849 else if (is_hfa_or_hva (type))
1851 int elements = TYPE_NFIELDS (type);
1852 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1853 int len = TYPE_LENGTH (member_type);
1856 for (i = 0; i < elements; i++)
1858 int regno = AARCH64_V0_REGNUM + i;
1859 bfd_byte buf[V_REGISTER_SIZE];
1863 debug_printf ("read HFA or HVA return value element %d from %s\n",
1865 gdbarch_register_name (gdbarch, regno));
1867 regcache_cooked_read (regs, regno, buf);
1869 memcpy (valbuf, buf, len);
1873 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
1874 && (TYPE_LENGTH (type) == 16 || TYPE_LENGTH (type) == 8))
1876 /* Short vector is returned in V register.  */
1877 gdb_byte buf[V_REGISTER_SIZE];
1879 regcache_cooked_read (regs, AARCH64_V0_REGNUM, buf);
1880 memcpy (valbuf, buf, TYPE_LENGTH (type));
1884 /* For a structure or union the behaviour is as if the value had
1885 been stored to word-aligned memory and then loaded into
1886 registers with 64-bit load instruction(s).  */
1887 int len = TYPE_LENGTH (type);
1888 int regno = AARCH64_X0_REGNUM;
1889 bfd_byte buf[X_REGISTER_SIZE];
1893 regcache_cooked_read (regs, regno++, buf);
1894 memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
1895 len -= X_REGISTER_SIZE;
1896 valbuf += X_REGISTER_SIZE;
1902 /* Will a function return an aggregate type in memory or in a
1903 register? Return 0 if an aggregate type can be returned in a
1904 register, 1 if it must be returned in memory. */
1907 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
1909 type = check_typedef (type);
1911 if (is_hfa_or_hva (type))
1913 /* v0-v7 are used to return values and one register is allocated
1914 for one member. However, HFA or HVA has at most four members. */
1918 if (TYPE_LENGTH (type) > 16)
1920 /* PCS B.6 Aggregates larger than 16 bytes are passed by
1921 invisible reference. */
1929 /* Write into appropriate registers a function return value of type
1930 TYPE, given in virtual format.  */
1933 aarch64_store_return_value (struct type *type, struct regcache *regs,
1934 const gdb_byte *valbuf)
1936 struct gdbarch *gdbarch = get_regcache_arch (regs);
1937 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
/* Scalar floats go in v0.  */
1939 if (TYPE_CODE (type) == TYPE_CODE_FLT)
1941 bfd_byte buf[V_REGISTER_SIZE];
1942 int len = TYPE_LENGTH (type);
1944 memcpy (buf, valbuf, len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
1945 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
1947 else if (TYPE_CODE (type) == TYPE_CODE_INT
1948 || TYPE_CODE (type) == TYPE_CODE_CHAR
1949 || TYPE_CODE (type) == TYPE_CODE_BOOL
1950 || TYPE_CODE (type) == TYPE_CODE_PTR
1951 || TYPE_IS_REFERENCE (type)
1952 || TYPE_CODE (type) == TYPE_CODE_ENUM)
1954 if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
1956 /* Values of one word or less are zero/sign-extended and
1958 bfd_byte tmpbuf[X_REGISTER_SIZE];
1959 LONGEST val = unpack_long (type, valbuf);
1961 store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
1962 regcache_cooked_write (regs, AARCH64_X0_REGNUM, tmpbuf);
1966 /* Integral values greater than one word are stored in
1967 consecutive registers starting with r0.  This will always
1968 be a multiple of the register size.  */
1969 int len = TYPE_LENGTH (type);
1970 int regno = AARCH64_X0_REGNUM;
1974 regcache_cooked_write (regs, regno++, valbuf);
1975 len -= X_REGISTER_SIZE;
1976 valbuf += X_REGISTER_SIZE;
/* HFA/HVA: one member per V register starting at v0.  */
1980 else if (is_hfa_or_hva (type))
1982 int elements = TYPE_NFIELDS (type);
1983 struct type *member_type = check_typedef (TYPE_FIELD_TYPE (type, 0));
1984 int len = TYPE_LENGTH (member_type);
1987 for (i = 0; i < elements; i++)
1989 int regno = AARCH64_V0_REGNUM + i;
1990 bfd_byte tmpbuf[V_REGISTER_SIZE];
1994 debug_printf ("write HFA or HVA return value element %d to %s\n",
1996 gdbarch_register_name (gdbarch, regno));
1999 memcpy (tmpbuf, valbuf, len);
2000 regcache_cooked_write (regs, regno, tmpbuf);
/* Short vectors (8- or 16-byte) go in v0.  */
2004 else if (TYPE_CODE (type) == TYPE_CODE_ARRAY && TYPE_VECTOR (type)
2005 && (TYPE_LENGTH (type) == 8 || TYPE_LENGTH (type) == 16))
2008 gdb_byte buf[V_REGISTER_SIZE];
2010 memcpy (buf, valbuf, TYPE_LENGTH (type));
2011 regcache_cooked_write (regs, AARCH64_V0_REGNUM, buf);
2015 /* For a structure or union the behaviour is as if the value had
2016 been stored to word-aligned memory and then loaded into
2017 registers with 64-bit load instruction(s).  */
2018 int len = TYPE_LENGTH (type);
2019 int regno = AARCH64_X0_REGNUM;
2020 bfd_byte tmpbuf[X_REGISTER_SIZE];
2024 memcpy (tmpbuf, valbuf,
2025 len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2026 regcache_cooked_write (regs, regno++, tmpbuf);
2027 len -= X_REGISTER_SIZE;
2028 valbuf += X_REGISTER_SIZE;
2033 /* Implement the "return_value" gdbarch method. */
2035 static enum return_value_convention
2036 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2037 struct type *valtype, struct regcache *regcache,
2038 gdb_byte *readbuf, const gdb_byte *writebuf)
2041 if (TYPE_CODE (valtype) == TYPE_CODE_STRUCT
2042 || TYPE_CODE (valtype) == TYPE_CODE_UNION
2043 || TYPE_CODE (valtype) == TYPE_CODE_ARRAY)
2045 if (aarch64_return_in_memory (gdbarch, valtype))
2048 debug_printf ("return value in memory\n");
2049 return RETURN_VALUE_STRUCT_CONVENTION;
2054 aarch64_store_return_value (valtype, regcache, writebuf);
2057 aarch64_extract_return_value (valtype, regcache, readbuf);
2060 debug_printf ("return value in registers\n");
2062 return RETURN_VALUE_REGISTER_CONVENTION;
2065 /* Implement the "get_longjmp_target" gdbarch method. */
2068 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2071 gdb_byte buf[X_REGISTER_SIZE];
2072 struct gdbarch *gdbarch = get_frame_arch (frame);
2073 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2074 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2076 jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2078 if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2082 *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2086 /* Implement the "gen_return_address" gdbarch method. */
2089 aarch64_gen_return_address (struct gdbarch *gdbarch,
2090 struct agent_expr *ax, struct axs_value *value,
2093 value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2094 value->kind = axs_lvalue_register;
2095 value->u.reg = AARCH64_LR_REGNUM;
2099 /* Return the pseudo register name corresponding to register regnum.  */
/* The pseudo banks (q/d/s/h/b) are laid out contiguously; REGNUM is
   first rebased to a zero-based pseudo index before table lookup.  */
2102 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2104 static const char *const q_name[] =
2106 "q0", "q1", "q2", "q3",
2107 "q4", "q5", "q6", "q7",
2108 "q8", "q9", "q10", "q11",
2109 "q12", "q13", "q14", "q15",
2110 "q16", "q17", "q18", "q19",
2111 "q20", "q21", "q22", "q23",
2112 "q24", "q25", "q26", "q27",
2113 "q28", "q29", "q30", "q31",
2116 static const char *const d_name[] =
2118 "d0", "d1", "d2", "d3",
2119 "d4", "d5", "d6", "d7",
2120 "d8", "d9", "d10", "d11",
2121 "d12", "d13", "d14", "d15",
2122 "d16", "d17", "d18", "d19",
2123 "d20", "d21", "d22", "d23",
2124 "d24", "d25", "d26", "d27",
2125 "d28", "d29", "d30", "d31",
2128 static const char *const s_name[] =
2130 "s0", "s1", "s2", "s3",
2131 "s4", "s5", "s6", "s7",
2132 "s8", "s9", "s10", "s11",
2133 "s12", "s13", "s14", "s15",
2134 "s16", "s17", "s18", "s19",
2135 "s20", "s21", "s22", "s23",
2136 "s24", "s25", "s26", "s27",
2137 "s28", "s29", "s30", "s31",
2140 static const char *const h_name[] =
2142 "h0", "h1", "h2", "h3",
2143 "h4", "h5", "h6", "h7",
2144 "h8", "h9", "h10", "h11",
2145 "h12", "h13", "h14", "h15",
2146 "h16", "h17", "h18", "h19",
2147 "h20", "h21", "h22", "h23",
2148 "h24", "h25", "h26", "h27",
2149 "h28", "h29", "h30", "h31",
2152 static const char *const b_name[] =
2154 "b0", "b1", "b2", "b3",
2155 "b4", "b5", "b6", "b7",
2156 "b8", "b9", "b10", "b11",
2157 "b12", "b13", "b14", "b15",
2158 "b16", "b17", "b18", "b19",
2159 "b20", "b21", "b22", "b23",
2160 "b24", "b25", "b26", "b27",
2161 "b28", "b29", "b30", "b31",
/* Convert to a zero-based pseudo register index.  */
2164 regnum -= gdbarch_num_regs (gdbarch);
2166 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2167 return q_name[regnum - AARCH64_Q0_REGNUM];
2169 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2170 return d_name[regnum - AARCH64_D0_REGNUM];
2172 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2173 return s_name[regnum - AARCH64_S0_REGNUM];
2175 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2176 return h_name[regnum - AARCH64_H0_REGNUM];
2178 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2179 return b_name[regnum - AARCH64_B0_REGNUM];
2181 internal_error (__FILE__, __LINE__,
2182 _("aarch64_pseudo_register_name: bad register number %d"),
2186 /* Implement the "pseudo_register_type" tdesc_arch_data method. */
2188 static struct type *
2189 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2191 regnum -= gdbarch_num_regs (gdbarch);
2193 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2194 return aarch64_vnq_type (gdbarch);
2196 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2197 return aarch64_vnd_type (gdbarch);
2199 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2200 return aarch64_vns_type (gdbarch);
2202 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2203 return aarch64_vnh_type (gdbarch);
2205 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2206 return aarch64_vnb_type (gdbarch);
2208 internal_error (__FILE__, __LINE__,
2209 _("aarch64_pseudo_register_type: bad register number %d"),
2213 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method. */
2216 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2217 struct reggroup *group)
2219 regnum -= gdbarch_num_regs (gdbarch);
2221 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2222 return group == all_reggroup || group == vector_reggroup;
2223 else if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2224 return (group == all_reggroup || group == vector_reggroup
2225 || group == float_reggroup);
2226 else if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2227 return (group == all_reggroup || group == vector_reggroup
2228 || group == float_reggroup);
2229 else if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2230 return group == all_reggroup || group == vector_reggroup;
2231 else if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2232 return group == all_reggroup || group == vector_reggroup;
2234 return group == all_reggroup;
2237 /* Implement the "pseudo_register_read_value" gdbarch method.  */
/* Each pseudo (q/d/s/h/b) is a low-order slice of the underlying V
   register: read the raw V register and copy the slice-sized prefix
   into the result value.  */
2239 static struct value *
2240 aarch64_pseudo_read_value (struct gdbarch *gdbarch,
2241 struct regcache *regcache,
2244 gdb_byte reg_buf[V_REGISTER_SIZE];
2245 struct value *result_value;
2248 result_value = allocate_value (register_type (gdbarch, regnum));
2249 VALUE_LVAL (result_value) = lval_register;
2250 VALUE_REGNUM (result_value) = regnum;
2251 buf = value_contents_raw (result_value);
/* Convert to a zero-based pseudo register index.  */
2253 regnum -= gdbarch_num_regs (gdbarch);
2255 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2257 enum register_status status;
2260 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2261 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2262 if (status != REG_VALID)
2263 mark_value_bytes_unavailable (result_value, 0,
2264 TYPE_LENGTH (value_type (result_value)));
2266 memcpy (buf, reg_buf, Q_REGISTER_SIZE);
2267 return result_value;
2270 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2272 enum register_status status;
2275 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2276 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2277 if (status != REG_VALID)
2278 mark_value_bytes_unavailable (result_value, 0,
2279 TYPE_LENGTH (value_type (result_value)));
2281 memcpy (buf, reg_buf, D_REGISTER_SIZE);
2282 return result_value;
2285 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2287 enum register_status status;
2290 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2291 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2292 if (status != REG_VALID)
2293 mark_value_bytes_unavailable (result_value, 0,
2294 TYPE_LENGTH (value_type (result_value)));
2296 memcpy (buf, reg_buf, S_REGISTER_SIZE);
2297 return result_value;
2300 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2302 enum register_status status;
2305 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2306 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2307 if (status != REG_VALID)
2308 mark_value_bytes_unavailable (result_value, 0,
2309 TYPE_LENGTH (value_type (result_value)));
2311 memcpy (buf, reg_buf, H_REGISTER_SIZE);
2312 return result_value;
2315 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2317 enum register_status status;
2320 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2321 status = regcache_raw_read (regcache, v_regnum, reg_buf);
2322 if (status != REG_VALID)
2323 mark_value_bytes_unavailable (result_value, 0,
2324 TYPE_LENGTH (value_type (result_value)));
2326 memcpy (buf, reg_buf, B_REGISTER_SIZE);
2327 return result_value;
2330 gdb_assert_not_reached ("regnum out of bound");
2333 /* Implement the "pseudo_register_write" gdbarch method.  */
2336 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2337 int regnum, const gdb_byte *buf)
2339 gdb_byte reg_buf[V_REGISTER_SIZE];
2341 /* Ensure the register buffer is zero, we want gdb writes of the
2342 various 'scalar' pseudo registers to behave like architectural
2343 writes, register width bytes are written the remainder are set to
2345 memset (reg_buf, 0, sizeof (reg_buf));
/* Convert to a zero-based pseudo register index.  */
2347 regnum -= gdbarch_num_regs (gdbarch);
2349 if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2351 /* pseudo Q registers */
2354 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_Q0_REGNUM;
2355 memcpy (reg_buf, buf, Q_REGISTER_SIZE);
2356 regcache_raw_write (regcache, v_regnum, reg_buf);
2360 if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2362 /* pseudo D registers */
2365 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_D0_REGNUM;
2366 memcpy (reg_buf, buf, D_REGISTER_SIZE);
2367 regcache_raw_write (regcache, v_regnum, reg_buf);
2371 if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
/* pseudo S registers */
2375 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_S0_REGNUM;
2376 memcpy (reg_buf, buf, S_REGISTER_SIZE);
2377 regcache_raw_write (regcache, v_regnum, reg_buf);
2381 if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2383 /* pseudo H registers */
2386 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_H0_REGNUM;
2387 memcpy (reg_buf, buf, H_REGISTER_SIZE);
2388 regcache_raw_write (regcache, v_regnum, reg_buf);
2392 if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2394 /* pseudo B registers */
2397 v_regnum = AARCH64_V0_REGNUM + regnum - AARCH64_B0_REGNUM;
2398 memcpy (reg_buf, buf, B_REGISTER_SIZE);
2399 regcache_raw_write (regcache, v_regnum, reg_buf);
2403 gdb_assert_not_reached ("regnum out of bound");
2406 /* Callback function for user_reg_add. */
2408 static struct value *
2409 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2411 const int *reg_p = (const int *) baton;
2413 return value_of_register (*reg_p, frame);
2417 /* Implement the "software_single_step" gdbarch method, needed to
2418 single step through atomic sequences on AArch64.  */
/* Returns the addresses at which to place breakpoints: one past the
   closing store-exclusive, plus (optionally) the target of a single
   conditional branch found inside the sequence.  */
2420 static std::vector<CORE_ADDR>
2421 aarch64_software_single_step (struct regcache *regcache)
2423 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2424 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2425 const int insn_size = 4;
2426 const int atomic_sequence_length = 16; /* Instruction sequence length.  */
2427 CORE_ADDR pc = regcache_read_pc (regcache);
2428 CORE_ADDR breaks[2] = { -1, -1 };
2430 CORE_ADDR closing_insn = 0;
2431 uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2432 byte_order_for_code);
2435 int bc_insn_count = 0; /* Conditional branch instruction count.  */
2436 int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
2439 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2442 /* Look for a Load Exclusive instruction which begins the sequence;
2443    bit 22 distinguishes load-exclusive from store-exclusive.  */
2443 if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
/* Scan forward a bounded number of instructions for the close.  */
2446 for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2449 insn = read_memory_unsigned_integer (loc, insn_size,
2450 byte_order_for_code);
2452 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2454 /* Check if the instruction is a conditional branch.  */
2455 if (inst.opcode->iclass == condbranch)
2457 gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
/* Only one conditional branch is supported inside the sequence.  */
2459 if (bc_insn_count >= 1)
2462 /* It is, so we'll try to set a breakpoint at the destination.  */
2463 breaks[1] = loc + inst.operands[0].imm.value;
2469 /* Look for the Store Exclusive which closes the atomic sequence.  */
2470 if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2477 /* We didn't find a closing Store Exclusive instruction, fall back.  */
2481 /* Insert breakpoint after the end of the atomic sequence.  */
2482 breaks[0] = loc + insn_size;
2484 /* Check for duplicated breakpoints, and also check that the second
2485 breakpoint is not within the atomic sequence.  */
2487 && (breaks[1] == breaks[0]
2488 || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2489 last_breakpoint = 0;
2491 std::vector<CORE_ADDR> next_pcs;
2493 /* Insert the breakpoint at the end of the sequence, and one at the
2494 destination of the conditional branch, if it exists.  */
2495 for (index = 0; index <= last_breakpoint; index++)
2496 next_pcs.push_back (breaks[index]);
/* Per-step state carried across a displaced-stepping operation.  */
2501 struct displaced_step_closure
2503 /* True when a conditional instruction, such as B.COND, TBZ, etc.,
2504 is being displaced stepped.  */
2507 /* PC adjustment offset after displaced stepping.  */
2511 /* Data when visiting instructions for displaced stepping.  */
2513 struct aarch64_displaced_step_data
/* Common per-instruction visitor state (original insn address etc.).  */
2515 struct aarch64_insn_data base;
2517 /* The address where the instruction will be executed at.  */
2519 /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
2520 uint32_t insn_buf[DISPLACED_MODIFIED_INSNS];
2521 /* Number of instructions in INSN_BUF.  */
2522 unsigned insn_count;
2523 /* Registers when doing displaced stepping.  */
2524 struct regcache *regs;
/* Closure that records fixup info for after the step completes.  */
2526 struct displaced_step_closure *dsc;
2529 /* Implementation of aarch64_insn_visitor method "b".
   Relocate an unconditional branch (B/BL) so it can execute out of
   line at DSD->new_addr.  */
2532 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2533 struct aarch64_insn_data *data)
2535 struct aarch64_displaced_step_data *dsd
2536 = (struct aarch64_displaced_step_data *) data;
/* Branch displacement recomputed relative to the scratch location.  */
2537 int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
/* 28 bits is the reach of B's scaled imm26 displacement.  */
2539 if (can_encode_int32 (new_offset, 28))
2541 /* Emit B rather than BL, because executing BL on a new address
2542 will get the wrong address into LR. In order to avoid this,
2543 we emit B, and update LR if the instruction is BL. */
2544 emit_b (dsd->insn_buf, 0, new_offset);
2550 emit_nop (dsd->insn_buf);
2552 dsd->dsc->pc_adjust = offset;
/* For BL, simulate the link: LR gets the address after the original
   instruction, not after the scratch copy.  */
2558 regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2559 data->insn_addr + 4);
2563 /* Implementation of aarch64_insn_visitor method "b_cond". */
2566 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2567 struct aarch64_insn_data *data)
2569 struct aarch64_displaced_step_data *dsd
2570 = (struct aarch64_displaced_step_data *) data;
2572 /* GDB has to fix up the PC after displaced-stepping this instruction
2573 differently according to whether the condition is true or false. Instead
2574 of checking COND against the condition flags, we can use
2575 the following instructions, and GDB can tell how to fix up the PC
2576 according to the resulting PC value.
2578 B.COND TAKEN ; If cond is true, then jump to TAKEN.
/* Emit B.COND with a fixed +8 displacement into the scratch buffer;
   the fixup step decodes taken/not-taken from the PC afterwards.  */
2584 emit_bcond (dsd->insn_buf, cond, 8);
2586 dsd->dsc->pc_adjust = offset;
2587 dsd->insn_count = 1;
2590 /* Dynamically build a new register value. If we know the register
2591 statically, we should make it a global as above instead of using this.
   NUM is the architectural register number; IS64 is nonzero for the
   64-bit view of the register. */
2594 static struct aarch64_register
2595 aarch64_register (unsigned num, int is64)
2597 return (struct aarch64_register) { num, is64 };
2600 /* Implementation of aarch64_insn_visitor method "cb".
   Relocate a CBZ/CBNZ; like b_cond, a fixed +8 displacement is used so
   the fixup step can infer taken/not-taken from the PC.  */
2603 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
2604 const unsigned rn, int is64,
2605 struct aarch64_insn_data *data)
2607 struct aarch64_displaced_step_data *dsd
2608 = (struct aarch64_displaced_step_data *) data;
2610 /* The offset is out of range for a compare and branch
2611 instruction. We can use the following instructions instead:
2613 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2618 emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
2619 dsd->insn_count = 1;
2621 dsd->dsc->pc_adjust = offset;
2624 /* Implementation of aarch64_insn_visitor method "tb".
   Relocate a TBZ/TBNZ test-bit-and-branch.  */
2627 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
2628 const unsigned rt, unsigned bit,
2629 struct aarch64_insn_data *data)
2631 struct aarch64_displaced_step_data *dsd
2632 = (struct aarch64_displaced_step_data *) data;
2634 /* The offset is out of range for a test bit and branch
2635 instruction.  We can use the following instructions instead:
2637 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2643 emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
2644 dsd->insn_count = 1;
2646 dsd->dsc->pc_adjust = offset;
2649 /* Implementation of aarch64_insn_visitor method "adr".
   ADR/ADRP are PC-relative, so instead of relocating the instruction
   we compute its result here and write it directly to RD, then emit a
   NOP in the scratch pad.  */
2652 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
2653 const int is_adrp, struct aarch64_insn_data *data)
2655 struct aarch64_displaced_step_data *dsd
2656 = (struct aarch64_displaced_step_data *) data;
2657 /* We know exactly the address the ADR{P,} instruction will compute.
2658 We can just write it to the destination register. */
2659 CORE_ADDR address = data->insn_addr + offset;
2663 /* ADRP: clear the lower 12 bits of the address to get the 4K page. */
2664 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
/* Plain ADR: write the byte-exact address. */
2668 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
2671 dsd->dsc->pc_adjust = 4;
2672 emit_nop (dsd->insn_buf);
2673 dsd->insn_count = 1;
2676 /* Implementation of aarch64_insn_visitor method "ldr_literal".
   A PC-relative literal load would read the wrong address from the
   scratch pad, so materialize the literal's address into RT first,
   then load through RT with a zero offset.  */
2679 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
2680 const unsigned rt, const int is64,
2681 struct aarch64_insn_data *data)
2683 struct aarch64_displaced_step_data *dsd
2684 = (struct aarch64_displaced_step_data *) data;
2685 CORE_ADDR address = data->insn_addr + offset;
2686 struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
/* Seed RT with the literal's absolute address.  */
2688 regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
/* IS_SW selects the sign-extending word load (LDRSW).  */
2692 dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
2693 aarch64_register (rt, 1), zero);
2695 dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
2696 aarch64_register (rt, 1), zero);
2698 dsd->dsc->pc_adjust = 4;
2701 /* Implementation of aarch64_insn_visitor method "others".
   Non-PC-relative instructions are copied unmodified.  */
2704 aarch64_displaced_step_others (const uint32_t insn,
2705 struct aarch64_insn_data *data)
2707 struct aarch64_displaced_step_data *dsd
2708 = (struct aarch64_displaced_step_data *) data;
2710 aarch64_emit_insn (dsd->insn_buf, insn);
2711 dsd->insn_count = 1;
/* 0xd65f0000 with Rn masked out matches RET; a RET fully determines
   the new PC itself, so no post-step adjustment is needed.  */
2713 if ((insn & 0xfffffc1f) == 0xd65f0000)
2716 dsd->dsc->pc_adjust = 0;
2719 dsd->dsc->pc_adjust = 4;
/* Visitor table wiring the displaced-stepping relocation callbacks
   into the generic AArch64 instruction walker
   (aarch64_relocate_instruction).  */
2722 static const struct aarch64_insn_visitor visitor =
2724 aarch64_displaced_step_b,
2725 aarch64_displaced_step_b_cond,
2726 aarch64_displaced_step_cb,
2727 aarch64_displaced_step_tb,
2728 aarch64_displaced_step_adr,
2729 aarch64_displaced_step_ldr_literal,
2730 aarch64_displaced_step_others,
2733 /* Implement the "displaced_step_copy_insn" gdbarch method.
   Read the instruction at FROM, relocate it (via VISITOR) into a
   scratch buffer, and write the result to TO.  Returns the closure
   used later by the fixup method, or NULL if the instruction cannot
   be displaced-stepped (e.g. it starts an atomic sequence).  */
2735 struct displaced_step_closure *
2736 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
2737 CORE_ADDR from, CORE_ADDR to,
2738 struct regcache *regs)
2740 struct displaced_step_closure *dsc = NULL;
2741 enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2742 uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
2743 struct aarch64_displaced_step_data dsd;
2746 if (aarch64_decode_insn (insn, &inst, 1) != 0)
2749 /* Look for a Load Exclusive instruction which begins the sequence. */
2750 if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
2752 /* We can't displaced step atomic sequences. */
2756 dsc = XCNEW (struct displaced_step_closure);
2757 dsd.base.insn_addr = from;
2762 aarch64_relocate_instruction (insn, &visitor,
2763 (struct aarch64_insn_data *) &dsd);
2764 gdb_assert (dsd.insn_count <= DISPLACED_MODIFIED_INSNS);
2766 if (dsd.insn_count != 0)
2770 /* Instruction can be relocated to scratch pad. Copy
2771 relocated instruction(s) there. */
2772 for (i = 0; i < dsd.insn_count; i++)
2774 if (debug_displaced)
2776 debug_printf ("displaced: writing insn ");
2777 debug_printf ("%.8x", dsd.insn_buf[i]);
2778 debug_printf (" at %s\n", paddress (gdbarch, to + i * 4));
2780 write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
2781 (ULONGEST) dsd.insn_buf[i]);
2793 /* Implement the "displaced_step_fixup" gdbarch method.
   After the relocated instruction has executed at TO, decide how the
   original PC must advance.  For the conditional-branch trampolines
   the landing PC (TO vs TO+4) encodes taken vs not-taken.  */
2796 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
2797 struct displaced_step_closure *dsc,
2798 CORE_ADDR from, CORE_ADDR to,
2799 struct regcache *regs)
2805 regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc)
2808 /* Condition is true. */
2810 else if (pc - to == 4)
2812 /* Condition is false. */
2816 gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
2819 if (dsc->pc_adjust != 0)
2821 if (debug_displaced)
2823 debug_printf ("displaced: fixup: set PC to %s:%d\n",
2824 paddress (gdbarch, from), dsc->pc_adjust);
/* Resume at the original address plus the computed adjustment.  */
2826 regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
2827 from + dsc->pc_adjust);
2831 /* Implement the "displaced_step_hw_singlestep" gdbarch method.
   NOTE(review): body not visible in this view; on AArch64 this is
   expected to report that hardware single-step is used — confirm.  */
2834 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch,
2835 struct displaced_step_closure *closure)
2840 /* Initialize the current architecture based on INFO. If possible,
2841 re-use an architecture from ARCHES, which is a list of
2842 architectures already created during this debugging session.
2844 Called e.g. at program startup, when reading a core file, and when
2845 reading a binary file. */
2847 static struct gdbarch *
2848 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2850 struct gdbarch_tdep *tdep;
2851 struct gdbarch *gdbarch;
2852 struct gdbarch_list *best_arch;
2853 struct tdesc_arch_data *tdesc_data = NULL;
2854 const struct target_desc *tdesc = info.target_desc;
2857 const struct tdesc_feature *feature;
2859 int num_pseudo_regs = 0;
2861 /* Ensure we always have a target descriptor. */
2862 if (!tdesc_has_registers (tdesc))
2863 tdesc = tdesc_aarch64;
2867 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.core");
/* The core feature is mandatory; bail out if it is missing.  */
2869 if (feature == NULL)
2872 tdesc_data = tdesc_data_alloc ();
2874 /* Validate the descriptor provides the mandatory core R registers
2875 and allocate their numbers. */
2876 for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
2878 tdesc_numbered_register (feature, tdesc_data, AARCH64_X0_REGNUM + i,
2879 aarch64_r_register_names[i]);
2881 num_regs = AARCH64_X0_REGNUM + i;
2883 /* Look for the (optional) V registers. */
2884 feature = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
2887 /* Validate the descriptor provides the mandatory V registers
2888 and allocate their numbers. */
2889 for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
2891 tdesc_numbered_register (feature, tdesc_data, AARCH64_V0_REGNUM + i,
2892 aarch64_v_register_names[i]);
2894 num_regs = AARCH64_V0_REGNUM + i;
2896 num_pseudo_regs += 32; /* add the Qn scalar register pseudos */
2897 num_pseudo_regs += 32; /* add the Dn scalar register pseudos */
2898 num_pseudo_regs += 32; /* add the Sn scalar register pseudos */
2899 num_pseudo_regs += 32; /* add the Hn scalar register pseudos */
2900 num_pseudo_regs += 32; /* add the Bn scalar register pseudos */
/* Validation failed: release the tdesc data before returning.  */
2905 tdesc_data_cleanup (tdesc_data);
2909 /* AArch64 code is always little-endian. */
2910 info.byte_order_for_code = BFD_ENDIAN_LITTLE;
2912 /* If there is already a candidate, use it. */
2913 for (best_arch = gdbarch_list_lookup_by_info (arches, &info);
2915 best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
2917 /* Found a match. */
2921 if (best_arch != NULL)
2923 if (tdesc_data != NULL)
2924 tdesc_data_cleanup (tdesc_data);
2925 return best_arch->gdbarch;
/* No reusable candidate: build a fresh gdbarch.  */
2928 tdep = XCNEW (struct gdbarch_tdep);
2929 gdbarch = gdbarch_alloc (&info, tdep);
2931 /* This should be low enough for everything. */
2932 tdep->lowest_pc = 0x20;
2933 tdep->jb_pc = -1; /* Longjump support not enabled by default. */
2934 tdep->jb_elt_size = 8;
2936 set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
2937 set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
2939 /* Frame handling. */
2940 set_gdbarch_dummy_id (gdbarch, aarch64_dummy_id);
2941 set_gdbarch_unwind_pc (gdbarch, aarch64_unwind_pc);
2942 set_gdbarch_unwind_sp (gdbarch, aarch64_unwind_sp);
2944 /* Advance PC across function entry code. */
2945 set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
2947 /* The stack grows downward. */
2948 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2950 /* Breakpoint manipulation. */
2951 set_gdbarch_breakpoint_kind_from_pc (gdbarch,
2952 aarch64_breakpoint::kind_from_pc);
2953 set_gdbarch_sw_breakpoint_from_kind (gdbarch,
2954 aarch64_breakpoint::bp_from_kind);
2955 set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
2956 set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
2958 /* Information about registers, etc. */
2959 set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
2960 set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
2961 set_gdbarch_num_regs (gdbarch, num_regs);
2963 set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
2964 set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
2965 set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
2966 set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
2967 set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
2968 set_tdesc_pseudo_register_reggroup_p (gdbarch,
2969 aarch64_pseudo_register_reggroup_p);
/* Fundamental type sizes for the LP64 AArch64 ABI.  */
2972 set_gdbarch_short_bit (gdbarch, 16);
2973 set_gdbarch_int_bit (gdbarch, 32);
2974 set_gdbarch_float_bit (gdbarch, 32);
2975 set_gdbarch_double_bit (gdbarch, 64);
2976 set_gdbarch_long_double_bit (gdbarch, 128);
2977 set_gdbarch_long_bit (gdbarch, 64);
2978 set_gdbarch_long_long_bit (gdbarch, 64);
2979 set_gdbarch_ptr_bit (gdbarch, 64);
2980 set_gdbarch_char_signed (gdbarch, 0);
2981 set_gdbarch_wchar_signed (gdbarch, 0);
2982 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2983 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2984 set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
2986 /* Internal <-> external register number maps. */
2987 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
2989 /* Returning results. */
2990 set_gdbarch_return_value (gdbarch, aarch64_return_value);
2993 set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
2995 /* Virtual tables. */
2996 set_gdbarch_vbit_in_delta (gdbarch, 1);
2998 /* Hook in the ABI-specific overrides, if they have been registered. */
2999 info.target_desc = tdesc;
3000 info.tdep_info = (void *) tdesc_data;
3001 gdbarch_init_osabi (info, gdbarch);
3003 dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3005 /* Add some default predicates. */
3006 frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3007 dwarf2_append_unwinders (gdbarch);
3008 frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3010 frame_base_set_default (gdbarch, &aarch64_normal_base);
3012 /* Now we have tuned the configuration, set a few final things,
3013 based on what the OS ABI has told us. */
3015 if (tdep->jb_pc >= 0)
3016 set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3018 set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3020 tdesc_use_registers (gdbarch, tdesc, tdesc_data);
3022 /* Add standard register aliases. */
3023 for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3024 user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3025 value_of_aarch64_user_reg,
3026 &aarch64_register_aliases[i].regnum);
/* Dump the per-architecture data (tdep) to FILE for "maint print arch".  */
3032 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3034 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3039 fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3040 paddress (gdbarch, tdep->lowest_pc));
3046 static void aarch64_process_record_test (void);
3050 /* Suppress warning from -Wmissing-prototypes. */
3051 extern initialize_file_ftype _initialize_aarch64_tdep;
/* Module initializer: register the architecture, its target description,
   the "set/show debug aarch64" command, and the unit self-tests.  */
3054 _initialize_aarch64_tdep (void)
3056 gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3059 initialize_tdesc_aarch64 ();
3061 /* Debug this file's internals. */
3062 add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3063 Set AArch64 debugging."), _("\
3064 Show AArch64 debugging."), _("\
3065 When on, AArch64 specific debugging is enabled."),
3068 &setdebuglist, &showdebuglist);
3071 register_self_test (selftests::aarch64_analyze_prologue_test);
3072 register_self_test (selftests::aarch64_process_record_test);
3076 /* AArch64 process record-replay related structures, defines etc. */
/* Allocate REGS as an array of LENGTH uint32_t register numbers copied
   from RECORD_BUF.  The caller owns (and must xfree) REGS.  */
3078 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3081 unsigned int reg_len = LENGTH; \
3084 REGS = XNEWVEC (uint32_t, reg_len); \
3085 memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
/* Allocate MEMS as an array of LENGTH aarch64_mem_r records copied from
   RECORD_BUF.  The caller owns (and must xfree) MEMS.  */
3090 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3093 unsigned int mem_len = LENGTH; \
3096 MEMS = XNEWVEC (struct aarch64_mem_r, mem_len); \
3097 memcpy(&MEMS->len, &RECORD_BUF[0], \
3098 sizeof(struct aarch64_mem_r) * LENGTH); \
3103 /* AArch64 record/replay structures and enumerations.
   One memory access to be recorded: LEN bytes at ADDR.  */
3105 struct aarch64_mem_r
3107 uint64_t len; /* Record length. */
3108 uint64_t addr; /* Memory address. */
/* Result codes returned by the per-class record handlers below.  */
3111 enum aarch64_record_result
3113 AARCH64_RECORD_SUCCESS,
3114 AARCH64_RECORD_UNSUPPORTED,
3115 AARCH64_RECORD_UNKNOWN
/* Decoding context for one instruction during process record/replay.
   The record handlers fill in the register/memory record arrays, which
   the caller owns after REG_ALLOC/MEM_ALLOC.  */
3118 typedef struct insn_decode_record_t
3120 struct gdbarch *gdbarch;
3121 struct regcache *regcache;
3122 CORE_ADDR this_addr; /* Address of insn to be recorded. */
3123 uint32_t aarch64_insn; /* Insn to be recorded. */
3124 uint32_t mem_rec_count; /* Count of memory records. */
3125 uint32_t reg_rec_count; /* Count of register records. */
3126 uint32_t *aarch64_regs; /* Registers to be recorded. */
3127 struct aarch64_mem_r *aarch64_mems; /* Memory locations to be recorded. */
3128 } insn_decode_record;
3130 /* Record handler for data processing - register instructions.
   Records the destination register (and CPSR for flag-setting forms)
   so record/replay can undo the instruction's effects.  */
3133 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3135 uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3136 uint32_t record_buf[4];
3138 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3139 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3140 insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3142 if (!bit (aarch64_insn_r->aarch64_insn, 28))
3146 /* Logical (shifted register). */
3147 if (insn_bits24_27 == 0x0a)
3148 setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3150 else if (insn_bits24_27 == 0x0b)
3151 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3153 return AARCH64_RECORD_UNKNOWN;
3155 record_buf[0] = reg_rd;
3156 aarch64_insn_r->reg_rec_count = 1;
/* Flag-setting variant also clobbers the condition flags.  */
3158 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3162 if (insn_bits24_27 == 0x0b)
3164 /* Data-processing (3 source). */
3165 record_buf[0] = reg_rd;
3166 aarch64_insn_r->reg_rec_count = 1;
3168 else if (insn_bits24_27 == 0x0a)
3170 if (insn_bits21_23 == 0x00)
3172 /* Add/subtract (with carry). */
3173 record_buf[0] = reg_rd;
3174 aarch64_insn_r->reg_rec_count = 1;
3175 if (bit (aarch64_insn_r->aarch64_insn, 29))
3177 record_buf[1] = AARCH64_CPSR_REGNUM;
3178 aarch64_insn_r->reg_rec_count = 2;
3181 else if (insn_bits21_23 == 0x02)
3183 /* Conditional compare (register) and conditional compare
3184 (immediate) instructions. */
3185 record_buf[0] = AARCH64_CPSR_REGNUM;
3186 aarch64_insn_r->reg_rec_count = 1;
3188 else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3190 /* Conditional select. */
3191 /* Data-processing (2 source). */
3192 /* Data-processing (1 source). */
3193 record_buf[0] = reg_rd;
3194 aarch64_insn_r->reg_rec_count = 1;
3197 return AARCH64_RECORD_UNKNOWN;
3201 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3203 return AARCH64_RECORD_SUCCESS;
3206 /* Record handler for data processing - immediate instructions.
   Records the destination register (and CPSR for flag-setting forms).  */
3209 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3211 uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3212 uint32_t record_buf[4];
3214 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3215 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3216 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3218 if (insn_bits24_27 == 0x00 /* PC rel addressing. */
3219 || insn_bits24_27 == 0x03 /* Bitfield and Extract. */
3220 || (insn_bits24_27 == 0x02 && insn_bit23)) /* Move wide (immediate). */
3222 record_buf[0] = reg_rd;
3223 aarch64_insn_r->reg_rec_count = 1;
3225 else if (insn_bits24_27 == 0x01)
3227 /* Add/Subtract (immediate). */
3228 setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3229 record_buf[0] = reg_rd;
3230 aarch64_insn_r->reg_rec_count = 1;
3232 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3234 else if (insn_bits24_27 == 0x02 && !insn_bit23)
3236 /* Logical (immediate). */
3237 setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3238 record_buf[0] = reg_rd;
3239 aarch64_insn_r->reg_rec_count = 1;
3241 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3244 return AARCH64_RECORD_UNKNOWN;
3246 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3248 return AARCH64_RECORD_SUCCESS;
3251 /* Record handler for branch, exception generation and system instructions.
   Branches clobber the PC (and LR when linking); SVC is delegated to the
   OS-ABI syscall record hook.  */
3254 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3256 struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3257 uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3258 uint32_t record_buf[4];
3260 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3261 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3262 insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3264 if (insn_bits28_31 == 0x0d)
3266 /* Exception generation instructions. */
3267 if (insn_bits24_27 == 0x04)
3269 if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3270 && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3271 && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3273 ULONGEST svc_number;
/* SVC: the syscall number convention is x8 (register 8).  */
3275 regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3277 return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3281 return AARCH64_RECORD_UNSUPPORTED;
3283 /* System instructions. */
3284 else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3286 uint32_t reg_rt, reg_crn;
3288 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3289 reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3291 /* Record rt in case of sysl and mrs instructions. */
3292 if (bit (aarch64_insn_r->aarch64_insn, 21))
3294 record_buf[0] = reg_rt;
3295 aarch64_insn_r->reg_rec_count = 1;
3297 /* Record cpsr for hint and msr(immediate) instructions. */
3298 else if (reg_crn == 0x02 || reg_crn == 0x04)
3300 record_buf[0] = AARCH64_CPSR_REGNUM;
3301 aarch64_insn_r->reg_rec_count = 1;
3304 /* Unconditional branch (register). */
3305 else if((insn_bits24_27 & 0x0e) == 0x06)
3307 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
/* BLR also writes the link register.  */
3308 if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3309 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3312 return AARCH64_RECORD_UNKNOWN;
3314 /* Unconditional branch (immediate). */
3315 else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3317 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
/* BL (bit 31 set) also writes the link register.  */
3318 if (bit (aarch64_insn_r->aarch64_insn, 31))
3319 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3322 /* Compare & branch (immediate), Test & branch (immediate) and
3323 Conditional branch (immediate). */
3324 record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3326 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3328 return AARCH64_RECORD_SUCCESS;
3331 /* Record handler for advanced SIMD load and store instructions.
   Loads record the written V registers; stores record the affected
   memory ranges (element size / address pairs in RECORD_BUF_MEM).  */
3334 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3337 uint64_t addr_offset = 0;
3338 uint32_t record_buf[24];
3339 uint64_t record_buf_mem[24];
3340 uint32_t reg_rn, reg_rt;
3341 uint32_t reg_index = 0, mem_index = 0;
3342 uint8_t opcode_bits, size_bits;
3344 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3345 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3346 size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3347 opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3348 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3351 debug_printf ("Process record: Advanced SIMD load/store\n");
3353 /* Load/store single structure. */
3354 if (bit (aarch64_insn_r->aarch64_insn, 24))
3356 uint8_t sindex, scale, selem, esize, replicate = 0;
3357 scale = opcode_bits >> 2;
3358 selem = ((opcode_bits & 0x02) |
3359 bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3363 if (size_bits & 0x01)
3364 return AARCH64_RECORD_UNKNOWN;
3367 if ((size_bits >> 1) & 0x01)
3368 return AARCH64_RECORD_UNKNOWN;
3369 if (size_bits & 0x01)
3371 if (!((opcode_bits >> 1) & 0x01))
3374 return AARCH64_RECORD_UNKNOWN;
3378 if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3385 return AARCH64_RECORD_UNKNOWN;
/* Replicating load (LD*R): every selected V register is written.  */
3391 for (sindex = 0; sindex < selem; sindex++)
3393 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3394 reg_rt = (reg_rt + 1) % 32;
3398 for (sindex = 0; sindex < selem; sindex++)
/* Bit 22 set = load (record register); clear = store (record memory).  */
3400 if (bit (aarch64_insn_r->aarch64_insn, 22))
3401 record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3404 record_buf_mem[mem_index++] = esize / 8;
3405 record_buf_mem[mem_index++] = address + addr_offset;
3407 addr_offset = addr_offset + (esize / 8);
3408 reg_rt = (reg_rt + 1) % 32;
3412 /* Load/store multiple structure. */
3415 uint8_t selem, esize, rpt, elements;
3416 uint8_t eindex, rindex;
3418 esize = 8 << size_bits;
3419 if (bit (aarch64_insn_r->aarch64_insn, 30))
3420 elements = 128 / esize;
3422 elements = 64 / esize;
3424 switch (opcode_bits)
3426 /*LD/ST4 (4 Registers). */
3431 /*LD/ST1 (4 Registers). */
3436 /*LD/ST3 (3 Registers). */
3441 /*LD/ST1 (3 Registers). */
3446 /*LD/ST1 (1 Register). */
3451 /*LD/ST2 (2 Registers). */
3456 /*LD/ST1 (2 Registers). */
3462 return AARCH64_RECORD_UNSUPPORTED;
3465 for (rindex = 0; rindex < rpt; rindex++)
3466 for (eindex = 0; eindex < elements; eindex++)
3468 uint8_t reg_tt, sindex;
3469 reg_tt = (reg_rt + rindex) % 32;
3470 for (sindex = 0; sindex < selem; sindex++)
3472 if (bit (aarch64_insn_r->aarch64_insn, 22))
3473 record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
3476 record_buf_mem[mem_index++] = esize / 8;
3477 record_buf_mem[mem_index++] = address + addr_offset;
3479 addr_offset = addr_offset + (esize / 8);
3480 reg_tt = (reg_tt + 1) % 32;
/* Writeback (bit 23): the base register is also modified.  */
3485 if (bit (aarch64_insn_r->aarch64_insn, 23))
3486 record_buf[reg_index++] = reg_rn;
3488 aarch64_insn_r->reg_rec_count = reg_index;
3489 aarch64_insn_r->mem_rec_count = mem_index / 2;
3490 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3492 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3494 return AARCH64_RECORD_SUCCESS;
3497 /* Record handler for load and store instructions. */
3500 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
3502 uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
3503 uint8_t insn_bit23, insn_bit21;
3504 uint8_t opc, size_bits, ld_flag, vector_flag;
3505 uint32_t reg_rn, reg_rt, reg_rt2;
3506 uint64_t datasize, offset;
3507 uint32_t record_buf[8];
3508 uint64_t record_buf_mem[8];
3511 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3512 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3513 insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
3514 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3515 insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3516 ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
3517 vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
3518 reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3519 reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3520 reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
3521 size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
3523 /* Load/store exclusive. */
3524 if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
3527 debug_printf ("Process record: load/store exclusive\n");
3531 record_buf[0] = reg_rt;
3532 aarch64_insn_r->reg_rec_count = 1;
3535 record_buf[1] = reg_rt2;
3536 aarch64_insn_r->reg_rec_count = 2;
3542 datasize = (8 << size_bits) * 2;
3544 datasize = (8 << size_bits);
3545 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3547 record_buf_mem[0] = datasize / 8;
3548 record_buf_mem[1] = address;
3549 aarch64_insn_r->mem_rec_count = 1;
3552 /* Save register rs. */
3553 record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
3554 aarch64_insn_r->reg_rec_count = 1;
3558 /* Load register (literal) instructions decoding. */
3559 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
3562 debug_printf ("Process record: load register (literal)\n");
3564 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3566 record_buf[0] = reg_rt;
3567 aarch64_insn_r->reg_rec_count = 1;
3569 /* All types of load/store pair instructions decoding. */
3570 else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
3573 debug_printf ("Process record: load/store pair\n");
3579 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3580 record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
3584 record_buf[0] = reg_rt;
3585 record_buf[1] = reg_rt2;
3587 aarch64_insn_r->reg_rec_count = 2;
3592 imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
3594 size_bits = size_bits >> 1;
3595 datasize = 8 << (2 + size_bits);
3596 offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
3597 offset = offset << (2 + size_bits);
3598 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3600 if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
3602 if (imm7_off & 0x40)
3603 address = address - offset;
3605 address = address + offset;
3608 record_buf_mem[0] = datasize / 8;
3609 record_buf_mem[1] = address;
3610 record_buf_mem[2] = datasize / 8;
3611 record_buf_mem[3] = address + (datasize / 8);
3612 aarch64_insn_r->mem_rec_count = 2;
3614 if (bit (aarch64_insn_r->aarch64_insn, 23))
3615 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3617 /* Load/store register (unsigned immediate) instructions. */
3618 else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
3620 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3630 if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
3632 /* PRFM (immediate) */
3633 return AARCH64_RECORD_SUCCESS;
3635 else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
3637 /* LDRSW (immediate) */
3651 debug_printf ("Process record: load/store (unsigned immediate):"
3652 " size %x V %d opc %x\n", size_bits, vector_flag,
3658 offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
3659 datasize = 8 << size_bits;
3660 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3662 offset = offset << size_bits;
3663 address = address + offset;
3665 record_buf_mem[0] = datasize >> 3;
3666 record_buf_mem[1] = address;
3667 aarch64_insn_r->mem_rec_count = 1;
3672 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3674 record_buf[0] = reg_rt;
3675 aarch64_insn_r->reg_rec_count = 1;
3678 /* Load/store register (register offset) instructions. */
3679 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3680 && insn_bits10_11 == 0x02 && insn_bit21)
3683 debug_printf ("Process record: load/store (register offset)\n");
3684 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3691 if (size_bits != 0x03)
3694 return AARCH64_RECORD_UNKNOWN;
3698 ULONGEST reg_rm_val;
3700 regcache_raw_read_unsigned (aarch64_insn_r->regcache,
3701 bits (aarch64_insn_r->aarch64_insn, 16, 20), ®_rm_val);
3702 if (bit (aarch64_insn_r->aarch64_insn, 12))
3703 offset = reg_rm_val << size_bits;
3705 offset = reg_rm_val;
3706 datasize = 8 << size_bits;
3707 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3709 address = address + offset;
3710 record_buf_mem[0] = datasize >> 3;
3711 record_buf_mem[1] = address;
3712 aarch64_insn_r->mem_rec_count = 1;
3717 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3719 record_buf[0] = reg_rt;
3720 aarch64_insn_r->reg_rec_count = 1;
3723 /* Load/store register (immediate and unprivileged) instructions. */
3724 else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
3729 debug_printf ("Process record: load/store "
3730 "(immediate and unprivileged)\n");
3732 opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3739 if (size_bits != 0x03)
3742 return AARCH64_RECORD_UNKNOWN;
3747 imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
3748 offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
3749 datasize = 8 << size_bits;
3750 regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
3752 if (insn_bits10_11 != 0x01)
3754 if (imm9_off & 0x0100)
3755 address = address - offset;
3757 address = address + offset;
3759 record_buf_mem[0] = datasize >> 3;
3760 record_buf_mem[1] = address;
3761 aarch64_insn_r->mem_rec_count = 1;
3766 record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
3768 record_buf[0] = reg_rt;
3769 aarch64_insn_r->reg_rec_count = 1;
3771 if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
3772 record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
3774 /* Advanced SIMD load/store instructions. */
3776 return aarch64_record_asimd_load_store (aarch64_insn_r);
3778 MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
3780 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3782 return AARCH64_RECORD_SUCCESS;
3785 /* Record handler for data processing SIMD and floating point instructions. */
3788 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
3790 uint8_t insn_bit21, opcode, rmode, reg_rd;
3791 uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
3792 uint8_t insn_bits11_14;
3793 uint32_t record_buf[2];
3795 insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3796 insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3797 insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3798 insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3799 insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
3800 opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
3801 rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
3802 reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3803 insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
3806 debug_printf ("Process record: data processing SIMD/FP: ");
3808 if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
3810 /* Floating point - fixed point conversion instructions. */
3814 debug_printf ("FP - fixed point conversion");
3816 if ((opcode >> 1) == 0x0 && rmode == 0x03)
3817 record_buf[0] = reg_rd;
3819 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3821 /* Floating point - conditional compare instructions. */
3822 else if (insn_bits10_11 == 0x01)
3825 debug_printf ("FP - conditional compare");
3827 record_buf[0] = AARCH64_CPSR_REGNUM;
3829 /* Floating point - data processing (2-source) and
3830 conditional select instructions. */
3831 else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
3834 debug_printf ("FP - DP (2-source)");
3836 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3838 else if (insn_bits10_11 == 0x00)
3840 /* Floating point - immediate instructions. */
3841 if ((insn_bits12_15 & 0x01) == 0x01
3842 || (insn_bits12_15 & 0x07) == 0x04)
3845 debug_printf ("FP - immediate");
3846 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3848 /* Floating point - compare instructions. */
3849 else if ((insn_bits12_15 & 0x03) == 0x02)
3852 debug_printf ("FP - immediate");
3853 record_buf[0] = AARCH64_CPSR_REGNUM;
3855 /* Floating point - integer conversions instructions. */
3856 else if (insn_bits12_15 == 0x00)
3858 /* Convert float to integer instruction. */
3859 if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
3862 debug_printf ("float to int conversion");
3864 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3866 /* Convert integer to float instruction. */
3867 else if ((opcode >> 1) == 0x01 && !rmode)
3870 debug_printf ("int to float conversion");
3872 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3874 /* Move float to integer instruction. */
3875 else if ((opcode >> 1) == 0x03)
3878 debug_printf ("move float to int");
3880 if (!(opcode & 0x01))
3881 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3883 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3886 return AARCH64_RECORD_UNKNOWN;
3889 return AARCH64_RECORD_UNKNOWN;
3892 return AARCH64_RECORD_UNKNOWN;
3894 else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
3897 debug_printf ("SIMD copy");
3899 /* Advanced SIMD copy instructions. */
3900 if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
3901 && !bit (aarch64_insn_r->aarch64_insn, 15)
3902 && bit (aarch64_insn_r->aarch64_insn, 10))
3904 if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
3905 record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
3907 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3910 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3912 /* All remaining floating point or advanced SIMD instructions. */
3916 debug_printf ("all remain");
3918 record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
3922 debug_printf ("\n");
3924 aarch64_insn_r->reg_rec_count++;
3925 gdb_assert (aarch64_insn_r->reg_rec_count == 1);
3926 REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3928 return AARCH64_RECORD_SUCCESS;
3931 /* Decodes insns type and invokes its record handler. */
3934 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
3936 uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
3938 ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
3939 ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
3940 ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
3941 ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
3943 /* Data processing - immediate instructions. */
3944 if (!ins_bit26 && !ins_bit27 && ins_bit28)
3945 return aarch64_record_data_proc_imm (aarch64_insn_r);
3947 /* Branch, exception generation and system instructions. */
3948 if (ins_bit26 && !ins_bit27 && ins_bit28)
3949 return aarch64_record_branch_except_sys (aarch64_insn_r);
3951 /* Load and store instructions. */
3952 if (!ins_bit25 && ins_bit27)
3953 return aarch64_record_load_store (aarch64_insn_r);
3955 /* Data processing - register instructions. */
3956 if (ins_bit25 && !ins_bit26 && ins_bit27)
3957 return aarch64_record_data_proc_reg (aarch64_insn_r);
3959 /* Data processing - SIMD and floating point instructions. */
3960 if (ins_bit25 && ins_bit26 && ins_bit27)
3961 return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
3963 return AARCH64_RECORD_UNSUPPORTED;
3966 /* Cleans up local record registers and memory allocations. */
3969 deallocate_reg_mem (insn_decode_record *record)
3971 xfree (record->aarch64_regs);
3972 xfree (record->aarch64_mems);
3976 namespace selftests {
3979 aarch64_process_record_test (void)
3981 struct gdbarch_info info;
3984 gdbarch_info_init (&info);
3985 info.bfd_arch_info = bfd_scan_arch ("aarch64");
3987 struct gdbarch *gdbarch = gdbarch_find_by_info (info);
3988 SELF_CHECK (gdbarch != NULL);
3990 insn_decode_record aarch64_record;
3992 memset (&aarch64_record, 0, sizeof (insn_decode_record));
3993 aarch64_record.regcache = NULL;
3994 aarch64_record.this_addr = 0;
3995 aarch64_record.gdbarch = gdbarch;
3997 /* 20 00 80 f9 prfm pldl1keep, [x1] */
3998 aarch64_record.aarch64_insn = 0xf9800020;
3999 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4000 SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4001 SELF_CHECK (aarch64_record.reg_rec_count == 0);
4002 SELF_CHECK (aarch64_record.mem_rec_count == 0);
4004 deallocate_reg_mem (&aarch64_record);
4007 } // namespace selftests
4008 #endif /* GDB_SELF_TEST */
4010 /* Parse the current instruction and record the values of the registers and
4011 memory that will be changed in current instruction to record_arch_list
4012 return -1 if something is wrong. */
4015 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4016 CORE_ADDR insn_addr)
4018 uint32_t rec_no = 0;
4019 uint8_t insn_size = 4;
4021 gdb_byte buf[insn_size];
4022 insn_decode_record aarch64_record;
4024 memset (&buf[0], 0, insn_size);
4025 memset (&aarch64_record, 0, sizeof (insn_decode_record));
4026 target_read_memory (insn_addr, &buf[0], insn_size);
4027 aarch64_record.aarch64_insn
4028 = (uint32_t) extract_unsigned_integer (&buf[0],
4030 gdbarch_byte_order (gdbarch));
4031 aarch64_record.regcache = regcache;
4032 aarch64_record.this_addr = insn_addr;
4033 aarch64_record.gdbarch = gdbarch;
4035 ret = aarch64_record_decode_insn_handler (&aarch64_record);
4036 if (ret == AARCH64_RECORD_UNSUPPORTED)
4038 printf_unfiltered (_("Process record does not support instruction "
4039 "0x%0x at address %s.\n"),
4040 aarch64_record.aarch64_insn,
4041 paddress (gdbarch, insn_addr));
4047 /* Record registers. */
4048 record_full_arch_list_add_reg (aarch64_record.regcache,
4050 /* Always record register CPSR. */
4051 record_full_arch_list_add_reg (aarch64_record.regcache,
4052 AARCH64_CPSR_REGNUM);
4053 if (aarch64_record.aarch64_regs)
4054 for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4055 if (record_full_arch_list_add_reg (aarch64_record.regcache,
4056 aarch64_record.aarch64_regs[rec_no]))
4059 /* Record memories. */
4060 if (aarch64_record.aarch64_mems)
4061 for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4062 if (record_full_arch_list_add_mem
4063 ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4064 aarch64_record.aarch64_mems[rec_no].len))
4067 if (record_full_arch_list_add_end ())
4071 deallocate_reg_mem (&aarch64_record);